xref: /openbmc/linux/drivers/infiniband/hw/mlx4/main.c (revision 4800cd83)
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	"mlx4_ib"
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];
	struct mlx4_ib_dev     *dev;
	int			port;
};

static struct workqueue_struct *wq;

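/*
 * Fill in the common header of a subnet management Get() query: LID-routed
 * SM class, base and class version 1.  Callers set attr_id/attr_mod before
 * passing the MAD to mlx4_MAD_IFC().
 */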
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;

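/*
 * Report device attributes.  NodeInfo (vendor/part IDs, hardware revision,
 * system image GUID) is read from firmware with a MAD_IFC command; the
 * remaining limits come from the cached mlx4_dev capabilities.
 */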
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr	   = dev->dev->caps.max_wqes;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = IB_ATOMIC_HCA;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

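/* A set bit in caps.port_mask means the port is IB; clear means Ethernet (IBoE). */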
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask & (1 << (port_num - 1)) ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

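/*
 * Decode an IB port's attributes from a PortInfo MAD reply; the fixed
 * byte offsets into out_mad->data follow the PortInfo attribute layout.
 */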
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props,
			      struct ib_smp *out_mad)
{
	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	return 0;
}

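/*
 * Fake a PortInfo PhysState for IBoE ports: 5 (LinkUp) when the logical
 * state is ACTIVE, otherwise 3 (Disabled).
 */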
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

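/*
 * Ethernet (IBoE) ports have no subnet management agent, so most
 * attributes are synthesized; link state and MTU are derived from the
 * companion mlx4_en net_device under iboe->lock.
 */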
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props,
			       struct ib_smp *out_mad)
{
	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;

	props->active_width	= IB_WIDTH_1X;
	props->active_speed	= 4;
	props->port_cap_flags	= IB_PORT_CM_SUP;
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->max_mtu		= IB_MTU_2048;
	props->subnet_timeout	= 0;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= 0;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);

out:
	spin_unlock(&iboe->lock);
	return 0;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, out_mad) :
		eth_link_query_port(ibdev, port, props, out_mad);

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

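/*
 * Build a GID from two SMP queries: PortInfo supplies the 8-byte subnet
 * prefix (at data + 8), GuidInfo supplies the GUID for the requested index.
 */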
static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			       union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	spin_lock(&to_mdev(ibdev)->sm_lock);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock(&to_mdev(ibdev)->sm_lock);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

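/*
 * Wrapper for the SET_PORT firmware command; the placement of the
 * reset_qkey_viols flag and the capability mask in the mailbox differs
 * between the old and new port command layouts.
 */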
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

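/*
 * Allocate a userspace context: reserve a UAR for its doorbells and
 * return the QP table size and BlueFlame geometry through udata.
 */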
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	resp.qp_tab_size      = dev->dev->caps.num_qps;
	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

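/*
 * Map device pages into userspace: page offset 0 is the context's UAR
 * doorbell page (non-cached), offset 1 is its BlueFlame page
 * (write-combining) when BlueFlame is supported.
 */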
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

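/*
 * For IBoE, mirror a multicast join into the Ethernet device's filter:
 * derive the multicast MAC from the GID and add it to the netdev's MC
 * list.  Returns 1 if the MAC was added, so the caller can mark the gid
 * entry for removal on detach.
 */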
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}

static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
				    !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    MLX4_PROTOCOL_IB);
	if (err)
		return err;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
	return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;

	err = mlx4_multicast_detach(mdev->dev,
				    &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		printk(KERN_WARNING "could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

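/*
 * Build the modified EUI-64 interface ID from the netdev's MAC address.
 * The middle two bytes carry the VLAN ID when one is set (< 0x1000),
 * otherwise the standard 0xff, 0xfe filler; the universal/local bit of
 * the first byte is flipped as usual.
 */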
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}

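/*
 * Deferred work: push a snapshot of a port's GID table to firmware via
 * SET_PORT and, on success, commit it to the shadow table and notify
 * consumers with a port event.
 */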
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;
	struct ib_event event;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
	if (err)
		printk(KERN_WARNING "set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		event.device = &gw->dev->ib_dev;
		event.element.port_num = gw->port;
		event.event    = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}

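/*
 * Rebuild the port's shadow GID table from the link-local GIDs of the
 * associated netdev and its VLAN devices.  Slots no longer backed by a
 * device are zeroed; if anything changed, a work item pushes the new
 * table to firmware.
 */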
static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	int ret;
	union ib_gid gid;
	int free;
	int found;
	int need_update = 0;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(128, GFP_ATOMIC);
	if (!hits) {
		ret = -ENOMEM;
		goto out;
	}

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			free = -1;
			for (i = 0; i < 128; ++i) {
				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
					free = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0],
					    &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0],
					     &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (free >= 0) {
					dev->iboe.gid_table[port - 1][free] = gid;
					hits[free] = 1;
					++need_update;
				}
			}
		}
	}
	rcu_read_unlock();

	for (i = 0; i < 128; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;

out:
	kfree(work);
	return ret;
}

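/* Translate netdev notifier events into GID table updates for the port. */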
static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}

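/*
 * Netdevice notifier: resync iboe->netdevs[] with the current mlx4_en
 * devices and forward up/down/address-change events for whichever port
 * the netdev (or its VLAN parent) belongs to.
 */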
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1]
		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}

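/*
 * Called by the mlx4 core for each probed device: allocate the ib_device,
 * fill in the verbs op table, then register with the IB core and hook up
 * MAD handling, the netdev notifier (for IBoE) and sysfs attributes.
 */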
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i;
	int err;
	struct mlx4_ib_iboe *iboe;

	printk_once(KERN_INFO "%s", mlx4_ib_version);

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_map;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_reg;
	}

	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[i]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	return ibdev;

err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		printk(KERN_WARNING "failure unregistering notifier\n");
	flush_workqueue(wq);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			printk(KERN_WARNING "failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->uar_map);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}

static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, int port)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);

	if (port > ibdev->num_ports)
		return;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROTOCOL_IB
};

static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		destroy_workqueue(wq);
		return err;
	}

	return 0;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);