xref: /openbmc/linux/drivers/infiniband/hw/mlx4/main.c (revision 15e3ae36)
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

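/*
 * Illustrative sketch (not part of the upstream file): a typical caller
 * pairs init_query_mad() with an attribute ID and then issues the MAD:
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 *	in_mad->attr_mod = cpu_to_be32(port);
 *	err = mlx4_MAD_IFC(dev, MLX4_MAD_IFC_IGNORE_KEYS, port,
 *			   NULL, NULL, in_mad, out_mad);
 *
 * This is exactly the pattern used by the query helpers below.
 */
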
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

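/*
 * Hedged restatement of the gating above: device-managed flow steering
 * (DMFS) is reported only when every present link type is individually
 * capable, roughly:
 *
 *	bool ib_ok  = !ib_num_ports ||
 *		      (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB);
 *	bool eth_ok = !eth_num_ports ||
 *		      (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN);
 *	dmfs = dmfs && ib_ok && eth_ok;
 *
 * with a further multifunction restriction for IB ports.
 */
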
static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

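/*
 * Usage sketch (illustrative only): callers snapshot the cached table
 * under iboe->lock and push it to firmware outside the lock, which is
 * the pattern mlx4_ib_add_gid()/mlx4_ib_del_gid() follow below:
 *
 *	spin_lock_bh(&iboe->lock);
 *	// copy port_gid_table->gids[] into a local gids[] array
 *	spin_unlock_bh(&iboe->lock);
 *	err = mlx4_ib_update_gids(gids, ibdev, port_num);
 */
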
static void free_gid_entry(struct gid_entry *entry)
{
	memset(&entry->gid, 0, sizeof(entry->gid));
	kfree(entry->ctx);
	entry->ctx = NULL;
}

static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table   *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;
	u16 vlan_id = 0xffff;
	u8 mac[ETH_ALEN];

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
	if (ret)
		return ret;
	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    port_gid_table->gids[i].gid_type == attr->gid_type &&
		    port_gid_table->gids[i].vlan_id == vlan_id)  {
			found = i;
			break;
		}
		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid,
				       &attr->gid, sizeof(attr->gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].vlan_id = vlan_id;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		if (ret) {
			spin_lock_bh(&iboe->lock);
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
			spin_unlock_bh(&iboe->lock);
		}
		kfree(gids);
	}

	return ret;
}

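/*
 * Reference-counting contract (hedged summary of the code above): the
 * *context returned here is a struct gid_cache_context whose refcount
 * pairs each mlx4_ib_add_gid() with a later mlx4_ib_del_gid() on the
 * same attribute:
 *
 *	void *ctx;
 *	err = mlx4_ib_add_gid(attr, &ctx);	// refcount = 1 (or ++)
 *	...
 *	err = mlx4_ib_del_gid(attr, &ctx);	// frees HW slot at refcount 0
 */
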
static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table   *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			free_gid_entry(&port_gid_table->gids[real_index]);
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid,
				       &port_gid_table->gids[i].gid,
				       sizeof(union ib_gid));
				gids[i].gid_type =
				    port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		kfree(gids);
	}
	return ret;
}

int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	struct mlx4_port_gid_table   *port_gid_table;
	int real_index = -EINVAL;
	int i;
	unsigned long flags;
	u8 port_num = attr->port_num;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return attr->index;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    attr->gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

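/*
 * Illustrative note: on a bonded device both ports share port 1's GID
 * cache, so a lookup for attr->port_num == 2 is redirected to port 1.
 * A caller might use it as:
 *
 *	int real = mlx4_ib_gid_index_to_real_index(ibdev, attr);
 *	if (real < 0)
 *		return real;	// -EINVAL: GID not cached for this port
 */
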
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->persist->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_send_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_recv_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
			props->rss_caps.max_rwq_indirection_tables =
				props->max_qp;
			props->rss_caps.max_rwq_indirection_table_size =
				dev->dev->caps.max_rss_tbl_sz;
			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
			props->max_wq_type_rq = props->max_qp;
		}

		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;

			if (dev->dev->caps.tunnel_offload_mode ==
			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX4_IB_RX_HASH_INNER;
		}
		resp.response_length = offsetof(typeof(resp), rss_caps) +
				       sizeof(resp.rss_caps);
	}

	if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
		if (dev->dev->caps.max_gso_sz &&
		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
		    IB_LINK_LAYER_ETHERNET) ||
		    (mlx4_ib_port_link_layer(ibdev, 2) ==
		    IB_LINK_LAYER_ETHERNET))) {
			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
			resp.tso_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length = offsetof(typeof(resp), tso_caps) +
				       sizeof(resp.tso_caps);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

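/*
 * Sketch of the extensible-response pattern used above (assumption:
 * this is the usual uverbs "comp_mask + response_length" convention,
 * with new_field as a hypothetical placeholder): each optional field
 * is appended only when the consumer's buffer can hold it,
 *
 *	if (uhw->outlen >= resp.response_length + sizeof(resp.new_field)) {
 *		resp.response_length += sizeof(resp.new_field);
 *		resp.new_field = ...;
 *	}
 *
 * so older userspace binaries keep working against newer kernels.
 */
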
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
				in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}

static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP;
	props->ip_gids = true;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

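/*
 * Hedged summary of the Ethernet (RoCE) path above: the IB-visible port
 * state is derived from the underlying netdev, roughly
 *
 *	state = netif_running(ndev) && netif_carrier_ok(ndev) ?
 *			IB_PORT_ACTIVE : IB_PORT_DOWN;
 *	active_mtu = min(max_mtu, iboe_get_mtu(ndev->mtu));
 *
 * and defaults to IB_PORT_DOWN / IB_MTU_256 when no netdev is attached.
 */
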
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props is zeroed by the caller; avoid zeroing it here */

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
				eth_link_query_port(ibdev, port, props);

	return err;
}

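/*
 * Usage sketch (illustrative): netw_view selects the subnet-wide view
 * for multifunction devices; the host view is the common case:
 *
 *	struct ib_port_attr attr = {};
 *	err = __mlx4_ib_query_port(ibdev, port, &attr, 0);  // host view
 *	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);  // network view
 */
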
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	return 0;
}

static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

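/*
 * Worked example of the P_Key MAD indexing above: each PKeyTable MAD
 * block carries 32 entries, so entry 70 is fetched with
 * attr_mod = 70 / 32 = 2 and read from slot 70 % 32 = 6 of the
 * returned block.
 */
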
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return -EAGAIN;

	if (ibdev->ops.uverbs_abi_ver ==
	    MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err)
		return err;

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		return -EFAULT;
	}

	return err;
}

static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
}

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_mucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);

	case 1:
		if (dev->dev->caps.bf_reg_size == 0)
			return -EINVAL;
		return rdma_user_mmap_io(
			context, vma,
			to_mucontext(context)->uar.pfn +
				dev->dev->caps.num_uars,
			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
			NULL);

	case 3: {
		struct mlx4_clock_params params;
		int ret;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);
		if (ret)
			return ret;

		return rdma_user_mmap_io(
			context, vma,
			(pci_resource_start(dev->dev->persist->pdev,
					    params.bar) +
			 params.offset) >>
				PAGE_SHIFT,
			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
			NULL);
	}

	default:
		return -EINVAL;
	}
}

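/*
 * Mapping layout implemented above (hedged summary): userspace selects
 * the region via the mmap offset, in pages:
 *
 *	offset 0 -> UAR doorbell page (non-cached)
 *	offset 1 -> BlueFlame register page (write-combining)
 *	offset 3 -> internal HCA clock page (non-cached)
 *
 * Any other offset (e.g. 2) fails with -EINVAL. For example, a call like
 * mmap(NULL, PAGE_SIZE, PROT_WRITE, MAP_SHARED, fd, 1 * PAGE_SIZE) from
 * the userspace driver would reach "case 1".
 */
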
static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	int err;

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err)
		return err;

	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
		return -EFAULT;
	}
	return 0;
}

static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field  +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))

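/*
 * Worked example of FIELDS_NOT_SUPPORTED(): with filter =
 * ib_spec->ipv4.mask and field = dst_ip (LAST_IPV4_FIELD), memchr_inv()
 * scans everything in the mask struct *after* dst_ip and returns
 * non-NULL (i.e. "not supported") if userspace set any bit the driver
 * does not know how to honour.
 */
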
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}

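/*
 * Illustrative sketch: a caller walks the variable-length spec list that
 * follows the ib_flow_attr header and lets parse_flow_attr() append one
 * hardware rule per spec, e.g.:
 *
 *	void *ib_flow = flow_attr + 1;
 *	for (i = 0; i < flow_attr->num_of_specs; i++) {
 *		ret = parse_flow_attr(dev, qp_num, ib_flow, buf + size);
 *		if (ret < 0)
 *			return -EINVAL;
 *		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
 *		size += ret;
 *	}
 *
 * which matches the loop in __mlx4_ib_create_flow() further down.
 */
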
struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};

static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};

static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];

		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}

static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;

		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put an empty rule; the qpn is ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}

static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
			  int domain,
			  enum mlx4_net_trans_promisc_mode flow_type,
			  u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
	    flow_attr->num_of_specs == 1) {
		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
		enum ib_flow_spec_type header_spec =
			((union ib_flow_spec *)(flow_attr + 1))->type;

		if (header_spec == IB_FLOW_SPEC_ETH)
			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Failed to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Failed to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}

static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Failed to detach network rule, registration id = 0x%llx\n",
		       reg_id);
	return err;
}

static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}

static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
				      struct ib_flow_attr *flow_attr,
				      enum mlx4_net_trans_promisc_mode *type)
{
	int err = 0;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
		return -EOPNOTSUPP;
	}

	if (flow_attr->num_of_specs == 0) {
		type[0] = MLX4_FS_MC_SNIFFER;
		type[1] = MLX4_FS_UC_SNIFFER;
	} else {
		union ib_flow_spec *ib_spec;

		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
		if (ib_spec->type != IB_FLOW_SPEC_ETH)
			return -EINVAL;

		/* if the mask is all zero, match both MC and UC */
		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
			type[0] = MLX4_FS_MC_SNIFFER;
			type[1] = MLX4_FS_UC_SNIFFER;
		} else {
			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
					    ib_spec->eth.mask.dst_mac[1],
					    ib_spec->eth.mask.dst_mac[2],
					    ib_spec->eth.mask.dst_mac[3],
					    ib_spec->eth.mask.dst_mac[4],
					    ib_spec->eth.mask.dst_mac[5]};

			/* The XOR above touched only the MC bit; a non-empty
			 * mask is valid only if that bit is set and the rest
			 * are zero.
			 */
			if (!is_zero_ether_addr(&mac[0]))
				return -EINVAL;

			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
				type[0] = MLX4_FS_MC_SNIFFER;
			else
				type[0] = MLX4_FS_UC_SNIFFER;
		}
	}

	return err;
}

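/*
 * Worked example of the multicast-bit test above: the only non-empty
 * dst_mac mask accepted is 01:00:00:00:00:00. XOR-ing byte 0 with 0x01
 * turns exactly that mask into all-zero, so any other mask fails the
 * is_zero_ether_addr() check and is rejected with -EINVAL.
 */
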
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
				    struct ib_flow_attr *flow_attr,
				    int domain, struct ib_udata *udata)
{
	int err = 0, i = 0, j = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
		return ERR_PTR(-EINVAL);

	if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
		return ERR_PTR(-EOPNOTSUPP);

	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
		return ERR_PTR(-EOPNOTSUPP);

	if (udata &&
	    udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		/* If the don't-trap flag (continue match) is set, then under
		 * specific conditions traffic is replicated to the given QP
		 * without being stolen from its normal destination.
		 */
1749 		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1750 			err = mlx4_ib_add_dont_trap_rule(dev,
1751 							 flow_attr,
1752 							 type);
1753 			if (err)
1754 				goto err_free;
1755 		} else {
1756 			type[0] = MLX4_FS_REGULAR;
1757 		}
1758 		break;
1759 
1760 	case IB_FLOW_ATTR_ALL_DEFAULT:
1761 		type[0] = MLX4_FS_ALL_DEFAULT;
1762 		break;
1763 
1764 	case IB_FLOW_ATTR_MC_DEFAULT:
1765 		type[0] = MLX4_FS_MC_DEFAULT;
1766 		break;
1767 
1768 	case IB_FLOW_ATTR_SNIFFER:
1769 		type[0] = MLX4_FS_MIRROR_RX_PORT;
1770 		type[1] = MLX4_FS_MIRROR_SX_PORT;
1771 		break;
1772 
1773 	default:
1774 		err = -EINVAL;
1775 		goto err_free;
1776 	}
1777 
1778 	while (i < ARRAY_SIZE(type) && type[i]) {
1779 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1780 					    &mflow->reg_id[i].id);
1781 		if (err)
1782 			goto err_create_flow;
1783 		if (is_bonded) {
1784 			/* Application always sees one port so the mirror rule
1785 			 * must be on port #2
1786 			 */
1787 			flow_attr->port = 2;
1788 			err = __mlx4_ib_create_flow(qp, flow_attr,
1789 						    domain, type[j],
1790 						    &mflow->reg_id[j].mirror);
1791 			flow_attr->port = 1;
1792 			if (err)
1793 				goto err_create_flow;
1794 			j++;
1795 		}
1796 
1797 		i++;
1798 	}
1799 
1800 	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1801 		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1802 					       &mflow->reg_id[i].id);
1803 		if (err)
1804 			goto err_create_flow;
1805 
1806 		if (is_bonded) {
1807 			flow_attr->port = 2;
1808 			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1809 						       &mflow->reg_id[j].mirror);
1810 			flow_attr->port = 1;
1811 			if (err)
1812 				goto err_create_flow;
1813 			j++;
1814 		}
1815 		/* the tunnel rule occupies reg_id[i]; account for it */
1816 		i++;
1817 	}
1818 
1819 	return &mflow->ibflow;
1820 
1821 err_create_flow:
1822 	/* A zero handle in the kzalloc'ed reg_id array marks an empty slot;
1823 	 * walk every slot so primary and mirror rules are both released. */
1824 	for (i = 0; i < ARRAY_SIZE(mflow->reg_id); i++) {
1825 		if (mflow->reg_id[i].id)
1826 			(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1827 						     mflow->reg_id[i].id);
1828 		if (mflow->reg_id[i].mirror)
1829 			(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1830 						     mflow->reg_id[i].mirror);
1831 	}
1832 
1833 err_free:
1834 	kfree(mflow);
1835 	return ERR_PTR(err);
1836 }
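/*
 * Editor's note (inferred from the function above): each mflow->reg_id
 * entry pairs a primary rule handle (.id) with an optional bonded-mode
 * mirror handle (.mirror) created on port 2 while the application only
 * sees one port. mlx4_ib_destroy_flow() below depends on the kzalloc()ed
 * array: a zero handle means no rule was registered in that slot.
 */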
1837 
1838 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1839 {
1840 	int err, ret = 0;
1841 	int i = 0;
1842 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1843 	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1844 
1845 	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1846 		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1847 		if (err)
1848 			ret = err;
1849 		if (mflow->reg_id[i].mirror) {
1850 			err = __mlx4_ib_destroy_flow(mdev->dev,
1851 						     mflow->reg_id[i].mirror);
1852 			if (err)
1853 				ret = err;
1854 		}
1855 		i++;
1856 	}
1857 
1858 	kfree(mflow);
1859 	return ret;
1860 }
1861 
1862 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1863 {
1864 	int err;
1865 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1866 	struct mlx4_dev	*dev = mdev->dev;
1867 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1868 	struct mlx4_ib_steering *ib_steering = NULL;
1869 	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1870 	struct mlx4_flow_reg_id	reg_id;
1871 
1872 	if (mdev->dev->caps.steering_mode ==
1873 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1874 		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1875 		if (!ib_steering)
1876 			return -ENOMEM;
1877 	}
1878 
1879 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1880 				    !!(mqp->flags &
1881 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1882 				    prot, &reg_id.id);
1883 	if (err) {
1884 		pr_err("multicast attach op failed, err %d\n", err);
1885 		goto err_malloc;
1886 	}
1887 
1888 	reg_id.mirror = 0;
1889 	if (mlx4_is_bonded(dev)) {
1890 		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1891 					    (mqp->port == 1) ? 2 : 1,
1892 					    !!(mqp->flags &
1893 					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1894 					    prot, &reg_id.mirror);
1895 		if (err)
1896 			goto err_add;
1897 	}
1898 
1899 	err = add_gid_entry(ibqp, gid);
1900 	if (err)
1901 		goto err_add;
1902 
1903 	if (ib_steering) {
1904 		memcpy(ib_steering->gid.raw, gid->raw, 16);
1905 		ib_steering->reg_id = reg_id;
1906 		mutex_lock(&mqp->mutex);
1907 		list_add(&ib_steering->list, &mqp->steering_rules);
1908 		mutex_unlock(&mqp->mutex);
1909 	}
1910 	return 0;
1911 
1912 err_add:
1913 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1914 			      prot, reg_id.id);
1915 	if (reg_id.mirror)
1916 		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1917 				      prot, reg_id.mirror);
1918 err_malloc:
1919 	kfree(ib_steering);
1920 
1921 	return err;
1922 }
1923 
1924 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1925 {
1926 	struct mlx4_ib_gid_entry *ge;
1927 	struct mlx4_ib_gid_entry *tmp;
1928 	struct mlx4_ib_gid_entry *ret = NULL;
1929 
1930 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1931 		if (!memcmp(raw, ge->gid.raw, 16)) {
1932 			ret = ge;
1933 			break;
1934 		}
1935 	}
1936 
1937 	return ret;
1938 }
1939 
1940 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1941 {
1942 	int err;
1943 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1944 	struct mlx4_dev *dev = mdev->dev;
1945 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1946 	struct net_device *ndev;
1947 	struct mlx4_ib_gid_entry *ge;
1948 	struct mlx4_flow_reg_id reg_id = {0, 0};
1949 	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
1950 
1951 	if (mdev->dev->caps.steering_mode ==
1952 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1953 		struct mlx4_ib_steering *ib_steering;
1954 
1955 		mutex_lock(&mqp->mutex);
1956 		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1957 			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1958 				list_del(&ib_steering->list);
1959 				break;
1960 			}
1961 		}
1962 		mutex_unlock(&mqp->mutex);
1963 		if (&ib_steering->list == &mqp->steering_rules) {
1964 			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1965 			return -EINVAL;
1966 		}
1967 		reg_id = ib_steering->reg_id;
1968 		kfree(ib_steering);
1969 	}
1970 
1971 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1972 				    prot, reg_id.id);
1973 	if (err)
1974 		return err;
1975 
1976 	if (mlx4_is_bonded(dev)) {
1977 		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1978 					    prot, reg_id.mirror);
1979 		if (err)
1980 			return err;
1981 	}
1982 
1983 	mutex_lock(&mqp->mutex);
1984 	ge = find_gid_entry(mqp, gid->raw);
1985 	if (ge) {
1986 		spin_lock_bh(&mdev->iboe.lock);
1987 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1988 		if (ndev)
1989 			dev_hold(ndev);
1990 		spin_unlock_bh(&mdev->iboe.lock);
1991 		if (ndev)
1992 			dev_put(ndev);
1993 		list_del(&ge->list);
1994 		kfree(ge);
1995 	} else
1996 		pr_warn("could not find mgid entry\n");
1997 
1998 	mutex_unlock(&mqp->mutex);
1999 
2000 	return 0;
2001 }
2002 
2003 static int init_node_data(struct mlx4_ib_dev *dev)
2004 {
2005 	struct ib_smp *in_mad  = NULL;
2006 	struct ib_smp *out_mad = NULL;
2007 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
2008 	int err = -ENOMEM;
2009 
2010 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
2011 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
2012 	if (!in_mad || !out_mad)
2013 		goto out;
2014 
2015 	init_query_mad(in_mad);
2016 	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
2017 	if (mlx4_is_master(dev->dev))
2018 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
2019 
2020 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2021 	if (err)
2022 		goto out;
2023 
2024 	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
2025 
2026 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2027 
2028 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2029 	if (err)
2030 		goto out;
2031 
2032 	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2033 	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2034 
2035 out:
2036 	kfree(in_mad);
2037 	kfree(out_mad);
2038 	return err;
2039 }
2040 
2041 static ssize_t hca_type_show(struct device *device,
2042 			     struct device_attribute *attr, char *buf)
2043 {
2044 	struct mlx4_ib_dev *dev =
2045 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2046 	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
2047 }
2048 static DEVICE_ATTR_RO(hca_type);
2049 
2050 static ssize_t hw_rev_show(struct device *device,
2051 			   struct device_attribute *attr, char *buf)
2052 {
2053 	struct mlx4_ib_dev *dev =
2054 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2055 	return sprintf(buf, "%x\n", dev->dev->rev_id);
2056 }
2057 static DEVICE_ATTR_RO(hw_rev);
2058 
2059 static ssize_t board_id_show(struct device *device,
2060 			     struct device_attribute *attr, char *buf)
2061 {
2062 	struct mlx4_ib_dev *dev =
2063 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2064 
2065 	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
2066 		       dev->dev->board_id);
2067 }
2068 static DEVICE_ATTR_RO(board_id);
2069 
2070 static struct attribute *mlx4_class_attributes[] = {
2071 	&dev_attr_hw_rev.attr,
2072 	&dev_attr_hca_type.attr,
2073 	&dev_attr_board_id.attr,
2074 	NULL
2075 };
2076 
2077 static const struct attribute_group mlx4_attr_group = {
2078 	.attrs = mlx4_class_attributes,
2079 };
2080 
2081 struct diag_counter {
2082 	const char *name;
2083 	u32 offset;
2084 };
2085 
2086 #define DIAG_COUNTER(_name, _offset)			\
2087 	{ .name = #_name, .offset = _offset }
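/*
 * Editor's example: DIAG_COUNTER(rq_num_lle, 0x00) expands to
 * { .name = "rq_num_lle", .offset = 0x00 }; the '#' stringizing
 * operator turns the bare token into the exported counter name.
 */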
2088 
2089 static const struct diag_counter diag_basic[] = {
2090 	DIAG_COUNTER(rq_num_lle, 0x00),
2091 	DIAG_COUNTER(sq_num_lle, 0x04),
2092 	DIAG_COUNTER(rq_num_lqpoe, 0x08),
2093 	DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2094 	DIAG_COUNTER(rq_num_lpe, 0x18),
2095 	DIAG_COUNTER(sq_num_lpe, 0x1C),
2096 	DIAG_COUNTER(rq_num_wrfe, 0x20),
2097 	DIAG_COUNTER(sq_num_wrfe, 0x24),
2098 	DIAG_COUNTER(sq_num_mwbe, 0x2C),
2099 	DIAG_COUNTER(sq_num_bre, 0x34),
2100 	DIAG_COUNTER(sq_num_rire, 0x44),
2101 	DIAG_COUNTER(rq_num_rire, 0x48),
2102 	DIAG_COUNTER(sq_num_rae, 0x4C),
2103 	DIAG_COUNTER(rq_num_rae, 0x50),
2104 	DIAG_COUNTER(sq_num_roe, 0x54),
2105 	DIAG_COUNTER(sq_num_tree, 0x5C),
2106 	DIAG_COUNTER(sq_num_rree, 0x64),
2107 	DIAG_COUNTER(rq_num_rnr, 0x68),
2108 	DIAG_COUNTER(sq_num_rnr, 0x6C),
2109 	DIAG_COUNTER(rq_num_oos, 0x100),
2110 	DIAG_COUNTER(sq_num_oos, 0x104),
2111 };
2112 
2113 static const struct diag_counter diag_ext[] = {
2114 	DIAG_COUNTER(rq_num_dup, 0x130),
2115 	DIAG_COUNTER(sq_num_to, 0x134),
2116 };
2117 
2118 static const struct diag_counter diag_device_only[] = {
2119 	DIAG_COUNTER(num_cqovf, 0x1A0),
2120 	DIAG_COUNTER(rq_num_udsdprd, 0x118),
2121 };
2122 
2123 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2124 						    u8 port_num)
2125 {
2126 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
2127 	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2128 
2129 	if (!diag[!!port_num].name)
2130 		return NULL;
2131 
2132 	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2133 					  diag[!!port_num].num_counters,
2134 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
2135 }
2136 
2137 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2138 				struct rdma_hw_stats *stats,
2139 				u8 port, int index)
2140 {
2141 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
2142 	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2143 	u32 hw_value[ARRAY_SIZE(diag_device_only) +
2144 		ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2145 	int ret;
2146 	int i;
2147 
2148 	ret = mlx4_query_diag_counters(dev->dev,
2149 				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2150 				       diag[!!port].offset, hw_value,
2151 				       diag[!!port].num_counters, port);
2152 
2153 	if (ret)
2154 		return ret;
2155 
2156 	for (i = 0; i < diag[!!port].num_counters; i++)
2157 		stats->value[i] = hw_value[i];
2158 
2159 	return diag[!!port].num_counters;
2160 }
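/*
 * Editor's note: the !!port index folds requests into two buckets --
 * diag[0] for device-wide counters (port == 0) and diag[1] for per-port
 * counters (any port >= 1) -- matching the two descriptor sets built by
 * mlx4_ib_alloc_diag_counters() below.
 */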
2161 
2162 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2163 					 const char ***name,
2164 					 u32 **offset,
2165 					 u32 *num,
2166 					 bool port)
2167 {
2168 	u32 num_counters;
2169 
2170 	num_counters = ARRAY_SIZE(diag_basic);
2171 
2172 	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2173 		num_counters += ARRAY_SIZE(diag_ext);
2174 
2175 	if (!port)
2176 		num_counters += ARRAY_SIZE(diag_device_only);
2177 
2178 	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2179 	if (!*name)
2180 		return -ENOMEM;
2181 
2182 	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2183 	if (!*offset)
2184 		goto err_name;
2185 
2186 	*num = num_counters;
2187 
2188 	return 0;
2189 
2190 err_name:
2191 	kfree(*name);
2192 	return -ENOMEM;
2193 }
2194 
2195 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2196 				       const char **name,
2197 				       u32 *offset,
2198 				       bool port)
2199 {
2200 	int i;
2201 	int j;
2202 
2203 	for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2204 		name[i] = diag_basic[i].name;
2205 		offset[i] = diag_basic[i].offset;
2206 	}
2207 
2208 	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2209 		for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2210 			name[j] = diag_ext[i].name;
2211 			offset[j] = diag_ext[i].offset;
2212 		}
2213 	}
2214 
2215 	if (!port) {
2216 		for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2217 			name[j] = diag_device_only[i].name;
2218 			offset[j] = diag_device_only[i].offset;
2219 		}
2220 	}
2221 }
2222 
2223 static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2224 	.alloc_hw_stats = mlx4_ib_alloc_hw_stats,
2225 	.get_hw_stats = mlx4_ib_get_hw_stats,
2226 };
2227 
2228 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2229 {
2230 	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2231 	int i;
2232 	int ret;
2233 	bool per_port = !!(ibdev->dev->caps.flags2 &
2234 		MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2235 
2236 	if (mlx4_is_slave(ibdev->dev))
2237 		return 0;
2238 
2239 	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2240 		/* i == 1 means we are building port counters */
2241 		if (i && !per_port)
2242 			continue;
2243 
2244 		ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2245 						    &diag[i].offset,
2246 						    &diag[i].num_counters, i);
2247 		if (ret)
2248 			goto err_alloc;
2249 
2250 		mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2251 					   diag[i].offset, i);
2252 	}
2253 
2254 	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2255 
2256 	return 0;
2257 
2258 err_alloc:
2259 	if (i) {
2260 		kfree(diag[i - 1].name);
2261 		kfree(diag[i - 1].offset);
2262 	}
2263 
2264 	return ret;
2265 }
2266 
2267 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2268 {
2269 	int i;
2270 
2271 	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2272 		kfree(ibdev->diag_counters[i].offset);
2273 		kfree(ibdev->diag_counters[i].name);
2274 	}
2275 }
2276 
2277 #define MLX4_IB_INVALID_MAC	((u64)-1)
2278 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2279 			       struct net_device *dev,
2280 			       int port)
2281 {
2282 	u64 new_smac = 0;
2283 	u64 release_mac = MLX4_IB_INVALID_MAC;
2284 	struct mlx4_ib_qp *qp;
2285 
2286 	read_lock(&dev_base_lock);
2287 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
2288 	read_unlock(&dev_base_lock);
2289 
2290 	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2291 
2292 	/* no need to update QP1 or register a MAC in non-SRIOV mode */
2293 	if (!mlx4_is_mfunc(ibdev->dev))
2294 		return;
2295 
2296 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2297 	qp = ibdev->qp1_proxy[port - 1];
2298 	if (qp) {
2299 		int new_smac_index;
2300 		u64 old_smac;
2301 		struct mlx4_update_qp_params update_params;
2302 
2303 		mutex_lock(&qp->mutex);
2304 		old_smac = qp->pri.smac;
2305 		if (new_smac == old_smac)
2306 			goto unlock;
2307 
2308 		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2309 
2310 		if (new_smac_index < 0)
2311 			goto unlock;
2312 
2313 		update_params.smac_index = new_smac_index;
2314 		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2315 				   &update_params)) {
2316 			release_mac = new_smac;
2317 			goto unlock;
2318 		}
2319 		/* if the old port was zero, no MAC had yet been registered for this QP */
2320 		if (qp->pri.smac_port)
2321 			release_mac = old_smac;
2322 		qp->pri.smac = new_smac;
2323 		qp->pri.smac_port = port;
2324 		qp->pri.smac_index = new_smac_index;
2325 	}
2326 
2327 unlock:
2328 	if (release_mac != MLX4_IB_INVALID_MAC)
2329 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
2330 	if (qp)
2331 		mutex_unlock(&qp->mutex);
2332 	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2333 }
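/*
 * Editor's summary (inferred): the SMAC update above is make-before-break --
 * register the new MAC, repoint the proxy QP1 via MLX4_UPDATE_QP_SMAC, and
 * only then release whichever MAC is no longer referenced (the old one on
 * success, the new one on failure).
 */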
2334 
2335 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2336 				 struct net_device *dev,
2337 				 unsigned long event)
2338 {
2340 	struct mlx4_ib_iboe *iboe;
2341 	int update_qps_port = -1;
2342 	int port;
2343 
2344 	ASSERT_RTNL();
2345 
2346 	iboe = &ibdev->iboe;
2347 
2348 	spin_lock_bh(&iboe->lock);
2349 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2350 
2351 		iboe->netdevs[port - 1] =
2352 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2353 
2354 		if (dev == iboe->netdevs[port - 1] &&
2355 		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2356 		     event == NETDEV_UP || event == NETDEV_CHANGE))
2357 			update_qps_port = port;
2358 
2359 		if (dev == iboe->netdevs[port - 1] &&
2360 		    (event == NETDEV_UP || event == NETDEV_DOWN)) {
2361 			enum ib_port_state port_state;
2362 			struct ib_event ibev = { };
2363 
2364 			if (ib_get_cached_port_state(&ibdev->ib_dev, port,
2365 						     &port_state))
2366 				continue;
2367 
2368 			if (event == NETDEV_UP &&
2369 			    (port_state != IB_PORT_ACTIVE ||
2370 			     iboe->last_port_state[port - 1] != IB_PORT_DOWN))
2371 				continue;
2372 			if (event == NETDEV_DOWN &&
2373 			    (port_state != IB_PORT_DOWN ||
2374 			     iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
2375 				continue;
2376 			iboe->last_port_state[port - 1] = port_state;
2377 
2378 			ibev.device = &ibdev->ib_dev;
2379 			ibev.element.port_num = port;
2380 			ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
2381 							  IB_EVENT_PORT_ERR;
2382 			ib_dispatch_event(&ibev);
2383 		}
2384 
2385 	}
2386 	spin_unlock_bh(&iboe->lock);
2387 
2388 	if (update_qps_port > 0)
2389 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2390 }
2391 
2392 static int mlx4_ib_netdev_event(struct notifier_block *this,
2393 				unsigned long event, void *ptr)
2394 {
2395 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2396 	struct mlx4_ib_dev *ibdev;
2397 
2398 	if (!net_eq(dev_net(dev), &init_net))
2399 		return NOTIFY_DONE;
2400 
2401 	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2402 	mlx4_ib_scan_netdevs(ibdev, dev, event);
2403 
2404 	return NOTIFY_DONE;
2405 }
2406 
2407 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2408 {
2409 	int port;
2410 	int slave;
2411 	int i;
2412 
2413 	if (mlx4_is_master(ibdev->dev)) {
2414 		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2415 		     ++slave) {
2416 			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2417 				for (i = 0;
2418 				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2419 				     ++i) {
2420 					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2421 					/* master has the identity virt2phys pkey mapping */
2422 						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2423 							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2424 					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2425 							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2426 				}
2427 			}
2428 		}
2429 		/* initialize pkey cache */
2430 		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2431 			for (i = 0;
2432 			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2433 			     ++i)
2434 				ibdev->pkeys.phys_pkey_cache[port-1][i] =
2435 					(i) ? 0 : 0xFFFF;
2436 		}
2437 	}
2438 }
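/*
 * Editor's note: the PF gets an identity virt2phys pkey map; for VFs only
 * virtual index 0 maps through, while every other virtual index is pointed
 * at the last physical pkey slot. The physical pkey cache is seeded with
 * the default full-membership key (0xFFFF) at index 0 and zeroes elsewhere.
 */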
2439 
2440 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2441 {
2442 	int i, j, eq = 0, total_eqs = 0;
2443 
2444 	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2445 				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2446 	if (!ibdev->eq_table)
2447 		return;
2448 
2449 	for (i = 1; i <= dev->caps.num_ports; i++) {
2450 		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2451 		     j++, total_eqs++) {
2452 			if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2453 				continue;
2454 			ibdev->eq_table[eq] = total_eqs;
2455 			if (!mlx4_assign_eq(dev, i,
2456 					    &ibdev->eq_table[eq]))
2457 				eq++;
2458 			else
2459 				ibdev->eq_table[eq] = -1;
2460 		}
2461 	}
2462 
2463 	for (i = eq; i < dev->caps.num_comp_vectors;
2464 	     ibdev->eq_table[i++] = -1)
2465 		;
2466 
2467 	/* Advertise the new number of EQs to clients */
2468 	ibdev->ib_dev.num_comp_vectors = eq;
2469 }
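/*
 * Editor's note: the table above records one EQ per (port, vector),
 * skipping vectors that ports > 1 would share with port 1; slots that
 * could not be assigned are marked -1, and num_comp_vectors is shrunk to
 * the count of EQs actually obtained from mlx4_assign_eq().
 */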
2470 
2471 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2472 {
2473 	int i;
2474 	int total_eqs = ibdev->ib_dev.num_comp_vectors;
2475 
2476 	/* no eqs were allocated */
2477 	if (!ibdev->eq_table)
2478 		return;
2479 
2480 	/* Reset the advertised EQ number */
2481 	ibdev->ib_dev.num_comp_vectors = 0;
2482 
2483 	for (i = 0; i < total_eqs; i++)
2484 		mlx4_release_eq(dev, ibdev->eq_table[i]);
2485 
2486 	kfree(ibdev->eq_table);
2487 	ibdev->eq_table = NULL;
2488 }
2489 
2490 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2491 			       struct ib_port_immutable *immutable)
2492 {
2493 	struct ib_port_attr attr;
2494 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2495 	int err;
2496 
2497 	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2498 		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2499 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2500 	} else {
2501 		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2502 			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2503 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2504 			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2505 				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2506 		immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2507 		if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2508 		    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2509 			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2510 	}
2511 
2512 	err = ib_query_port(ibdev, port_num, &attr);
2513 	if (err)
2514 		return err;
2515 
2516 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
2517 	immutable->gid_tbl_len = attr.gid_tbl_len;
2518 
2519 	return 0;
2520 }
2521 
2522 static void get_fw_ver_str(struct ib_device *device, char *str)
2523 {
2524 	struct mlx4_ib_dev *dev =
2525 		container_of(device, struct mlx4_ib_dev, ib_dev);
2526 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2527 		 (int) (dev->dev->caps.fw_ver >> 32),
2528 		 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2529 		 (int) dev->dev->caps.fw_ver & 0xffff);
2530 }
2531 
2532 static const struct ib_device_ops mlx4_ib_dev_ops = {
2533 	.owner = THIS_MODULE,
2534 	.driver_id = RDMA_DRIVER_MLX4,
2535 	.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
2536 
2537 	.add_gid = mlx4_ib_add_gid,
2538 	.alloc_mr = mlx4_ib_alloc_mr,
2539 	.alloc_pd = mlx4_ib_alloc_pd,
2540 	.alloc_ucontext = mlx4_ib_alloc_ucontext,
2541 	.attach_mcast = mlx4_ib_mcg_attach,
2542 	.create_ah = mlx4_ib_create_ah,
2543 	.create_cq = mlx4_ib_create_cq,
2544 	.create_qp = mlx4_ib_create_qp,
2545 	.create_srq = mlx4_ib_create_srq,
2546 	.dealloc_pd = mlx4_ib_dealloc_pd,
2547 	.dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2548 	.del_gid = mlx4_ib_del_gid,
2549 	.dereg_mr = mlx4_ib_dereg_mr,
2550 	.destroy_ah = mlx4_ib_destroy_ah,
2551 	.destroy_cq = mlx4_ib_destroy_cq,
2552 	.destroy_qp = mlx4_ib_destroy_qp,
2553 	.destroy_srq = mlx4_ib_destroy_srq,
2554 	.detach_mcast = mlx4_ib_mcg_detach,
2555 	.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2556 	.drain_rq = mlx4_ib_drain_rq,
2557 	.drain_sq = mlx4_ib_drain_sq,
2558 	.get_dev_fw_str = get_fw_ver_str,
2559 	.get_dma_mr = mlx4_ib_get_dma_mr,
2560 	.get_link_layer = mlx4_ib_port_link_layer,
2561 	.get_netdev = mlx4_ib_get_netdev,
2562 	.get_port_immutable = mlx4_port_immutable,
2563 	.map_mr_sg = mlx4_ib_map_mr_sg,
2564 	.mmap = mlx4_ib_mmap,
2565 	.modify_cq = mlx4_ib_modify_cq,
2566 	.modify_device = mlx4_ib_modify_device,
2567 	.modify_port = mlx4_ib_modify_port,
2568 	.modify_qp = mlx4_ib_modify_qp,
2569 	.modify_srq = mlx4_ib_modify_srq,
2570 	.poll_cq = mlx4_ib_poll_cq,
2571 	.post_recv = mlx4_ib_post_recv,
2572 	.post_send = mlx4_ib_post_send,
2573 	.post_srq_recv = mlx4_ib_post_srq_recv,
2574 	.process_mad = mlx4_ib_process_mad,
2575 	.query_ah = mlx4_ib_query_ah,
2576 	.query_device = mlx4_ib_query_device,
2577 	.query_gid = mlx4_ib_query_gid,
2578 	.query_pkey = mlx4_ib_query_pkey,
2579 	.query_port = mlx4_ib_query_port,
2580 	.query_qp = mlx4_ib_query_qp,
2581 	.query_srq = mlx4_ib_query_srq,
2582 	.reg_user_mr = mlx4_ib_reg_user_mr,
2583 	.req_notify_cq = mlx4_ib_arm_cq,
2584 	.rereg_user_mr = mlx4_ib_rereg_user_mr,
2585 	.resize_cq = mlx4_ib_resize_cq,
2586 
2587 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
2588 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
2589 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
2590 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
2591 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
2592 };
2593 
2594 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2595 	.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2596 	.create_wq = mlx4_ib_create_wq,
2597 	.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2598 	.destroy_wq = mlx4_ib_destroy_wq,
2599 	.modify_wq = mlx4_ib_modify_wq,
2600 };
2601 
2602 static const struct ib_device_ops mlx4_ib_dev_fmr_ops = {
2603 	.alloc_fmr = mlx4_ib_fmr_alloc,
2604 	.dealloc_fmr = mlx4_ib_fmr_dealloc,
2605 	.map_phys_fmr = mlx4_ib_map_phys_fmr,
2606 	.unmap_fmr = mlx4_ib_unmap_fmr,
2607 };
2608 
2609 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2610 	.alloc_mw = mlx4_ib_alloc_mw,
2611 	.dealloc_mw = mlx4_ib_dealloc_mw,
2612 };
2613 
2614 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2615 	.alloc_xrcd = mlx4_ib_alloc_xrcd,
2616 	.dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2617 };
2618 
2619 static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2620 	.create_flow = mlx4_ib_create_flow,
2621 	.destroy_flow = mlx4_ib_destroy_flow,
2622 };
2623 
2624 static void *mlx4_ib_add(struct mlx4_dev *dev)
2625 {
2626 	struct mlx4_ib_dev *ibdev;
2627 	int num_ports = 0;
2628 	int i, j;
2629 	int err;
2630 	struct mlx4_ib_iboe *iboe;
2631 	int ib_num_ports = 0;
2632 	int num_req_counters;
2633 	int allocated;
2634 	u32 counter_index;
2635 	struct counter_index *new_counter_index = NULL;
2636 
2637 	pr_info_once("%s", mlx4_ib_version);
2638 
2639 	num_ports = 0;
2640 	mlx4_foreach_ib_transport_port(i, dev)
2641 		num_ports++;
2642 
2643 	/* No point in registering a device with no ports... */
2644 	if (num_ports == 0)
2645 		return NULL;
2646 
2647 	ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2648 	if (!ibdev) {
2649 		dev_err(&dev->persist->pdev->dev,
2650 			"Device struct alloc failed\n");
2651 		return NULL;
2652 	}
2653 
2654 	iboe = &ibdev->iboe;
2655 
2656 	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2657 		goto err_dealloc;
2658 
2659 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2660 		goto err_pd;
2661 
2662 	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2663 				 PAGE_SIZE);
2664 	if (!ibdev->uar_map)
2665 		goto err_uar;
2666 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2667 
2668 	ibdev->dev = dev;
2669 	ibdev->bond_next_port	= 0;
2670 
2671 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
2672 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
2673 	ibdev->num_ports		= num_ports;
2674 	ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
2675 						1 : ibdev->num_ports;
2676 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
2677 	ibdev->ib_dev.dev.parent	= &dev->persist->pdev->dev;
2678 
2679 	ibdev->ib_dev.uverbs_cmd_mask	=
2680 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
2681 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
2682 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
2683 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
2684 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
2685 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
2686 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
2687 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
2688 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
2689 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
2690 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
2691 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
2692 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
2693 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
2694 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
2695 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
2696 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
2697 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
2698 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
2699 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
2700 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
2701 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
2702 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
2703 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
2704 
2705 	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2706 	ibdev->ib_dev.uverbs_ex_cmd_mask |=
2707 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
2708 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
2709 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2710 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2711 
2712 	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2713 	    ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2714 	    IB_LINK_LAYER_ETHERNET) ||
2715 	    (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2716 	    IB_LINK_LAYER_ETHERNET))) {
2717 		ibdev->ib_dev.uverbs_ex_cmd_mask |=
2718 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ)	  |
2719 			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ)	  |
2720 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ)	  |
2721 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2722 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
2723 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2724 	}
2725 
2726 	if (!mlx4_is_slave(ibdev->dev))
2727 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops);
2728 
2729 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2730 	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2731 		ibdev->ib_dev.uverbs_cmd_mask |=
2732 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2733 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2734 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2735 	}
2736 
2737 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2738 		ibdev->ib_dev.uverbs_cmd_mask |=
2739 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2740 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2741 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2742 	}
2743 
2744 	if (check_flow_steering_support(dev)) {
2745 		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2746 		ibdev->ib_dev.uverbs_ex_cmd_mask	|=
2747 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2748 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2749 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2750 	}
2751 
2752 	if (!dev->caps.userspace_caps)
2753 		ibdev->ib_dev.ops.uverbs_abi_ver =
2754 			MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2755 
2756 	mlx4_ib_alloc_eqs(dev, ibdev);
2757 
2758 	spin_lock_init(&iboe->lock);
2759 
2760 	if (init_node_data(ibdev))
2761 		goto err_map;
2762 	mlx4_init_sl2vl_tbl(ibdev);
2763 
2764 	for (i = 0; i < ibdev->num_ports; ++i) {
2765 		mutex_init(&ibdev->counters_table[i].mutex);
2766 		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2767 		iboe->last_port_state[i] = IB_PORT_DOWN;
2768 	}
2769 
2770 	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2771 	for (i = 0; i < num_req_counters; ++i) {
2772 		mutex_init(&ibdev->qp1_proxy_lock[i]);
2773 		allocated = 0;
2774 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2775 						IB_LINK_LAYER_ETHERNET) {
2776 			err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2777 						 MLX4_RES_USAGE_DRIVER);
2778 			/* if allocating a new counter failed, fall back to the default */
2779 			if (err)
2780 				counter_index =
2781 					mlx4_get_default_counter_index(dev,
2782 								       i + 1);
2783 			else
2784 				allocated = 1;
2785 		} else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
2786 			counter_index = mlx4_get_default_counter_index(dev,
2787 								       i + 1);
2788 		}
2789 		new_counter_index = kmalloc(sizeof(*new_counter_index),
2790 					    GFP_KERNEL);
2791 		if (!new_counter_index) {
2792 			if (allocated)
2793 				mlx4_counter_free(ibdev->dev, counter_index);
2794 			goto err_counter;
2795 		}
2796 		new_counter_index->index = counter_index;
2797 		new_counter_index->allocated = allocated;
2798 		list_add_tail(&new_counter_index->list,
2799 			      &ibdev->counters_table[i].counters_list);
2800 		ibdev->counters_table[i].default_counter = counter_index;
2801 		pr_info("counter index %d for port %d allocated %d\n",
2802 			counter_index, i + 1, allocated);
2803 	}
2804 	if (mlx4_is_bonded(dev))
2805 		for (i = 1; i < ibdev->num_ports ; ++i) {
2806 			new_counter_index =
2807 					kmalloc(sizeof(struct counter_index),
2808 						GFP_KERNEL);
2809 			if (!new_counter_index)
2810 				goto err_counter;
2811 			new_counter_index->index = counter_index;
2812 			new_counter_index->allocated = 0;
2813 			list_add_tail(&new_counter_index->list,
2814 				      &ibdev->counters_table[i].counters_list);
2815 			ibdev->counters_table[i].default_counter =
2816 								counter_index;
2817 		}
2818 
2819 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2820 		ib_num_ports++;
2821 
2822 	spin_lock_init(&ibdev->sm_lock);
2823 	mutex_init(&ibdev->cap_mask_mutex);
2824 	INIT_LIST_HEAD(&ibdev->qp_list);
2825 	spin_lock_init(&ibdev->reset_flow_resource_lock);
2826 
2827 	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2828 	    ib_num_ports) {
2829 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2830 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2831 					    MLX4_IB_UC_STEER_QPN_ALIGN,
2832 					    &ibdev->steer_qpn_base, 0,
2833 					    MLX4_RES_USAGE_DRIVER);
2834 		if (err)
2835 			goto err_counter;
2836 
2837 		ibdev->ib_uc_qpns_bitmap =
2838 			kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
2839 				      sizeof(long),
2840 				      GFP_KERNEL);
2841 		if (!ibdev->ib_uc_qpns_bitmap)
2842 			goto err_steer_qp_release;
2843 
2844 		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2845 			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2846 				    ibdev->steer_qpn_count);
2847 			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2848 					dev, ibdev->steer_qpn_base,
2849 					ibdev->steer_qpn_base +
2850 					ibdev->steer_qpn_count - 1);
2851 			if (err)
2852 				goto err_steer_free_bitmap;
2853 		} else {
2854 			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2855 				    ibdev->steer_qpn_count);
2856 		}
2857 	}
2858 
2859 	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2860 		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2861 
2862 	if (mlx4_ib_alloc_diag_counters(ibdev))
2863 		goto err_steer_free_bitmap;
2864 
2865 	rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
2866 	if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
2867 		goto err_diag_counters;
2868 
2869 	if (mlx4_ib_mad_init(ibdev))
2870 		goto err_reg;
2871 
2872 	if (mlx4_ib_init_sriov(ibdev))
2873 		goto err_mad;
2874 
2875 	if (!iboe->nb.notifier_call) {
2876 		iboe->nb.notifier_call = mlx4_ib_netdev_event;
2877 		err = register_netdevice_notifier(&iboe->nb);
2878 		if (err) {
2879 			iboe->nb.notifier_call = NULL;
2880 			goto err_notif;
2881 		}
2882 	}
2883 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2884 		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2885 		if (err)
2886 			goto err_notif;
2887 	}
2888 
2889 	ibdev->ib_active = true;
2890 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2891 		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2892 					 &ibdev->ib_dev);
2893 
2894 	if (mlx4_is_mfunc(ibdev->dev))
2895 		init_pkeys(ibdev);
2896 
2897 	/* create paravirt contexts for any VFs which are active */
2898 	if (mlx4_is_master(ibdev->dev)) {
2899 		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2900 			if (j == mlx4_master_func_num(ibdev->dev))
2901 				continue;
2902 			if (mlx4_is_slave_active(ibdev->dev, j))
2903 				do_slave_init(ibdev, j, 1);
2904 		}
2905 	}
2906 	return ibdev;
2907 
2908 err_notif:
2909 	if (ibdev->iboe.nb.notifier_call) {
2910 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2911 			pr_warn("failure unregistering notifier\n");
2912 		ibdev->iboe.nb.notifier_call = NULL;
2913 	}
2914 	flush_workqueue(wq);
2915 
2916 	mlx4_ib_close_sriov(ibdev);
2917 
2918 err_mad:
2919 	mlx4_ib_mad_cleanup(ibdev);
2920 
2921 err_reg:
2922 	ib_unregister_device(&ibdev->ib_dev);
2923 
2924 err_diag_counters:
2925 	mlx4_ib_diag_cleanup(ibdev);
2926 
2927 err_steer_free_bitmap:
2928 	kfree(ibdev->ib_uc_qpns_bitmap);
2929 
2930 err_steer_qp_release:
2931 	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2932 			      ibdev->steer_qpn_count);
2933 err_counter:
2934 	for (i = 0; i < ibdev->num_ports; ++i)
2935 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2936 
2937 err_map:
2938 	mlx4_ib_free_eqs(dev, ibdev);
2939 	iounmap(ibdev->uar_map);
2940 
2941 err_uar:
2942 	mlx4_uar_free(dev, &ibdev->priv_uar);
2943 
2944 err_pd:
2945 	mlx4_pd_free(dev, ibdev->priv_pdn);
2946 
2947 err_dealloc:
2948 	ib_dealloc_device(&ibdev->ib_dev);
2949 
2950 	return NULL;
2951 }
2952 
2953 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2954 {
2955 	int offset;
2956 
2957 	WARN_ON(!dev->ib_uc_qpns_bitmap);
2958 
2959 	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2960 					 dev->steer_qpn_count,
2961 					 get_count_order(count));
2962 	if (offset < 0)
2963 		return offset;
2964 
2965 	*qpn = dev->steer_qpn_base + offset;
2966 	return 0;
2967 }
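/*
 * Editor's example: a request for count == 4 steering QPNs asks
 * bitmap_find_free_region() for order get_count_order(4) == 2, i.e. a
 * naturally aligned block of 4 bits; callers must later free with the
 * same count so bitmap_release_region() matches the allocation.
 */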
2968 
2969 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2970 {
2971 	if (!qpn ||
2972 	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2973 		return;
2974 
2975 	if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
2976 		 qpn, dev->steer_qpn_base))
2977 		/* not supposed to be here */
2978 		return;
2979 
2980 	bitmap_release_region(dev->ib_uc_qpns_bitmap,
2981 			      qpn - dev->steer_qpn_base,
2982 			      get_count_order(count));
2983 }
2984 
2985 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2986 			 int is_attach)
2987 {
2988 	int err;
2989 	size_t flow_size;
2990 	struct ib_flow_attr *flow = NULL;
2991 	struct ib_flow_spec_ib *ib_spec;
2992 
2993 	if (is_attach) {
2994 		flow_size = sizeof(struct ib_flow_attr) +
2995 			    sizeof(struct ib_flow_spec_ib);
2996 		flow = kzalloc(flow_size, GFP_KERNEL);
2997 		if (!flow)
2998 			return -ENOMEM;
2999 		flow->port = mqp->port;
3000 		flow->num_of_specs = 1;
3001 		flow->size = flow_size;
3002 		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
3003 		ib_spec->type = IB_FLOW_SPEC_IB;
3004 		ib_spec->size = sizeof(struct ib_flow_spec_ib);
3005 		/* Add an empty rule for IB L2 */
3006 		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
3007 
3008 		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
3009 					    IB_FLOW_DOMAIN_NIC,
3010 					    MLX4_FS_REGULAR,
3011 					    &mqp->reg_id);
3012 	} else {
3013 		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
3014 	}
3015 	kfree(flow);
3016 	return err;
3017 }
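/*
 * Editor's note: the attach path above builds a wildcard IB L2 spec
 * (all-zero mask matches everything) in kernel memory and registers it
 * through the same __mlx4_ib_create_flow() helper the uverbs path uses,
 * so steering-QP registration shares one firmware interface with user
 * flows.
 */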
3018 
3019 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
3020 {
3021 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
3022 	int p;
3023 	int i;
3024 
3025 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3026 		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
3027 	ibdev->ib_active = false;
3028 	flush_workqueue(wq);
3029 
3030 	if (ibdev->iboe.nb.notifier_call) {
3031 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
3032 			pr_warn("failure unregistering notifier\n");
3033 		ibdev->iboe.nb.notifier_call = NULL;
3034 	}
3035 
3036 	mlx4_ib_close_sriov(ibdev);
3037 	mlx4_ib_mad_cleanup(ibdev);
3038 	ib_unregister_device(&ibdev->ib_dev);
3039 	mlx4_ib_diag_cleanup(ibdev);
3040 
3041 	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3042 			      ibdev->steer_qpn_count);
3043 	kfree(ibdev->ib_uc_qpns_bitmap);
3044 
3045 	iounmap(ibdev->uar_map);
3046 	for (p = 0; p < ibdev->num_ports; ++p)
3047 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3048 
3049 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
3050 		mlx4_CLOSE_PORT(dev, p);
3051 
3052 	mlx4_ib_free_eqs(dev, ibdev);
3053 
3054 	mlx4_uar_free(dev, &ibdev->priv_uar);
3055 	mlx4_pd_free(dev, ibdev->priv_pdn);
3056 	ib_dealloc_device(&ibdev->ib_dev);
3057 }
3058 
3059 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3060 {
3061 	struct mlx4_ib_demux_work **dm = NULL;
3062 	struct mlx4_dev *dev = ibdev->dev;
3063 	int i;
3064 	unsigned long flags;
3065 	struct mlx4_active_ports actv_ports;
3066 	unsigned int ports;
3067 	unsigned int first_port;
3068 
3069 	if (!mlx4_is_master(dev))
3070 		return;
3071 
3072 	actv_ports = mlx4_get_active_ports(dev, slave);
3073 	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3074 	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3075 
3076 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
3077 	if (!dm)
3078 		return;
3079 
3080 	for (i = 0; i < ports; i++) {
3081 		dm[i] = kmalloc(sizeof(*dm[i]), GFP_ATOMIC);
3082 		if (!dm[i]) {
3083 			while (--i >= 0)
3084 				kfree(dm[i]);
3085 			goto out;
3086 		}
3087 		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
3088 		dm[i]->port = first_port + i + 1;
3089 		dm[i]->slave = slave;
3090 		dm[i]->do_init = do_init;
3091 		dm[i]->dev = ibdev;
3092 	}
3093 	/* initialize or tear down tunnel QPs for the slave */
3094 	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3095 	if (!ibdev->sriov.is_going_down) {
3096 		for (i = 0; i < ports; i++)
3097 			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3098 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3099 	} else {
3100 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3101 		for (i = 0; i < ports; i++)
3102 			kfree(dm[i]);
3103 	}
3104 out:
3105 	kfree(dm);
3107 }
3108 
3109 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3110 {
3111 	struct mlx4_ib_qp *mqp;
3112 	unsigned long flags_qp;
3113 	unsigned long flags_cq;
3114 	struct mlx4_ib_cq *send_mcq, *recv_mcq;
3115 	struct list_head    cq_notify_list;
3116 	struct mlx4_cq *mcq;
3117 	unsigned long flags;
3118 
3119 	pr_warn("mlx4_ib_handle_catas_error was started\n");
3120 	INIT_LIST_HEAD(&cq_notify_list);
3121 
3122 	/* Walk the QP list on this ibdev, synchronized with QP create/destroy. */
3123 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3124 
3125 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3126 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3127 		if (mqp->sq.tail != mqp->sq.head) {
3128 			send_mcq = to_mcq(mqp->ibqp.send_cq);
3129 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
3130 			if (send_mcq->mcq.comp &&
3131 			    mqp->ibqp.send_cq->comp_handler) {
3132 				if (!send_mcq->mcq.reset_notify_added) {
3133 					send_mcq->mcq.reset_notify_added = 1;
3134 					list_add_tail(&send_mcq->mcq.reset_notify,
3135 						      &cq_notify_list);
3136 				}
3137 			}
3138 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3139 		}
3140 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3141 		/* Now, handle the QP's receive queue */
3142 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3143 		/* no handling is needed for SRQ */
3144 		if (!mqp->ibqp.srq) {
3145 			if (mqp->rq.tail != mqp->rq.head) {
3146 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3147 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3148 				if (recv_mcq->mcq.comp &&
3149 				    mqp->ibqp.recv_cq->comp_handler) {
3150 					if (!recv_mcq->mcq.reset_notify_added) {
3151 						recv_mcq->mcq.reset_notify_added = 1;
3152 						list_add_tail(&recv_mcq->mcq.reset_notify,
3153 							      &cq_notify_list);
3154 					}
3155 				}
3156 				spin_unlock_irqrestore(&recv_mcq->lock,
3157 						       flags_cq);
3158 			}
3159 		}
3160 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3161 	}
3162 
3163 	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3164 		mcq->comp(mcq);
3165 	}
3166 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3167 	pr_warn("mlx4_ib_handle_catas_error ended\n");
3168 }
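/*
 * Editor's note: reset_notify_added keeps a CQ from being queued twice on
 * cq_notify_list when several QPs share it; the final loop then runs each
 * CQ's completion handler exactly once so consumers see their flushed
 * work requests after the catastrophic error.
 */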
3169 
3170 static void handle_bonded_port_state_event(struct work_struct *work)
3171 {
3172 	struct ib_event_work *ew =
3173 		container_of(work, struct ib_event_work, work);
3174 	struct mlx4_ib_dev *ibdev = ew->ib_dev;
3175 	enum ib_port_state bonded_port_state = IB_PORT_NOP;
3176 	int i;
3177 	struct ib_event ibev;
3178 
3179 	kfree(ew);
3180 	spin_lock_bh(&ibdev->iboe.lock);
3181 	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3182 		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3183 		enum ib_port_state curr_port_state;
3184 
3185 		if (!curr_netdev)
3186 			continue;
3187 
3188 		curr_port_state =
3189 			(netif_running(curr_netdev) &&
3190 			 netif_carrier_ok(curr_netdev)) ?
3191 			IB_PORT_ACTIVE : IB_PORT_DOWN;
3192 
3193 		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3194 			curr_port_state : IB_PORT_ACTIVE;
3195 	}
3196 	spin_unlock_bh(&ibdev->iboe.lock);
3197 
3198 	ibev.device = &ibdev->ib_dev;
3199 	ibev.element.port_num = 1;
3200 	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3201 		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3202 
3203 	ib_dispatch_event(&ibev);
3204 }
3205 
3206 void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3207 {
3208 	u64 sl2vl;
3209 	int err;
3210 
3211 	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3212 	if (err) {
3213 		pr_err("Unable to get current sl to vl mapping for port %d.  Using all zeroes (%d)\n",
3214 		       port, err);
3215 		sl2vl = 0;
3216 	}
3217 	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3218 }
3219 
3220 static void ib_sl2vl_update_work(struct work_struct *work)
3221 {
3222 	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3223 	struct mlx4_ib_dev *mdev = ew->ib_dev;
3224 	int port = ew->port;
3225 
3226 	mlx4_ib_sl2vl_update(mdev, port);
3227 
3228 	kfree(ew);
3229 }
3230 
3231 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3232 				     int port)
3233 {
3234 	struct ib_event_work *ew;
3235 
3236 	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3237 	if (ew) {
3238 		INIT_WORK(&ew->work, ib_sl2vl_update_work);
3239 		ew->port = port;
3240 		ew->ib_dev = ibdev;
3241 		queue_work(wq, &ew->work);
3242 	}
3243 }
3244 
3245 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
3246 			  enum mlx4_dev_event event, unsigned long param)
3247 {
3248 	struct ib_event ibev;
3249 	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
3250 	struct mlx4_eqe *eqe = NULL;
3251 	struct ib_event_work *ew;
3252 	int p = 0;
3253 
3254 	if (mlx4_is_bonded(dev) &&
3255 	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
3256 	    (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3257 		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3258 		if (!ew)
3259 			return;
3260 		INIT_WORK(&ew->work, handle_bonded_port_state_event);
3261 		ew->ib_dev = ibdev;
3262 		queue_work(wq, &ew->work);
3263 		return;
3264 	}
3265 
3266 	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3267 		eqe = (struct mlx4_eqe *)param;
3268 	else
3269 		p = (int) param;
3270 
3271 	switch (event) {
3272 	case MLX4_DEV_EVENT_PORT_UP:
3273 		if (p > ibdev->num_ports)
3274 			return;
3275 		if (!mlx4_is_slave(dev) &&
3276 		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3277 			IB_LINK_LAYER_INFINIBAND) {
3278 			if (mlx4_is_master(dev))
3279 				mlx4_ib_invalidate_all_guid_record(ibdev, p);
3280 			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3281 			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3282 				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3283 		}
3284 		ibev.event = IB_EVENT_PORT_ACTIVE;
3285 		break;
3286 
3287 	case MLX4_DEV_EVENT_PORT_DOWN:
3288 		if (p > ibdev->num_ports)
3289 			return;
3290 		ibev.event = IB_EVENT_PORT_ERR;
3291 		break;
3292 
3293 	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3294 		ibdev->ib_active = false;
3295 		ibev.event = IB_EVENT_DEVICE_FATAL;
3296 		mlx4_ib_handle_catas_error(ibdev);
3297 		break;
3298 
3299 	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3300 		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
3301 		if (!ew)
3302 			break;
3303 
3304 		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3305 		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3306 		ew->ib_dev = ibdev;
3307 		/* need to queue only for port owner, which uses GEN_EQE */
3308 		if (mlx4_is_master(dev))
3309 			queue_work(wq, &ew->work);
3310 		else
3311 			handle_port_mgmt_change_event(&ew->work);
3312 		return;
3313 
3314 	case MLX4_DEV_EVENT_SLAVE_INIT:
3315 		/* here, p is the slave id */
3316 		do_slave_init(ibdev, p, 1);
3317 		if (mlx4_is_master(dev)) {
3318 			int i;
3319 
3320 			for (i = 1; i <= ibdev->num_ports; i++) {
3321 				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3322 					== IB_LINK_LAYER_INFINIBAND)
3323 					mlx4_ib_slave_alias_guid_event(ibdev,
3324 								       p, i,
3325 								       1);
3326 			}
3327 		}
3328 		return;
3329 
3330 	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
3331 		if (mlx4_is_master(dev)) {
3332 			int i;
3333 
3334 			for (i = 1; i <= ibdev->num_ports; i++) {
3335 				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3336 					== IB_LINK_LAYER_INFINIBAND)
3337 					mlx4_ib_slave_alias_guid_event(ibdev,
3338 								       p, i,
3339 								       0);
3340 			}
3341 		}
3342 		/* here, p is the slave id */
3343 		do_slave_init(ibdev, p, 0);
3344 		return;
3345 
3346 	default:
3347 		return;
3348 	}
3349 
3350 	ibev.device	      = ibdev_ptr;
3351 	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
3352 
3353 	ib_dispatch_event(&ibev);
3354 }
3355 
3356 static struct mlx4_interface mlx4_ib_interface = {
3357 	.add		= mlx4_ib_add,
3358 	.remove		= mlx4_ib_remove,
3359 	.event		= mlx4_ib_event,
3360 	.protocol	= MLX4_PROT_IB_IPV6,
3361 	.flags		= MLX4_INTFF_BONDING
3362 };
3363 
3364 static int __init mlx4_ib_init(void)
3365 {
3366 	int err;
3367 
3368 	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
3369 	if (!wq)
3370 		return -ENOMEM;
3371 
3372 	err = mlx4_ib_mcg_init();
3373 	if (err)
3374 		goto clean_wq;
3375 
3376 	err = mlx4_register_interface(&mlx4_ib_interface);
3377 	if (err)
3378 		goto clean_mcg;
3379 
3380 	return 0;
3381 
3382 clean_mcg:
3383 	mlx4_ib_mcg_destroy();
3384 
3385 clean_wq:
3386 	destroy_workqueue(wq);
3387 	return err;
3388 }
3389 
3390 static void __exit mlx4_ib_cleanup(void)
3391 {
3392 	mlx4_unregister_interface(&mlx4_ib_interface);
3393 	mlx4_ib_mcg_destroy();
3394 	destroy_workqueue(wq);
3395 }
3396 
3397 module_init(mlx4_ib_init);
3398 module_exit(mlx4_ib_cleanup);
3399