xref: /openbmc/linux/drivers/infiniband/hw/mlx5/main.c (revision 887069f4)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
4  * Copyright (c) 2020, Intel Corporation. All rights reserved.
5  */
6 
7 #include <linux/debugfs.h>
8 #include <linux/highmem.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/errno.h>
12 #include <linux/pci.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/slab.h>
15 #include <linux/bitmap.h>
16 #include <linux/sched.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/task.h>
19 #include <linux/delay.h>
20 #include <rdma/ib_user_verbs.h>
21 #include <rdma/ib_addr.h>
22 #include <rdma/ib_cache.h>
23 #include <linux/mlx5/port.h>
24 #include <linux/mlx5/vport.h>
25 #include <linux/mlx5/fs.h>
26 #include <linux/mlx5/eswitch.h>
27 #include <linux/list.h>
28 #include <rdma/ib_smi.h>
29 #include <rdma/ib_umem.h>
30 #include <rdma/lag.h>
31 #include <linux/in.h>
32 #include <linux/etherdevice.h>
33 #include "mlx5_ib.h"
34 #include "ib_rep.h"
35 #include "cmd.h"
36 #include "devx.h"
37 #include "dm.h"
38 #include "fs.h"
39 #include "srq.h"
40 #include "qp.h"
41 #include "wr.h"
42 #include "restrack.h"
43 #include "counters.h"
44 #include <linux/mlx5/accel.h>
45 #include <rdma/uverbs_std_types.h>
46 #include <rdma/uverbs_ioctl.h>
47 #include <rdma/mlx5_user_ioctl_verbs.h>
48 #include <rdma/mlx5_user_ioctl_cmds.h>
49 #include <rdma/ib_umem_odp.h>
50 
51 #define UVERBS_MODULE_NAME mlx5_ib
52 #include <rdma/uverbs_named_ioctl.h>
53 
54 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
55 MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
56 MODULE_LICENSE("Dual BSD/GPL");
57 
58 struct mlx5_ib_event_work {
59 	struct work_struct	work;
60 	union {
61 		struct mlx5_ib_dev	      *dev;
62 		struct mlx5_ib_multiport_info *mpi;
63 	};
64 	bool			is_slave;
65 	unsigned int		event;
66 	void			*param;
67 };
68 
69 enum {
70 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
71 };
72 
73 static struct workqueue_struct *mlx5_ib_event_wq;
74 static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
75 static LIST_HEAD(mlx5_ib_dev_list);
76 /*
77  * This mutex should be held when accessing either of the above lists
78  */
79 static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
80 
81 struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
82 {
83 	struct mlx5_ib_dev *dev;
84 
85 	mutex_lock(&mlx5_ib_multiport_mutex);
86 	dev = mpi->ibdev;
87 	mutex_unlock(&mlx5_ib_multiport_mutex);
88 	return dev;
89 }
90 
91 static enum rdma_link_layer
92 mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
93 {
94 	switch (port_type_cap) {
95 	case MLX5_CAP_PORT_TYPE_IB:
96 		return IB_LINK_LAYER_INFINIBAND;
97 	case MLX5_CAP_PORT_TYPE_ETH:
98 		return IB_LINK_LAYER_ETHERNET;
99 	default:
100 		return IB_LINK_LAYER_UNSPECIFIED;
101 	}
102 }
103 
104 static enum rdma_link_layer
105 mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
106 {
107 	struct mlx5_ib_dev *dev = to_mdev(device);
108 	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
109 
110 	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
111 }
112 
113 static int get_port_state(struct ib_device *ibdev,
114 			  u32 port_num,
115 			  enum ib_port_state *state)
116 {
117 	struct ib_port_attr attr;
118 	int ret;
119 
120 	memset(&attr, 0, sizeof(attr));
121 	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
122 	if (!ret)
123 		*state = attr.state;
124 	return ret;
125 }
126 
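/*
 * Find the switchdev representor port whose netdev matches @ndev (or the
 * uplink representor when @ndev is the LAG upper device) and return its RoCE
 * state, setting *port_num to the matching IB port number.
 */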
127 static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
128 					   struct net_device *ndev,
129 					   struct net_device *upper,
130 					   u32 *port_num)
131 {
132 	struct net_device *rep_ndev;
133 	struct mlx5_ib_port *port;
134 	int i;
135 
136 	for (i = 0; i < dev->num_ports; i++) {
137 		port  = &dev->port[i];
138 		if (!port->rep)
139 			continue;
140 
141 		if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
142 			*port_num = i + 1;
143 			return &port->roce;
144 		}
145 
146 		if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
147 			continue;
148 
149 		read_lock(&port->roce.netdev_lock);
150 		rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
151 						  port->rep->vport);
152 		if (rep_ndev == ndev) {
153 			read_unlock(&port->roce.netdev_lock);
154 			*port_num = i + 1;
155 			return &port->roce;
156 		}
157 		read_unlock(&port->roce.netdev_lock);
158 	}
159 
160 	return NULL;
161 }
162 
163 static int mlx5_netdev_event(struct notifier_block *this,
164 			     unsigned long event, void *ptr)
165 {
166 	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
167 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
168 	u32 port_num = roce->native_port_num;
169 	struct mlx5_core_dev *mdev;
170 	struct mlx5_ib_dev *ibdev;
171 
172 	ibdev = roce->dev;
173 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
174 	if (!mdev)
175 		return NOTIFY_DONE;
176 
177 	switch (event) {
178 	case NETDEV_REGISTER:
179 		/* Should already have been registered during driver load */
180 		if (ibdev->is_rep)
181 			break;
182 		write_lock(&roce->netdev_lock);
183 		if (ndev->dev.parent == mdev->device)
184 			roce->netdev = ndev;
185 		write_unlock(&roce->netdev_lock);
186 		break;
187 
188 	case NETDEV_UNREGISTER:
189 		/* In the rep case, the IB device goes away before the netdevs */
190 		write_lock(&roce->netdev_lock);
191 		if (roce->netdev == ndev)
192 			roce->netdev = NULL;
193 		write_unlock(&roce->netdev_lock);
194 		break;
195 
196 	case NETDEV_CHANGE:
197 	case NETDEV_UP:
198 	case NETDEV_DOWN: {
199 		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
200 		struct net_device *upper = NULL;
201 
202 		if (lag_ndev) {
203 			upper = netdev_master_upper_dev_get(lag_ndev);
204 			dev_put(lag_ndev);
205 		}
206 
207 		if (ibdev->is_rep)
208 			roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
209 		if (!roce)
210 			return NOTIFY_DONE;
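		/*
		 * Dispatch an IB port event only when the notification is for
		 * the netdev backing this RoCE port (or its LAG master) and
		 * the IB device is active.
		 */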
211 		if ((upper == ndev ||
212 		     ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
213 		    ibdev->ib_active) {
214 			struct ib_event ibev = { };
215 			enum ib_port_state port_state;
216 
217 			if (get_port_state(&ibdev->ib_dev, port_num,
218 					   &port_state))
219 				goto done;
220 
221 			if (roce->last_port_state == port_state)
222 				goto done;
223 
224 			roce->last_port_state = port_state;
225 			ibev.device = &ibdev->ib_dev;
226 			if (port_state == IB_PORT_DOWN)
227 				ibev.event = IB_EVENT_PORT_ERR;
228 			else if (port_state == IB_PORT_ACTIVE)
229 				ibev.event = IB_EVENT_PORT_ACTIVE;
230 			else
231 				goto done;
232 
233 			ibev.element.port_num = port_num;
234 			ib_dispatch_event(&ibev);
235 		}
236 		break;
237 	}
238 
239 	default:
240 		break;
241 	}
242 done:
243 	mlx5_ib_put_native_port_mdev(ibdev, port_num);
244 	return NOTIFY_DONE;
245 }
246 
247 static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
248 					     u32 port_num)
249 {
250 	struct mlx5_ib_dev *ibdev = to_mdev(device);
251 	struct net_device *ndev;
252 	struct mlx5_core_dev *mdev;
253 
254 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
255 	if (!mdev)
256 		return NULL;
257 
258 	ndev = mlx5_lag_get_roce_netdev(mdev);
259 	if (ndev)
260 		goto out;
261 
262 	/* Ensure ndev does not disappear before we invoke dev_hold()
263 	 */
264 	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
265 	ndev = ibdev->port[port_num - 1].roce.netdev;
266 	if (ndev)
267 		dev_hold(ndev);
268 	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
269 
270 out:
271 	mlx5_ib_put_native_port_mdev(ibdev, port_num);
272 	return ndev;
273 }
274 
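/*
 * Return the mlx5_core_dev that natively owns @ib_port_num.  For a slave
 * port of a multiport RoCE device a reference is taken on the mpi, so callers
 * must pair this with mlx5_ib_put_native_port_mdev().  Returns NULL if the
 * port is not affiliated yet.
 */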
275 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
276 						   u32 ib_port_num,
277 						   u32 *native_port_num)
278 {
279 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
280 							  ib_port_num);
281 	struct mlx5_core_dev *mdev = NULL;
282 	struct mlx5_ib_multiport_info *mpi;
283 	struct mlx5_ib_port *port;
284 
285 	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
286 	    ll != IB_LINK_LAYER_ETHERNET) {
287 		if (native_port_num)
288 			*native_port_num = ib_port_num;
289 		return ibdev->mdev;
290 	}
291 
292 	if (native_port_num)
293 		*native_port_num = 1;
294 
295 	port = &ibdev->port[ib_port_num - 1];
296 	spin_lock(&port->mp.mpi_lock);
297 	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
298 	if (mpi && !mpi->unaffiliate) {
299 		mdev = mpi->mdev;
300 		/* If it's the master, there is no need to refcount; it will
301 		 * exist as long as the ib_dev exists.
302 		 */
303 		if (!mpi->is_master)
304 			mpi->mdev_refcnt++;
305 	}
306 	spin_unlock(&port->mp.mpi_lock);
307 
308 	return mdev;
309 }
310 
311 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
312 {
313 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
314 							  port_num);
315 	struct mlx5_ib_multiport_info *mpi;
316 	struct mlx5_ib_port *port;
317 
318 	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
319 		return;
320 
321 	port = &ibdev->port[port_num - 1];
322 
323 	spin_lock(&port->mp.mpi_lock);
324 	mpi = ibdev->port[port_num - 1].mp.mpi;
325 	if (mpi->is_master)
326 		goto out;
327 
328 	mpi->mdev_refcnt--;
329 	if (mpi->unaffiliate)
330 		complete(&mpi->unref_comp);
331 out:
332 	spin_unlock(&port->mp.mpi_lock);
333 }
334 
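/*
 * Map an Ethernet PTYS eth_proto_oper bit to the closest IB (speed, width)
 * pair so that a RoCE port reports a meaningful active speed and width.
 */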
335 static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
336 					   u16 *active_speed, u8 *active_width)
337 {
338 	switch (eth_proto_oper) {
339 	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
340 	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
341 	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
342 	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
343 		*active_width = IB_WIDTH_1X;
344 		*active_speed = IB_SPEED_SDR;
345 		break;
346 	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
347 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
348 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
349 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
350 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
351 	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
352 	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
353 		*active_width = IB_WIDTH_1X;
354 		*active_speed = IB_SPEED_QDR;
355 		break;
356 	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
357 	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
358 	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
359 		*active_width = IB_WIDTH_1X;
360 		*active_speed = IB_SPEED_EDR;
361 		break;
362 	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
363 	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
364 	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
365 	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
366 		*active_width = IB_WIDTH_4X;
367 		*active_speed = IB_SPEED_QDR;
368 		break;
369 	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
370 	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
371 	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
372 		*active_width = IB_WIDTH_1X;
373 		*active_speed = IB_SPEED_HDR;
374 		break;
375 	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
376 		*active_width = IB_WIDTH_4X;
377 		*active_speed = IB_SPEED_FDR;
378 		break;
379 	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
380 	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
381 	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
382 	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
383 		*active_width = IB_WIDTH_4X;
384 		*active_speed = IB_SPEED_EDR;
385 		break;
386 	default:
387 		return -EINVAL;
388 	}
389 
390 	return 0;
391 }
392 
393 static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
394 					u8 *active_width)
395 {
396 	switch (eth_proto_oper) {
397 	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
398 	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
399 		*active_width = IB_WIDTH_1X;
400 		*active_speed = IB_SPEED_SDR;
401 		break;
402 	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
403 		*active_width = IB_WIDTH_1X;
404 		*active_speed = IB_SPEED_DDR;
405 		break;
406 	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
407 		*active_width = IB_WIDTH_1X;
408 		*active_speed = IB_SPEED_QDR;
409 		break;
410 	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
411 		*active_width = IB_WIDTH_4X;
412 		*active_speed = IB_SPEED_QDR;
413 		break;
414 	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
415 		*active_width = IB_WIDTH_1X;
416 		*active_speed = IB_SPEED_EDR;
417 		break;
418 	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
419 		*active_width = IB_WIDTH_2X;
420 		*active_speed = IB_SPEED_EDR;
421 		break;
422 	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
423 		*active_width = IB_WIDTH_1X;
424 		*active_speed = IB_SPEED_HDR;
425 		break;
426 	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
427 		*active_width = IB_WIDTH_4X;
428 		*active_speed = IB_SPEED_EDR;
429 		break;
430 	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
431 		*active_width = IB_WIDTH_2X;
432 		*active_speed = IB_SPEED_HDR;
433 		break;
434 	case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
435 		*active_width = IB_WIDTH_1X;
436 		*active_speed = IB_SPEED_NDR;
437 		break;
438 	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
439 		*active_width = IB_WIDTH_4X;
440 		*active_speed = IB_SPEED_HDR;
441 		break;
442 	case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
443 		*active_width = IB_WIDTH_2X;
444 		*active_speed = IB_SPEED_NDR;
445 		break;
446 	case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
447 		*active_width = IB_WIDTH_4X;
448 		*active_speed = IB_SPEED_NDR;
449 		break;
450 	default:
451 		return -EINVAL;
452 	}
453 
454 	return 0;
455 }
456 
457 static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
458 				    u8 *active_width, bool ext)
459 {
460 	return ext ?
461 		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
462 					     active_width) :
463 		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
464 						active_width);
465 }
466 
467 static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
468 				struct ib_port_attr *props)
469 {
470 	struct mlx5_ib_dev *dev = to_mdev(device);
471 	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
472 	struct mlx5_core_dev *mdev;
473 	struct net_device *ndev, *upper;
474 	enum ib_mtu ndev_ib_mtu;
475 	bool put_mdev = true;
476 	u32 eth_prot_oper;
477 	u32 mdev_port_num;
478 	bool ext;
479 	int err;
480 
481 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
482 	if (!mdev) {
483 		/* This means the port isn't affiliated yet. Get the
484 		 * info for the master port instead.
485 		 */
486 		put_mdev = false;
487 		mdev = dev->mdev;
488 		mdev_port_num = 1;
489 		port_num = 1;
490 	}
491 
492 	/* Possible bad flows are checked before filling out props, so in case
493 	 * of an error props will still be zeroed out.
494 	 * Use the native port in case of reps.
495 	 */
496 	if (dev->is_rep)
497 		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
498 					   1);
499 	else
500 		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
501 					   mdev_port_num);
502 	if (err)
503 		goto out;
504 	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
505 	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
506 
507 	props->active_width     = IB_WIDTH_4X;
508 	props->active_speed     = IB_SPEED_QDR;
509 
510 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
511 				 &props->active_width, ext);
512 
513 	if (!dev->is_rep && dev->mdev->roce.roce_en) {
514 		u16 qkey_viol_cntr;
515 
516 		props->port_cap_flags |= IB_PORT_CM_SUP;
517 		props->ip_gids = true;
518 		props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
519 						   roce_address_table_size);
520 		mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
521 		props->qkey_viol_cntr = qkey_viol_cntr;
522 	}
523 	props->max_mtu          = IB_MTU_4096;
524 	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
525 	props->pkey_tbl_len     = 1;
526 	props->state            = IB_PORT_DOWN;
527 	props->phys_state       = IB_PORT_PHYS_STATE_DISABLED;
528 
529 	/* If this is a stub query for an unaffiliated port, stop here */
530 	if (!put_mdev)
531 		goto out;
532 
533 	ndev = mlx5_ib_get_netdev(device, port_num);
534 	if (!ndev)
535 		goto out;
536 
537 	if (dev->lag_active) {
538 		rcu_read_lock();
539 		upper = netdev_master_upper_dev_get_rcu(ndev);
540 		if (upper) {
541 			dev_put(ndev);
542 			ndev = upper;
543 			dev_hold(ndev);
544 		}
545 		rcu_read_unlock();
546 	}
547 
548 	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
549 		props->state      = IB_PORT_ACTIVE;
550 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
551 	}
552 
553 	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
554 
555 	dev_put(ndev);
556 
557 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
558 out:
559 	if (put_mdev)
560 		mlx5_ib_put_native_port_mdev(dev, port_num);
561 	return err;
562 }
563 
564 static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
565 			 unsigned int index, const union ib_gid *gid,
566 			 const struct ib_gid_attr *attr)
567 {
568 	enum ib_gid_type gid_type;
569 	u16 vlan_id = 0xffff;
570 	u8 roce_version = 0;
571 	u8 roce_l3_type = 0;
572 	u8 mac[ETH_ALEN];
573 	int ret;
574 
575 	gid_type = attr->gid_type;
576 	if (gid) {
577 		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
578 		if (ret)
579 			return ret;
580 	}
581 
582 	switch (gid_type) {
583 	case IB_GID_TYPE_ROCE:
584 		roce_version = MLX5_ROCE_VERSION_1;
585 		break;
586 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
587 		roce_version = MLX5_ROCE_VERSION_2;
588 		if (gid && ipv6_addr_v4mapped((void *)gid))
589 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
590 		else
591 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
592 		break;
593 
594 	default:
595 		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
596 	}
597 
598 	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
599 				      roce_l3_type, gid->raw, mac,
600 				      vlan_id < VLAN_CFI_MASK, vlan_id,
601 				      port_num);
602 }
603 
604 static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
605 			   __always_unused void **context)
606 {
607 	return set_roce_addr(to_mdev(attr->device), attr->port_num,
608 			     attr->index, &attr->gid, attr);
609 }
610 
611 static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
612 			   __always_unused void **context)
613 {
614 	return set_roce_addr(to_mdev(attr->device), attr->port_num,
615 			     attr->index, NULL, attr);
616 }
617 
618 __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
619 				   const struct ib_gid_attr *attr)
620 {
621 	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
622 		return 0;
623 
624 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
625 }
626 
627 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
628 {
629 	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
630 		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
631 	return 0;
632 }
633 
634 enum {
635 	MLX5_VPORT_ACCESS_METHOD_MAD,
636 	MLX5_VPORT_ACCESS_METHOD_HCA,
637 	MLX5_VPORT_ACCESS_METHOD_NIC,
638 };
639 
640 static int mlx5_get_vport_access_method(struct ib_device *ibdev)
641 {
642 	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
643 		return MLX5_VPORT_ACCESS_METHOD_MAD;
644 
645 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
646 	    IB_LINK_LAYER_ETHERNET)
647 		return MLX5_VPORT_ACCESS_METHOD_NIC;
648 
649 	return MLX5_VPORT_ACCESS_METHOD_HCA;
650 }
651 
652 static void get_atomic_caps(struct mlx5_ib_dev *dev,
653 			    u8 atomic_size_qp,
654 			    struct ib_device_attr *props)
655 {
656 	u8 tmp;
657 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
658 	u8 atomic_req_8B_endianness_mode =
659 		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
660 
661 	/* Check if HW supports 8 byte standard atomic operations and is
662 	 * capable of responding in host endianness
663 	 */
664 	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
665 	if (((atomic_operations & tmp) == tmp) &&
666 	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
667 	    (atomic_req_8B_endianness_mode)) {
668 		props->atomic_cap = IB_ATOMIC_HCA;
669 	} else {
670 		props->atomic_cap = IB_ATOMIC_NONE;
671 	}
672 }
673 
674 static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
675 			       struct ib_device_attr *props)
676 {
677 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
678 
679 	get_atomic_caps(dev, atomic_size_qp, props);
680 }
681 
682 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
683 					__be64 *sys_image_guid)
684 {
685 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
686 	struct mlx5_core_dev *mdev = dev->mdev;
687 	u64 tmp;
688 	int err;
689 
690 	switch (mlx5_get_vport_access_method(ibdev)) {
691 	case MLX5_VPORT_ACCESS_METHOD_MAD:
692 		return mlx5_query_mad_ifc_system_image_guid(ibdev,
693 							    sys_image_guid);
694 
695 	case MLX5_VPORT_ACCESS_METHOD_HCA:
696 		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
697 		break;
698 
699 	case MLX5_VPORT_ACCESS_METHOD_NIC:
700 		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
701 		break;
702 
703 	default:
704 		return -EINVAL;
705 	}
706 
707 	if (!err)
708 		*sys_image_guid = cpu_to_be64(tmp);
709 
710 	return err;
711 
712 }
713 
714 static int mlx5_query_max_pkeys(struct ib_device *ibdev,
715 				u16 *max_pkeys)
716 {
717 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
718 	struct mlx5_core_dev *mdev = dev->mdev;
719 
720 	switch (mlx5_get_vport_access_method(ibdev)) {
721 	case MLX5_VPORT_ACCESS_METHOD_MAD:
722 		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
723 
724 	case MLX5_VPORT_ACCESS_METHOD_HCA:
725 	case MLX5_VPORT_ACCESS_METHOD_NIC:
726 		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
727 						pkey_table_size));
728 		return 0;
729 
730 	default:
731 		return -EINVAL;
732 	}
733 }
734 
735 static int mlx5_query_vendor_id(struct ib_device *ibdev,
736 				u32 *vendor_id)
737 {
738 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
739 
740 	switch (mlx5_get_vport_access_method(ibdev)) {
741 	case MLX5_VPORT_ACCESS_METHOD_MAD:
742 		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
743 
744 	case MLX5_VPORT_ACCESS_METHOD_HCA:
745 	case MLX5_VPORT_ACCESS_METHOD_NIC:
746 		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
747 
748 	default:
749 		return -EINVAL;
750 	}
751 }
752 
753 static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
754 				__be64 *node_guid)
755 {
756 	u64 tmp;
757 	int err;
758 
759 	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
760 	case MLX5_VPORT_ACCESS_METHOD_MAD:
761 		return mlx5_query_mad_ifc_node_guid(dev, node_guid);
762 
763 	case MLX5_VPORT_ACCESS_METHOD_HCA:
764 		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
765 		break;
766 
767 	case MLX5_VPORT_ACCESS_METHOD_NIC:
768 		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
769 		break;
770 
771 	default:
772 		return -EINVAL;
773 	}
774 
775 	if (!err)
776 		*node_guid = cpu_to_be64(tmp);
777 
778 	return err;
779 }
780 
781 struct mlx5_reg_node_desc {
782 	u8	desc[IB_DEVICE_NODE_DESC_MAX];
783 };
784 
785 static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
786 {
787 	struct mlx5_reg_node_desc in;
788 
789 	if (mlx5_use_mad_ifc(dev))
790 		return mlx5_query_mad_ifc_node_desc(dev, node_desc);
791 
792 	memset(&in, 0, sizeof(in));
793 
794 	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
795 				    sizeof(struct mlx5_reg_node_desc),
796 				    MLX5_REG_NODE_DESC, 0, 0);
797 }
798 
799 static int mlx5_ib_query_device(struct ib_device *ibdev,
800 				struct ib_device_attr *props,
801 				struct ib_udata *uhw)
802 {
803 	size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
804 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
805 	struct mlx5_core_dev *mdev = dev->mdev;
806 	int err = -ENOMEM;
807 	int max_sq_desc;
808 	int max_rq_sg;
809 	int max_sq_sg;
810 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
811 	bool raw_support = !mlx5_core_mp_enabled(mdev);
812 	struct mlx5_ib_query_device_resp resp = {};
813 	size_t resp_len;
814 	u64 max_tso;
815 
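	/*
	 * The uverbs response is extensible: response_length starts at the
	 * mandatory header size and grows below only for capability blocks
	 * that fit in the user-supplied output buffer (uhw_outlen).
	 */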
816 	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
817 	if (uhw_outlen && uhw_outlen < resp_len)
818 		return -EINVAL;
819 
820 	resp.response_length = resp_len;
821 
822 	if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
823 		return -EINVAL;
824 
825 	memset(props, 0, sizeof(*props));
826 	err = mlx5_query_system_image_guid(ibdev,
827 					   &props->sys_image_guid);
828 	if (err)
829 		return err;
830 
831 	props->max_pkeys = dev->pkey_table_len;
832 
833 	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
834 	if (err)
835 		return err;
836 
837 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
838 		(fw_rev_min(dev->mdev) << 16) |
839 		fw_rev_sub(dev->mdev);
840 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
841 		IB_DEVICE_PORT_ACTIVE_EVENT		|
842 		IB_DEVICE_SYS_IMAGE_GUID		|
843 		IB_DEVICE_RC_RNR_NAK_GEN;
844 
845 	if (MLX5_CAP_GEN(mdev, pkv))
846 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
847 	if (MLX5_CAP_GEN(mdev, qkv))
848 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
849 	if (MLX5_CAP_GEN(mdev, apm))
850 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
851 	if (MLX5_CAP_GEN(mdev, xrc))
852 		props->device_cap_flags |= IB_DEVICE_XRC;
853 	if (MLX5_CAP_GEN(mdev, imaicl)) {
854 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
855 					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
856 		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
857 		/* We support 'Gappy' memory registration too */
858 		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
859 	}
860 	/* IB_WR_REG_MR always requires changing the entity size with UMR */
861 	if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
862 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
863 	if (MLX5_CAP_GEN(mdev, sho)) {
864 		props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
865 		/* At this stage no support for signature handover */
866 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
867 				      IB_PROT_T10DIF_TYPE_2 |
868 				      IB_PROT_T10DIF_TYPE_3;
869 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
870 				       IB_GUARD_T10DIF_CSUM;
871 	}
872 	if (MLX5_CAP_GEN(mdev, block_lb_mc))
873 		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
874 
875 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
876 		if (MLX5_CAP_ETH(mdev, csum_cap)) {
877 			/* Legacy bit to support old userspace libraries */
878 			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
879 			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
880 		}
881 
882 		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
883 			props->raw_packet_caps |=
884 				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
885 
886 		if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
887 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
888 			if (max_tso) {
889 				resp.tso_caps.max_tso = 1 << max_tso;
890 				resp.tso_caps.supported_qpts |=
891 					1 << IB_QPT_RAW_PACKET;
892 				resp.response_length += sizeof(resp.tso_caps);
893 			}
894 		}
895 
896 		if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
897 			resp.rss_caps.rx_hash_function =
898 						MLX5_RX_HASH_FUNC_TOEPLITZ;
899 			resp.rss_caps.rx_hash_fields_mask =
900 						MLX5_RX_HASH_SRC_IPV4 |
901 						MLX5_RX_HASH_DST_IPV4 |
902 						MLX5_RX_HASH_SRC_IPV6 |
903 						MLX5_RX_HASH_DST_IPV6 |
904 						MLX5_RX_HASH_SRC_PORT_TCP |
905 						MLX5_RX_HASH_DST_PORT_TCP |
906 						MLX5_RX_HASH_SRC_PORT_UDP |
907 						MLX5_RX_HASH_DST_PORT_UDP |
908 						MLX5_RX_HASH_INNER;
909 			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
910 			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
911 				resp.rss_caps.rx_hash_fields_mask |=
912 					MLX5_RX_HASH_IPSEC_SPI;
913 			resp.response_length += sizeof(resp.rss_caps);
914 		}
915 	} else {
916 		if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
917 			resp.response_length += sizeof(resp.tso_caps);
918 		if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
919 			resp.response_length += sizeof(resp.rss_caps);
920 	}
921 
922 	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
923 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
924 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
925 	}
926 
927 	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
928 	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
929 	    raw_support)
930 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
931 
932 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
933 	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
934 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
935 
936 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
937 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
938 	    raw_support) {
939 		/* Legacy bit to support old userspace libraries */
940 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
941 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
942 	}
943 
944 	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
945 		props->max_dm_size =
946 			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
947 	}
948 
949 	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
950 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
951 
952 	if (MLX5_CAP_GEN(mdev, end_pad))
953 		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
954 
955 	props->vendor_part_id	   = mdev->pdev->device;
956 	props->hw_ver		   = mdev->pdev->revision;
957 
958 	props->max_mr_size	   = ~0ull;
959 	props->page_size_cap	   = ~(min_page_size - 1);
960 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
961 	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
962 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
963 		     sizeof(struct mlx5_wqe_data_seg);
964 	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
965 	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
966 		     sizeof(struct mlx5_wqe_raddr_seg)) /
967 		sizeof(struct mlx5_wqe_data_seg);
968 	props->max_send_sge = max_sq_sg;
969 	props->max_recv_sge = max_rq_sg;
970 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
971 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
972 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
973 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
974 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
975 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
976 	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
977 	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
978 	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
979 	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
980 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
981 	props->max_srq_sge	   = max_rq_sg - 1;
982 	props->max_fast_reg_page_list_len =
983 		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
984 	props->max_pi_fast_reg_page_list_len =
985 		props->max_fast_reg_page_list_len / 2;
986 	props->max_sgl_rd =
987 		MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
988 	get_atomic_caps_qp(dev, props);
989 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
990 	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
991 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
992 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
993 					   props->max_mcast_grp;
994 	props->max_ah = INT_MAX;
995 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
996 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
997 
998 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
999 		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1000 			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
1001 		props->odp_caps = dev->odp_caps;
1002 		if (!uhw) {
1003 			/* ODP for kernel QPs is not implemented for receive
1004 			 * WQEs and SRQ WQEs
1005 			 */
1006 			props->odp_caps.per_transport_caps.rc_odp_caps &=
1007 				~(IB_ODP_SUPPORT_READ |
1008 				  IB_ODP_SUPPORT_SRQ_RECV);
1009 			props->odp_caps.per_transport_caps.uc_odp_caps &=
1010 				~(IB_ODP_SUPPORT_READ |
1011 				  IB_ODP_SUPPORT_SRQ_RECV);
1012 			props->odp_caps.per_transport_caps.ud_odp_caps &=
1013 				~(IB_ODP_SUPPORT_READ |
1014 				  IB_ODP_SUPPORT_SRQ_RECV);
1015 			props->odp_caps.per_transport_caps.xrc_odp_caps &=
1016 				~(IB_ODP_SUPPORT_READ |
1017 				  IB_ODP_SUPPORT_SRQ_RECV);
1018 		}
1019 	}
1020 
1021 	if (MLX5_CAP_GEN(mdev, cd))
1022 		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
1023 
1024 	if (mlx5_core_is_vf(mdev))
1025 		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
1026 
1027 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
1028 	    IB_LINK_LAYER_ETHERNET && raw_support) {
1029 		props->rss_caps.max_rwq_indirection_tables =
1030 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1031 		props->rss_caps.max_rwq_indirection_table_size =
1032 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1033 		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
1034 		props->max_wq_type_rq =
1035 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1036 	}
1037 
1038 	if (MLX5_CAP_GEN(mdev, tag_matching)) {
1039 		props->tm_caps.max_num_tags =
1040 			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1041 		props->tm_caps.max_ops =
1042 			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1043 		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
1044 	}
1045 
1046 	if (MLX5_CAP_GEN(mdev, tag_matching) &&
1047 	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
1048 		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
1049 		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
1050 	}
1051 
1052 	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1053 		props->cq_caps.max_cq_moderation_count =
1054 						MLX5_MAX_CQ_COUNT;
1055 		props->cq_caps.max_cq_moderation_period =
1056 						MLX5_MAX_CQ_PERIOD;
1057 	}
1058 
1059 	if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
1060 		resp.response_length += sizeof(resp.cqe_comp_caps);
1061 
1062 		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1063 			resp.cqe_comp_caps.max_num =
1064 				MLX5_CAP_GEN(dev->mdev,
1065 					     cqe_compression_max_num);
1066 
1067 			resp.cqe_comp_caps.supported_format =
1068 				MLX5_IB_CQE_RES_FORMAT_HASH |
1069 				MLX5_IB_CQE_RES_FORMAT_CSUM;
1070 
1071 			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1072 				resp.cqe_comp_caps.supported_format |=
1073 					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
1074 		}
1075 	}
1076 
1077 	if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
1078 	    raw_support) {
1079 		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1080 		    MLX5_CAP_GEN(mdev, qos)) {
1081 			resp.packet_pacing_caps.qp_rate_limit_max =
1082 				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1083 			resp.packet_pacing_caps.qp_rate_limit_min =
1084 				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1085 			resp.packet_pacing_caps.supported_qpts |=
1086 				1 << IB_QPT_RAW_PACKET;
1087 			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1088 			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1089 				resp.packet_pacing_caps.cap_flags |=
1090 					MLX5_IB_PP_SUPPORT_BURST;
1091 		}
1092 		resp.response_length += sizeof(resp.packet_pacing_caps);
1093 	}
1094 
1095 	if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
1096 	    uhw_outlen) {
1097 		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1098 			resp.mlx5_ib_support_multi_pkt_send_wqes =
1099 				MLX5_IB_ALLOW_MPW;
1100 
1101 		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1102 			resp.mlx5_ib_support_multi_pkt_send_wqes |=
1103 				MLX5_IB_SUPPORT_EMPW;
1104 
1105 		resp.response_length +=
1106 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
1107 	}
1108 
1109 	if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
1110 		resp.response_length += sizeof(resp.flags);
1111 
1112 		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1113 			resp.flags |=
1114 				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
1115 
1116 		if (MLX5_CAP_GEN(mdev, cqe_128_always))
1117 			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
1118 		if (MLX5_CAP_GEN(mdev, qp_packet_based))
1119 			resp.flags |=
1120 				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1121 
1122 		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1123 	}
1124 
1125 	if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
1126 		resp.response_length += sizeof(resp.sw_parsing_caps);
1127 		if (MLX5_CAP_ETH(mdev, swp)) {
1128 			resp.sw_parsing_caps.sw_parsing_offloads |=
1129 				MLX5_IB_SW_PARSING;
1130 
1131 			if (MLX5_CAP_ETH(mdev, swp_csum))
1132 				resp.sw_parsing_caps.sw_parsing_offloads |=
1133 					MLX5_IB_SW_PARSING_CSUM;
1134 
1135 			if (MLX5_CAP_ETH(mdev, swp_lso))
1136 				resp.sw_parsing_caps.sw_parsing_offloads |=
1137 					MLX5_IB_SW_PARSING_LSO;
1138 
1139 			if (resp.sw_parsing_caps.sw_parsing_offloads)
1140 				resp.sw_parsing_caps.supported_qpts =
1141 					BIT(IB_QPT_RAW_PACKET);
1142 		}
1143 	}
1144 
1145 	if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
1146 	    raw_support) {
1147 		resp.response_length += sizeof(resp.striding_rq_caps);
1148 		if (MLX5_CAP_GEN(mdev, striding_rq)) {
1149 			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
1150 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1151 			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
1152 				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
1153 			if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
1154 				resp.striding_rq_caps
1155 					.min_single_wqe_log_num_of_strides =
1156 					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1157 			else
1158 				resp.striding_rq_caps
1159 					.min_single_wqe_log_num_of_strides =
1160 					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1161 			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
1162 				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
1163 			resp.striding_rq_caps.supported_qpts =
1164 				BIT(IB_QPT_RAW_PACKET);
1165 		}
1166 	}
1167 
1168 	if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
1169 		resp.response_length += sizeof(resp.tunnel_offloads_caps);
1170 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1171 			resp.tunnel_offloads_caps |=
1172 				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
1173 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1174 			resp.tunnel_offloads_caps |=
1175 				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
1176 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1177 			resp.tunnel_offloads_caps |=
1178 				MLX5_IB_TUNNELED_OFFLOADS_GRE;
1179 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
1180 			resp.tunnel_offloads_caps |=
1181 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1182 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
1183 			resp.tunnel_offloads_caps |=
1184 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1185 	}
1186 
1187 	if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
1188 		resp.response_length += sizeof(resp.dci_streams_caps);
1189 
1190 		resp.dci_streams_caps.max_log_num_concurent =
1191 			MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
1192 
1193 		resp.dci_streams_caps.max_log_num_errored =
1194 			MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
1195 	}
1196 
1197 	if (uhw_outlen) {
1198 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
1199 
1200 		if (err)
1201 			return err;
1202 	}
1203 
1204 	return 0;
1205 }
1206 
1207 static void translate_active_width(struct ib_device *ibdev, u16 active_width,
1208 				   u8 *ib_width)
1209 {
1210 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1211 
1212 	if (active_width & MLX5_PTYS_WIDTH_1X)
1213 		*ib_width = IB_WIDTH_1X;
1214 	else if (active_width & MLX5_PTYS_WIDTH_2X)
1215 		*ib_width = IB_WIDTH_2X;
1216 	else if (active_width & MLX5_PTYS_WIDTH_4X)
1217 		*ib_width = IB_WIDTH_4X;
1218 	else if (active_width & MLX5_PTYS_WIDTH_8X)
1219 		*ib_width = IB_WIDTH_8X;
1220 	else if (active_width & MLX5_PTYS_WIDTH_12X)
1221 		*ib_width = IB_WIDTH_12X;
1222 	else {
1223 		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1224 			    active_width);
1225 		*ib_width = IB_WIDTH_4X;
1226 	}
1227 
1228 	return;
1229 }
1230 
1231 static int mlx5_mtu_to_ib_mtu(int mtu)
1232 {
1233 	switch (mtu) {
1234 	case 256: return 1;
1235 	case 512: return 2;
1236 	case 1024: return 3;
1237 	case 2048: return 4;
1238 	case 4096: return 5;
1239 	default:
1240 		pr_warn("invalid mtu\n");
1241 		return -1;
1242 	}
1243 }
1244 
1245 enum ib_max_vl_num {
1246 	__IB_MAX_VL_0		= 1,
1247 	__IB_MAX_VL_0_1		= 2,
1248 	__IB_MAX_VL_0_3		= 3,
1249 	__IB_MAX_VL_0_7		= 4,
1250 	__IB_MAX_VL_0_14	= 5,
1251 };
1252 
1253 enum mlx5_vl_hw_cap {
1254 	MLX5_VL_HW_0	= 1,
1255 	MLX5_VL_HW_0_1	= 2,
1256 	MLX5_VL_HW_0_2	= 3,
1257 	MLX5_VL_HW_0_3	= 4,
1258 	MLX5_VL_HW_0_4	= 5,
1259 	MLX5_VL_HW_0_5	= 6,
1260 	MLX5_VL_HW_0_6	= 7,
1261 	MLX5_VL_HW_0_7	= 8,
1262 	MLX5_VL_HW_0_14	= 15
1263 };
1264 
1265 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1266 				u8 *max_vl_num)
1267 {
1268 	switch (vl_hw_cap) {
1269 	case MLX5_VL_HW_0:
1270 		*max_vl_num = __IB_MAX_VL_0;
1271 		break;
1272 	case MLX5_VL_HW_0_1:
1273 		*max_vl_num = __IB_MAX_VL_0_1;
1274 		break;
1275 	case MLX5_VL_HW_0_3:
1276 		*max_vl_num = __IB_MAX_VL_0_3;
1277 		break;
1278 	case MLX5_VL_HW_0_7:
1279 		*max_vl_num = __IB_MAX_VL_0_7;
1280 		break;
1281 	case MLX5_VL_HW_0_14:
1282 		*max_vl_num = __IB_MAX_VL_0_14;
1283 		break;
1284 
1285 	default:
1286 		return -EINVAL;
1287 	}
1288 
1289 	return 0;
1290 }
1291 
1292 static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
1293 			       struct ib_port_attr *props)
1294 {
1295 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1296 	struct mlx5_core_dev *mdev = dev->mdev;
1297 	struct mlx5_hca_vport_context *rep;
1298 	u16 max_mtu;
1299 	u16 oper_mtu;
1300 	int err;
1301 	u16 ib_link_width_oper;
1302 	u8 vl_hw_cap;
1303 
1304 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1305 	if (!rep) {
1306 		err = -ENOMEM;
1307 		goto out;
1308 	}
1309 
1310 	/* props is zeroed by the caller, avoid zeroing it again here */
1311 
1312 	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1313 	if (err)
1314 		goto out;
1315 
1316 	props->lid		= rep->lid;
1317 	props->lmc		= rep->lmc;
1318 	props->sm_lid		= rep->sm_lid;
1319 	props->sm_sl		= rep->sm_sl;
1320 	props->state		= rep->vport_state;
1321 	props->phys_state	= rep->port_physical_state;
1322 	props->port_cap_flags	= rep->cap_mask1;
1323 	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1324 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1325 	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1326 	props->bad_pkey_cntr	= rep->pkey_violation_counter;
1327 	props->qkey_viol_cntr	= rep->qkey_violation_counter;
1328 	props->subnet_timeout	= rep->subnet_timeout;
1329 	props->init_type_reply	= rep->init_type_reply;
1330 
1331 	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
1332 		props->port_cap_flags2 = rep->cap_mask2;
1333 
1334 	err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
1335 				      &props->active_speed, port);
1336 	if (err)
1337 		goto out;
1338 
1339 	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1340 
1341 	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1342 
1343 	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1344 
1345 	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1346 
1347 	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1348 
1349 	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1350 	if (err)
1351 		goto out;
1352 
1353 	err = translate_max_vl_num(ibdev, vl_hw_cap,
1354 				   &props->max_vl_num);
1355 out:
1356 	kfree(rep);
1357 	return err;
1358 }
1359 
1360 int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
1361 		       struct ib_port_attr *props)
1362 {
1363 	unsigned int count;
1364 	int ret;
1365 
1366 	switch (mlx5_get_vport_access_method(ibdev)) {
1367 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1368 		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1369 		break;
1370 
1371 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1372 		ret = mlx5_query_hca_port(ibdev, port, props);
1373 		break;
1374 
1375 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1376 		ret = mlx5_query_port_roce(ibdev, port, props);
1377 		break;
1378 
1379 	default:
1380 		ret = -EINVAL;
1381 	}
1382 
1383 	if (!ret && props) {
1384 		struct mlx5_ib_dev *dev = to_mdev(ibdev);
1385 		struct mlx5_core_dev *mdev;
1386 		bool put_mdev = true;
1387 
1388 		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1389 		if (!mdev) {
1390 			/* If the port isn't affiliated yet query the master.
1391 			 * The master and slave will have the same values.
1392 			 */
1393 			mdev = dev->mdev;
1394 			port = 1;
1395 			put_mdev = false;
1396 		}
1397 		count = mlx5_core_reserved_gids_count(mdev);
1398 		if (put_mdev)
1399 			mlx5_ib_put_native_port_mdev(dev, port);
1400 		props->gid_tbl_len -= count;
1401 	}
1402 	return ret;
1403 }
1404 
1405 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
1406 				  struct ib_port_attr *props)
1407 {
1408 	return mlx5_query_port_roce(ibdev, port, props);
1409 }
1410 
1411 static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1412 				  u16 *pkey)
1413 {
1414 	/* Default special Pkey for representor device port as per the
1415 	 * IB specification 1.3 section 10.9.1.2.
1416 	 */
1417 	*pkey = 0xffff;
1418 	return 0;
1419 }
1420 
1421 static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
1422 			     union ib_gid *gid)
1423 {
1424 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1425 	struct mlx5_core_dev *mdev = dev->mdev;
1426 
1427 	switch (mlx5_get_vport_access_method(ibdev)) {
1428 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1429 		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1430 
1431 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1432 		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1433 
1434 	default:
1435 		return -EINVAL;
1436 	}
1437 
1438 }
1439 
1440 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
1441 				   u16 index, u16 *pkey)
1442 {
1443 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1444 	struct mlx5_core_dev *mdev;
1445 	bool put_mdev = true;
1446 	u32 mdev_port_num;
1447 	int err;
1448 
1449 	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1450 	if (!mdev) {
1451 		/* The port isn't affiliated yet, get the PKey from the master
1452 		 * port. For RoCE the PKey tables will be the same.
1453 		 */
1454 		put_mdev = false;
1455 		mdev = dev->mdev;
1456 		mdev_port_num = 1;
1457 	}
1458 
1459 	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1460 					index, pkey);
1461 	if (put_mdev)
1462 		mlx5_ib_put_native_port_mdev(dev, port);
1463 
1464 	return err;
1465 }
1466 
1467 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1468 			      u16 *pkey)
1469 {
1470 	switch (mlx5_get_vport_access_method(ibdev)) {
1471 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1472 		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1473 
1474 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1475 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1476 		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1477 	default:
1478 		return -EINVAL;
1479 	}
1480 }
1481 
1482 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1483 				 struct ib_device_modify *props)
1484 {
1485 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1486 	struct mlx5_reg_node_desc in;
1487 	struct mlx5_reg_node_desc out;
1488 	int err;
1489 
1490 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1491 		return -EOPNOTSUPP;
1492 
1493 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1494 		return 0;
1495 
1496 	/*
1497 	 * If possible, pass the node desc to FW so it can generate
1498 	 * a trap 144.  If the command fails, just ignore it.
1499 	 */
1500 	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1501 	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1502 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1503 	if (err)
1504 		return err;
1505 
1506 	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1507 
1508 	return err;
1509 }
1510 
1511 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
1512 				u32 value)
1513 {
1514 	struct mlx5_hca_vport_context ctx = {};
1515 	struct mlx5_core_dev *mdev;
1516 	u32 mdev_port_num;
1517 	int err;
1518 
1519 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1520 	if (!mdev)
1521 		return -ENODEV;
1522 
1523 	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1524 	if (err)
1525 		goto out;
1526 
1527 	if (~ctx.cap_mask1_perm & mask) {
1528 		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1529 			     mask, ctx.cap_mask1_perm);
1530 		err = -EINVAL;
1531 		goto out;
1532 	}
1533 
1534 	ctx.cap_mask1 = value;
1535 	ctx.cap_mask1_perm = mask;
1536 	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1537 						 0, &ctx);
1538 
1539 out:
1540 	mlx5_ib_put_native_port_mdev(dev, port_num);
1541 
1542 	return err;
1543 }
1544 
1545 static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1546 			       struct ib_port_modify *props)
1547 {
1548 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1549 	struct ib_port_attr attr;
1550 	u32 tmp;
1551 	int err;
1552 	u32 change_mask;
1553 	u32 value;
1554 	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1555 		      IB_LINK_LAYER_INFINIBAND);
1556 
1557 	/* The CM layer calls ib_modify_port() regardless of the link layer. For
1558 	 * Ethernet ports, qkey violations and port capabilities are meaningless.
1559 	 */
1560 	if (!is_ib)
1561 		return 0;
1562 
1563 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1564 		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1565 		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1566 		return set_port_caps_atomic(dev, port, change_mask, value);
1567 	}
1568 
1569 	mutex_lock(&dev->cap_mask_mutex);
1570 
1571 	err = ib_query_port(ibdev, port, &attr);
1572 	if (err)
1573 		goto out;
1574 
1575 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1576 		~props->clr_port_cap_mask;
1577 
1578 	err = mlx5_set_port_caps(dev->mdev, port, tmp);
1579 
1580 out:
1581 	mutex_unlock(&dev->cap_mask_mutex);
1582 	return err;
1583 }
1584 
1585 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1586 {
1587 	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1588 		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1589 }
1590 
1591 static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1592 {
1593 	/* A large system page size without 4K UAR support might limit the dynamic size */
1594 	if (uars_per_sys_page == 1  && PAGE_SIZE > 4096)
1595 		return MLX5_MIN_DYN_BFREGS;
1596 
1597 	return MLX5_MAX_DYN_BFREGS;
1598 }
1599 
1600 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1601 			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
1602 			     struct mlx5_bfreg_info *bfregi)
1603 {
1604 	int uars_per_sys_page;
1605 	int bfregs_per_sys_page;
1606 	int ref_bfregs = req->total_num_bfregs;
1607 
1608 	if (req->total_num_bfregs == 0)
1609 		return -EINVAL;
1610 
1611 	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1612 	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1613 
1614 	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1615 		return -ENOMEM;
1616 
1617 	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1618 	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1619 	/* This holds the static allocation requested by the user, rounded up to whole system pages */
1620 	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1621 	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1622 		return -EINVAL;
1623 
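	/*
	 * Static bfregs come from the user's request; dynamic bfregs are
	 * layered on top and their UAR pages are allocated lazily (they start
	 * out as MLX5_IB_INVALID_UAR_INDEX in allocate_uars()).
	 */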
1624 	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1625 	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1626 	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1627 	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1628 
1629 	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1630 		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1631 		    lib_uar_4k ? "yes" : "no", ref_bfregs,
1632 		    req->total_num_bfregs, bfregi->total_num_bfregs,
1633 		    bfregi->num_sys_pages);
1634 
1635 	return 0;
1636 }
1637 
1638 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1639 {
1640 	struct mlx5_bfreg_info *bfregi;
1641 	int err;
1642 	int i;
1643 
1644 	bfregi = &context->bfregi;
1645 	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1646 		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1647 		if (err)
1648 			goto error;
1649 
1650 		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1651 	}
1652 
1653 	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1654 		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1655 
1656 	return 0;
1657 
1658 error:
1659 	for (--i; i >= 0; i--)
1660 		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1661 			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1662 
1663 	return err;
1664 }
1665 
1666 static void deallocate_uars(struct mlx5_ib_dev *dev,
1667 			    struct mlx5_ib_ucontext *context)
1668 {
1669 	struct mlx5_bfreg_info *bfregi;
1670 	int i;
1671 
1672 	bfregi = &context->bfregi;
1673 	for (i = 0; i < bfregi->num_sys_pages; i++)
1674 		if (i < bfregi->num_static_sys_pages ||
1675 		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1676 			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1677 }
1678 
1679 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1680 {
1681 	int err = 0;
1682 
1683 	mutex_lock(&dev->lb.mutex);
1684 	if (td)
1685 		dev->lb.user_td++;
1686 	if (qp)
1687 		dev->lb.qps++;
1688 
1689 	if (dev->lb.user_td == 2 ||
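	/*
	 * Turn on local loopback once a second transport domain or the first
	 * loopback-dependent QP shows up; mlx5_ib_disable_lb() undoes this
	 * when the counts drop back.
	 */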
1690 	    dev->lb.qps == 1) {
1691 		if (!dev->lb.enabled) {
1692 			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1693 			dev->lb.enabled = true;
1694 		}
1695 	}
1696 
1697 	mutex_unlock(&dev->lb.mutex);
1698 
1699 	return err;
1700 }
1701 
1702 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1703 {
1704 	mutex_lock(&dev->lb.mutex);
1705 	if (td)
1706 		dev->lb.user_td--;
1707 	if (qp)
1708 		dev->lb.qps--;
1709 
1710 	if (dev->lb.user_td == 1 &&
1711 	    dev->lb.qps == 0) {
1712 		if (dev->lb.enabled) {
1713 			mlx5_nic_vport_update_local_lb(dev->mdev, false);
1714 			dev->lb.enabled = false;
1715 		}
1716 	}
1717 
1718 	mutex_unlock(&dev->lb.mutex);
1719 }
1720 
1721 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1722 					  u16 uid)
1723 {
1724 	int err;
1725 
1726 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1727 		return 0;
1728 
1729 	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1730 	if (err)
1731 		return err;
1732 
1733 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1734 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1735 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1736 		return err;
1737 
1738 	return mlx5_ib_enable_lb(dev, true, false);
1739 }
1740 
1741 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1742 					     u16 uid)
1743 {
1744 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1745 		return;
1746 
1747 	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1748 
1749 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1750 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1751 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1752 		return;
1753 
1754 	mlx5_ib_disable_lb(dev, true, false);
1755 }
1756 
1757 static int set_ucontext_resp(struct ib_ucontext *uctx,
1758 			     struct mlx5_ib_alloc_ucontext_resp *resp)
1759 {
1760 	struct ib_device *ibdev = uctx->device;
1761 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1762 	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1763 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
1764 	int err;
1765 
1766 	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1767 		err = mlx5_cmd_dump_fill_mkey(dev->mdev,
1768 					      &resp->dump_fill_mkey);
1769 		if (err)
1770 			return err;
1771 		resp->comp_mask |=
1772 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1773 	}
1774 
1775 	resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1776 	if (dev->wc_support)
1777 		resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
1778 						      log_bf_reg_size);
1779 	resp->cache_line_size = cache_line_size();
1780 	resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1781 	resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1782 	resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1783 	resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1784 	resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1785 	resp->cqe_version = context->cqe_version;
1786 	resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1787 				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1788 	resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1789 					MLX5_CAP_GEN(dev->mdev,
1790 						     num_of_uars_per_page) : 1;
1791 
1792 	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
1793 				MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1794 		if (mlx5_get_flow_namespace(dev->mdev,
1795 				MLX5_FLOW_NAMESPACE_EGRESS))
1796 			resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1797 		if (mlx5_accel_ipsec_device_caps(dev->mdev) &
1798 				MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1799 			resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1800 		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1801 			resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1802 		if (mlx5_accel_ipsec_device_caps(dev->mdev) &
1803 				MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1804 			resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1805 		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1806 	}
1807 
1808 	resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
1809 			bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
1810 	resp->num_ports = dev->num_ports;
1811 	resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1812 				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1813 
1814 	if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1815 		mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
1816 		resp->eth_min_inline++;
1817 	}
1818 
1819 	if (dev->mdev->clock_info)
1820 		resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1821 
1822 	/*
1823 	 * We don't want to expose information from the PCI bar that is located
1824 	 * after 4096 bytes, so if the arch only supports larger pages, let's
1825 	 * pretend we don't support reading the HCA's core clock. This is also
1826 	 * enforced by the mmap function.
1827 	 */
1828 	if (PAGE_SIZE <= 4096) {
1829 		resp->comp_mask |=
1830 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1831 		resp->hca_core_clock_offset =
1832 			offsetof(struct mlx5_init_seg,
1833 				 internal_timer_h) % PAGE_SIZE;
1834 	}
1835 
1836 	if (MLX5_CAP_GEN(dev->mdev, ece_support))
1837 		resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
1838 
1839 	if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
1840 	    rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
1841 	    rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
1842 		resp->comp_mask |=
1843 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
1844 
1845 	resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
1846 
1847 	if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
1848 		resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
1849 
1850 	return 0;
1851 }
1852 
1853 static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1854 				  struct ib_udata *udata)
1855 {
1856 	struct ib_device *ibdev = uctx->device;
1857 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1858 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1859 	struct mlx5_ib_alloc_ucontext_resp resp = {};
1860 	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1861 	struct mlx5_bfreg_info *bfregi;
1862 	int ver;
1863 	int err;
1864 	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1865 				     max_cqe_version);
1866 	bool lib_uar_4k;
1867 	bool lib_uar_dyn;
1868 
1869 	if (!dev->ib_active)
1870 		return -EAGAIN;
1871 
1872 	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1873 		ver = 0;
1874 	else if (udata->inlen >= min_req_v2)
1875 		ver = 2;
1876 	else
1877 		return -EINVAL;
1878 
1879 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1880 	if (err)
1881 		return err;
1882 
1883 	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
1884 		return -EOPNOTSUPP;
1885 
1886 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1887 		return -EOPNOTSUPP;
1888 
1889 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1890 				    MLX5_NON_FP_BFREGS_PER_UAR);
1891 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1892 		return -EINVAL;
1893 
1894 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1895 	lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
1896 	bfregi = &context->bfregi;
1897 
1898 	if (lib_uar_dyn) {
1899 		bfregi->lib_uar_dyn = lib_uar_dyn;
1900 		goto uar_done;
1901 	}
1902 
1903 	/* updates req->total_num_bfregs */
1904 	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1905 	if (err)
1906 		goto out_ctx;
1907 
1908 	mutex_init(&bfregi->lock);
1909 	bfregi->lib_uar_4k = lib_uar_4k;
1910 	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
1911 				GFP_KERNEL);
1912 	if (!bfregi->count) {
1913 		err = -ENOMEM;
1914 		goto out_ctx;
1915 	}
1916 
1917 	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1918 				    sizeof(*bfregi->sys_pages),
1919 				    GFP_KERNEL);
1920 	if (!bfregi->sys_pages) {
1921 		err = -ENOMEM;
1922 		goto out_count;
1923 	}
1924 
1925 	err = allocate_uars(dev, context);
1926 	if (err)
1927 		goto out_sys_pages;
1928 
1929 uar_done:
1930 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1931 		err = mlx5_ib_devx_create(dev, true);
1932 		if (err < 0)
1933 			goto out_uars;
1934 		context->devx_uid = err;
1935 	}
1936 
1937 	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1938 					     context->devx_uid);
1939 	if (err)
1940 		goto out_devx;
1941 
1942 	INIT_LIST_HEAD(&context->db_page_list);
1943 	mutex_init(&context->db_page_mutex);
1944 
1945 	context->cqe_version = min_t(__u8,
1946 				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1947 				 req.max_cqe_version);
1948 
1949 	err = set_ucontext_resp(uctx, &resp);
1950 	if (err)
1951 		goto out_mdev;
1952 
1953 	resp.response_length = min(udata->outlen, sizeof(resp));
1954 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
1955 	if (err)
1956 		goto out_mdev;
1957 
1958 	bfregi->ver = ver;
1959 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1960 	context->lib_caps = req.lib_caps;
1961 	print_lib_caps(dev, context->lib_caps);
1962 
1963 	if (mlx5_ib_lag_should_assign_affinity(dev)) {
1964 		u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
1965 
1966 		atomic_set(&context->tx_port_affinity,
1967 			   atomic_add_return(
1968 				   1, &dev->port[port].roce.tx_port_affinity));
1969 	}
1970 
1971 	return 0;
1972 
1973 out_mdev:
1974 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1975 out_devx:
1976 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1977 		mlx5_ib_devx_destroy(dev, context->devx_uid);
1978 
1979 out_uars:
1980 	deallocate_uars(dev, context);
1981 
1982 out_sys_pages:
1983 	kfree(bfregi->sys_pages);
1984 
1985 out_count:
1986 	kfree(bfregi->count);
1987 
1988 out_ctx:
1989 	return err;
1990 }
1991 
1992 static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext,
1993 				  struct uverbs_attr_bundle *attrs)
1994 {
1995 	struct mlx5_ib_alloc_ucontext_resp uctx_resp = {};
1996 	int ret;
1997 
1998 	ret = set_ucontext_resp(ibcontext, &uctx_resp);
1999 	if (ret)
2000 		return ret;
2001 
2002 	uctx_resp.response_length =
2003 		min_t(size_t,
2004 		      uverbs_attr_get_len(attrs,
2005 				MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX),
2006 		      sizeof(uctx_resp));
2007 
2008 	ret = uverbs_copy_to_struct_or_zero(attrs,
2009 					MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
2010 					&uctx_resp,
2011 					sizeof(uctx_resp));
2012 	return ret;
2013 }
2014 
2015 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
2016 {
2017 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2018 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2019 	struct mlx5_bfreg_info *bfregi;
2020 
2021 	bfregi = &context->bfregi;
2022 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2023 
2024 	if (context->devx_uid)
2025 		mlx5_ib_devx_destroy(dev, context->devx_uid);
2026 
2027 	deallocate_uars(dev, context);
2028 	kfree(bfregi->sys_pages);
2029 	kfree(bfregi->count);
2030 }
2031 
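/*
 * Translate a UAR index to the PFN of the system page backing it; when
 * 4K UARs are in use, several UARs share a single system page.
 */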
2032 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2033 				 int uar_idx)
2034 {
2035 	int fw_uars_per_page;
2036 
2037 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2038 
2039 	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2040 }
2041 
2042 static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
2043 				 int uar_idx)
2044 {
2045 	unsigned int fw_uars_per_page;
2046 
2047 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2048 				MLX5_UARS_IN_PAGE : 1;
2049 
2050 	return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
2051 }
2052 
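/*
 * The mmap offset encodes a command in the bits at MLX5_IB_MMAP_CMD_SHIFT
 * and above, and a command-specific argument (e.g. a UAR index) in the
 * bits below it.
 */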
2053 static int get_command(unsigned long offset)
2054 {
2055 	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2056 }
2057 
2058 static int get_arg(unsigned long offset)
2059 {
2060 	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2061 }
2062 
2063 static int get_index(unsigned long offset)
2064 {
2065 	return get_arg(offset);
2066 }
2067 
2068 /* Index resides in an extra byte to allow values larger than 255 */
2069 static int get_extended_index(unsigned long offset)
2070 {
2071 	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2072 }
2073 
2074 
2075 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2076 {
2077 }
2078 
2079 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2080 {
2081 	switch (cmd) {
2082 	case MLX5_IB_MMAP_WC_PAGE:
2083 		return "WC";
2084 	case MLX5_IB_MMAP_REGULAR_PAGE:
2085 		return "best effort WC";
2086 	case MLX5_IB_MMAP_NC_PAGE:
2087 		return "NC";
2088 	case MLX5_IB_MMAP_DEVICE_MEM:
2089 		return "Device Memory";
2090 	default:
2091 		return NULL;
2092 	}
2093 }
2094 
2095 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2096 					struct vm_area_struct *vma,
2097 					struct mlx5_ib_ucontext *context)
2098 {
2099 	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2100 	    !(vma->vm_flags & VM_SHARED))
2101 		return -EINVAL;
2102 
2103 	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2104 		return -EOPNOTSUPP;
2105 
2106 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2107 		return -EPERM;
2108 	vma->vm_flags &= ~VM_MAYWRITE;
2109 
2110 	if (!dev->mdev->clock_info)
2111 		return -EOPNOTSUPP;
2112 
2113 	return vm_insert_page(vma, vma->vm_start,
2114 			      virt_to_page(dev->mdev->clock_info));
2115 }
2116 
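/*
 * Free callback for mlx5 user mmap entries: releases the backing device
 * memory (MEMIC), VAR table slot or dynamically allocated UAR.
 */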
2117 static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
2118 {
2119 	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
2120 	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
2121 	struct mlx5_var_table *var_table = &dev->var_table;
2122 
2123 	switch (mentry->mmap_flag) {
2124 	case MLX5_IB_MMAP_TYPE_MEMIC:
2125 	case MLX5_IB_MMAP_TYPE_MEMIC_OP:
2126 		mlx5_ib_dm_mmap_free(dev, mentry);
2127 		break;
2128 	case MLX5_IB_MMAP_TYPE_VAR:
2129 		mutex_lock(&var_table->bitmap_lock);
2130 		clear_bit(mentry->page_idx, var_table->bitmap);
2131 		mutex_unlock(&var_table->bitmap_lock);
2132 		kfree(mentry);
2133 		break;
2134 	case MLX5_IB_MMAP_TYPE_UAR_WC:
2135 	case MLX5_IB_MMAP_TYPE_UAR_NC:
2136 		mlx5_cmd_free_uar(dev->mdev, mentry->page_idx);
2137 		kfree(mentry);
2138 		break;
2139 	default:
2140 		WARN_ON(true);
2141 	}
2142 }
2143 
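/*
 * Map a single UAR system page to user space. For MLX5_IB_MMAP_ALLOC_WC a
 * new UAR is allocated on demand and recorded in sys_pages; otherwise the
 * index refers to a UAR allocated at context creation time.
 */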
2144 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2145 		    struct vm_area_struct *vma,
2146 		    struct mlx5_ib_ucontext *context)
2147 {
2148 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
2149 	int err;
2150 	unsigned long idx;
2151 	phys_addr_t pfn;
2152 	pgprot_t prot;
2153 	u32 bfreg_dyn_idx = 0;
2154 	u32 uar_index;
2155 	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2156 	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2157 				bfregi->num_static_sys_pages;
2158 
2159 	if (bfregi->lib_uar_dyn)
2160 		return -EINVAL;
2161 
2162 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2163 		return -EINVAL;
2164 
2165 	if (dyn_uar)
2166 		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2167 	else
2168 		idx = get_index(vma->vm_pgoff);
2169 
2170 	if (idx >= max_valid_idx) {
2171 		mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2172 			     idx, max_valid_idx);
2173 		return -EINVAL;
2174 	}
2175 
2176 	switch (cmd) {
2177 	case MLX5_IB_MMAP_WC_PAGE:
2178 	case MLX5_IB_MMAP_ALLOC_WC:
2179 	case MLX5_IB_MMAP_REGULAR_PAGE:
2180 		/* For MLX5_IB_MMAP_REGULAR_PAGE make a best effort to get WC */
2181 		prot = pgprot_writecombine(vma->vm_page_prot);
2182 		break;
2183 	case MLX5_IB_MMAP_NC_PAGE:
2184 		prot = pgprot_noncached(vma->vm_page_prot);
2185 		break;
2186 	default:
2187 		return -EINVAL;
2188 	}
2189 
2190 	if (dyn_uar) {
2191 		int uars_per_page;
2192 
2193 		uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2194 		bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2195 		if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2196 			mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2197 				     bfreg_dyn_idx, bfregi->total_num_bfregs);
2198 			return -EINVAL;
2199 		}
2200 
2201 		mutex_lock(&bfregi->lock);
2202 		/* Fail if the UAR is already allocated; the first bfreg index of
2203 		 * each page holds its count.
2204 		 */
2205 		if (bfregi->count[bfreg_dyn_idx]) {
2206 			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2207 			mutex_unlock(&bfregi->lock);
2208 			return -EINVAL;
2209 		}
2210 
2211 		bfregi->count[bfreg_dyn_idx]++;
2212 		mutex_unlock(&bfregi->lock);
2213 
2214 		err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
2215 		if (err) {
2216 			mlx5_ib_warn(dev, "UAR alloc failed\n");
2217 			goto free_bfreg;
2218 		}
2219 	} else {
2220 		uar_index = bfregi->sys_pages[idx];
2221 	}
2222 
2223 	pfn = uar_index2pfn(dev, uar_index);
2224 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2225 
2226 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2227 				prot, NULL);
2228 	if (err) {
2229 		mlx5_ib_err(dev,
2230 			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2231 			    err, mmap_cmd2str(cmd));
2232 		goto err;
2233 	}
2234 
2235 	if (dyn_uar)
2236 		bfregi->sys_pages[idx] = uar_index;
2237 	return 0;
2238 
2239 err:
2240 	if (!dyn_uar)
2241 		return err;
2242 
2243 	mlx5_cmd_free_uar(dev->mdev, idx);
2244 
2245 free_bfreg:
2246 	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2247 
2248 	return err;
2249 }
2250 
2251 static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
2252 {
2253 	unsigned long idx;
2254 	u8 command;
2255 
2256 	command = get_command(vma->vm_pgoff);
2257 	idx = get_extended_index(vma->vm_pgoff);
2258 
2259 	return (command << 16 | idx);
2260 }
2261 
2262 static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
2263 			       struct vm_area_struct *vma,
2264 			       struct ib_ucontext *ucontext)
2265 {
2266 	struct mlx5_user_mmap_entry *mentry;
2267 	struct rdma_user_mmap_entry *entry;
2268 	unsigned long pgoff;
2269 	pgprot_t prot;
2270 	phys_addr_t pfn;
2271 	int ret;
2272 
2273 	pgoff = mlx5_vma_to_pgoff(vma);
2274 	entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
2275 	if (!entry)
2276 		return -EINVAL;
2277 
2278 	mentry = to_mmmap(entry);
2279 	pfn = (mentry->address >> PAGE_SHIFT);
2280 	if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
2281 	    mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
2282 		prot = pgprot_noncached(vma->vm_page_prot);
2283 	else
2284 		prot = pgprot_writecombine(vma->vm_page_prot);
2285 	ret = rdma_user_mmap_io(ucontext, vma, pfn,
2286 				entry->npages * PAGE_SIZE,
2287 				prot,
2288 				entry);
2289 	rdma_user_mmap_entry_put(&mentry->rdma_entry);
2290 	return ret;
2291 }
2292 
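/*
 * Convert an rdma mmap entry's start_pgoff back into the byte offset that
 * user space passes to mmap(), re-encoding the command and the extended
 * index.
 */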
2293 static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
2294 {
2295 	u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
2296 	u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
2297 
2298 	return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
2299 		(index & 0xFF)) << PAGE_SHIFT;
2300 }
2301 
2302 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2303 {
2304 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2305 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2306 	unsigned long command;
2307 	phys_addr_t pfn;
2308 
2309 	command = get_command(vma->vm_pgoff);
2310 	switch (command) {
2311 	case MLX5_IB_MMAP_WC_PAGE:
2312 	case MLX5_IB_MMAP_ALLOC_WC:
2313 		if (!dev->wc_support)
2314 			return -EPERM;
2315 		fallthrough;
2316 	case MLX5_IB_MMAP_NC_PAGE:
2317 	case MLX5_IB_MMAP_REGULAR_PAGE:
2318 		return uar_mmap(dev, command, vma, context);
2319 
2320 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2321 		return -ENOSYS;
2322 
2323 	case MLX5_IB_MMAP_CORE_CLOCK:
2324 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2325 			return -EINVAL;
2326 
2327 		if (vma->vm_flags & VM_WRITE)
2328 			return -EPERM;
2329 		vma->vm_flags &= ~VM_MAYWRITE;
2330 
2331 		/* Don't expose to user-space information it shouldn't have */
2332 		if (PAGE_SIZE > 4096)
2333 			return -EOPNOTSUPP;
2334 
2335 		pfn = (dev->mdev->iseg_base +
2336 		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2337 			PAGE_SHIFT;
2338 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2339 					 PAGE_SIZE,
2340 					 pgprot_noncached(vma->vm_page_prot),
2341 					 NULL);
2342 	case MLX5_IB_MMAP_CLOCK_INFO:
2343 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2344 
2345 	default:
2346 		return mlx5_ib_mmap_offset(dev, vma, ibcontext);
2347 	}
2348 
2349 	return 0;
2350 }
2351 
2352 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2353 {
2354 	struct mlx5_ib_pd *pd = to_mpd(ibpd);
2355 	struct ib_device *ibdev = ibpd->device;
2356 	struct mlx5_ib_alloc_pd_resp resp;
2357 	int err;
2358 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2359 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
2360 	u16 uid = 0;
2361 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2362 		udata, struct mlx5_ib_ucontext, ibucontext);
2363 
2364 	uid = context ? context->devx_uid : 0;
2365 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2366 	MLX5_SET(alloc_pd_in, in, uid, uid);
2367 	err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2368 	if (err)
2369 		return err;
2370 
2371 	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2372 	pd->uid = uid;
2373 	if (udata) {
2374 		resp.pdn = pd->pdn;
2375 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2376 			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2377 			return -EFAULT;
2378 		}
2379 	}
2380 
2381 	return 0;
2382 }
2383 
2384 static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2385 {
2386 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2387 	struct mlx5_ib_pd *mpd = to_mpd(pd);
2388 
2389 	return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2390 }
2391 
2392 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2393 {
2394 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2395 	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
2396 	int err;
2397 	u16 uid;
2398 
2399 	uid = ibqp->pd ?
2400 		to_mpd(ibqp->pd)->uid : 0;
2401 
2402 	if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
2403 		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
2404 		return -EOPNOTSUPP;
2405 	}
2406 
2407 	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2408 	if (err)
2409 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2410 			     ibqp->qp_num, gid->raw);
2411 
2412 	return err;
2413 }
2414 
2415 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2416 {
2417 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2418 	int err;
2419 	u16 uid;
2420 
2421 	uid = ibqp->pd ?
2422 		to_mpd(ibqp->pd)->uid : 0;
2423 	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2424 	if (err)
2425 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2426 			     ibqp->qp_num, gid->raw);
2427 
2428 	return err;
2429 }
2430 
2431 static int init_node_data(struct mlx5_ib_dev *dev)
2432 {
2433 	int err;
2434 
2435 	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2436 	if (err)
2437 		return err;
2438 
2439 	dev->mdev->rev_id = dev->mdev->pdev->revision;
2440 
2441 	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2442 }
2443 
2444 static ssize_t fw_pages_show(struct device *device,
2445 			     struct device_attribute *attr, char *buf)
2446 {
2447 	struct mlx5_ib_dev *dev =
2448 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2449 
2450 	return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
2451 }
2452 static DEVICE_ATTR_RO(fw_pages);
2453 
2454 static ssize_t reg_pages_show(struct device *device,
2455 			      struct device_attribute *attr, char *buf)
2456 {
2457 	struct mlx5_ib_dev *dev =
2458 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2459 
2460 	return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2461 }
2462 static DEVICE_ATTR_RO(reg_pages);
2463 
2464 static ssize_t hca_type_show(struct device *device,
2465 			     struct device_attribute *attr, char *buf)
2466 {
2467 	struct mlx5_ib_dev *dev =
2468 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2469 
2470 	return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
2471 }
2472 static DEVICE_ATTR_RO(hca_type);
2473 
2474 static ssize_t hw_rev_show(struct device *device,
2475 			   struct device_attribute *attr, char *buf)
2476 {
2477 	struct mlx5_ib_dev *dev =
2478 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2479 
2480 	return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
2481 }
2482 static DEVICE_ATTR_RO(hw_rev);
2483 
2484 static ssize_t board_id_show(struct device *device,
2485 			     struct device_attribute *attr, char *buf)
2486 {
2487 	struct mlx5_ib_dev *dev =
2488 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2489 
2490 	return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
2491 			  dev->mdev->board_id);
2492 }
2493 static DEVICE_ATTR_RO(board_id);
2494 
2495 static struct attribute *mlx5_class_attributes[] = {
2496 	&dev_attr_hw_rev.attr,
2497 	&dev_attr_hca_type.attr,
2498 	&dev_attr_board_id.attr,
2499 	&dev_attr_fw_pages.attr,
2500 	&dev_attr_reg_pages.attr,
2501 	NULL,
2502 };
2503 
2504 static const struct attribute_group mlx5_attr_group = {
2505 	.attrs = mlx5_class_attributes,
2506 };
2507 
2508 static void pkey_change_handler(struct work_struct *work)
2509 {
2510 	struct mlx5_ib_port_resources *ports =
2511 		container_of(work, struct mlx5_ib_port_resources,
2512 			     pkey_change_work);
2513 
2514 	if (!ports->gsi)
2515 		/*
2516 		 * We got this event before the device was fully configured
2517 		 * and the MAD registration code wasn't called/finished yet.
2518 		 */
2519 		return;
2520 
2521 	mlx5_ib_gsi_pkey_change(ports->gsi);
2522 }
2523 
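/*
 * On a fatal device error, walk all QPs on this device and invoke the
 * completion handler of every CQ that still has outstanding work, so that
 * consumers poll their flushed completions.
 */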
2524 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2525 {
2526 	struct mlx5_ib_qp *mqp;
2527 	struct mlx5_ib_cq *send_mcq, *recv_mcq;
2528 	struct mlx5_core_cq *mcq;
2529 	struct list_head cq_armed_list;
2530 	unsigned long flags_qp;
2531 	unsigned long flags_cq;
2532 	unsigned long flags;
2533 
2534 	INIT_LIST_HEAD(&cq_armed_list);
2535 
2536 	/* Go over the qp list residing on this ibdev, synced with create/destroy qp. */
2537 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2538 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2539 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2540 		if (mqp->sq.tail != mqp->sq.head) {
2541 			send_mcq = to_mcq(mqp->ibqp.send_cq);
2542 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
2543 			if (send_mcq->mcq.comp &&
2544 			    mqp->ibqp.send_cq->comp_handler) {
2545 				if (!send_mcq->mcq.reset_notify_added) {
2546 					send_mcq->mcq.reset_notify_added = 1;
2547 					list_add_tail(&send_mcq->mcq.reset_notify,
2548 						      &cq_armed_list);
2549 				}
2550 			}
2551 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2552 		}
2553 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2554 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2555 		/* no handling is needed for SRQ */
2556 		if (!mqp->ibqp.srq) {
2557 			if (mqp->rq.tail != mqp->rq.head) {
2558 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2559 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2560 				if (recv_mcq->mcq.comp &&
2561 				    mqp->ibqp.recv_cq->comp_handler) {
2562 					if (!recv_mcq->mcq.reset_notify_added) {
2563 						recv_mcq->mcq.reset_notify_added = 1;
2564 						list_add_tail(&recv_mcq->mcq.reset_notify,
2565 							      &cq_armed_list);
2566 					}
2567 				}
2568 				spin_unlock_irqrestore(&recv_mcq->lock,
2569 						       flags_cq);
2570 			}
2571 		}
2572 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2573 	}
2574 	/* At this point all in-flight post sends have been accounted for by the
2575 	 * lock/unlock of the locks above. Now arm all involved CQs.
2576 	 */
2577 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
2578 		mcq->comp(mcq, NULL);
2579 	}
2580 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2581 }
2582 
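/*
 * Delay-drop timeout work: re-program the configured delay-drop timeout
 * in firmware and mark the feature inactive if that fails.
 */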
2583 static void delay_drop_handler(struct work_struct *work)
2584 {
2585 	int err;
2586 	struct mlx5_ib_delay_drop *delay_drop =
2587 		container_of(work, struct mlx5_ib_delay_drop,
2588 			     delay_drop_work);
2589 
2590 	atomic_inc(&delay_drop->events_cnt);
2591 
2592 	mutex_lock(&delay_drop->lock);
2593 	err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
2594 	if (err) {
2595 		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
2596 			     delay_drop->timeout);
2597 		delay_drop->activate = false;
2598 	}
2599 	mutex_unlock(&delay_drop->lock);
2600 }
2601 
2602 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2603 				 struct ib_event *ibev)
2604 {
2605 	u32 port = (eqe->data.port.port >> 4) & 0xf;
2606 
2607 	switch (eqe->sub_type) {
2608 	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
2609 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2610 					    IB_LINK_LAYER_ETHERNET)
2611 			schedule_work(&ibdev->delay_drop.delay_drop_work);
2612 		break;
2613 	default: /* do nothing */
2614 		return;
2615 	}
2616 }
2617 
2618 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2619 			      struct ib_event *ibev)
2620 {
2621 	u32 port = (eqe->data.port.port >> 4) & 0xf;
2622 
2623 	ibev->element.port_num = port;
2624 
2625 	switch (eqe->sub_type) {
2626 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
2627 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
2628 	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
2629 		/* In RoCE, port up/down events are handled in
2630 		 * mlx5_netdev_event().
2631 		 */
2632 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2633 					    IB_LINK_LAYER_ETHERNET)
2634 			return -EINVAL;
2635 
2636 		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
2637 				IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2638 		break;
2639 
2640 	case MLX5_PORT_CHANGE_SUBTYPE_LID:
2641 		ibev->event = IB_EVENT_LID_CHANGE;
2642 		break;
2643 
2644 	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
2645 		ibev->event = IB_EVENT_PKEY_CHANGE;
2646 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
2647 		break;
2648 
2649 	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
2650 		ibev->event = IB_EVENT_GID_CHANGE;
2651 		break;
2652 
2653 	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
2654 		ibev->event = IB_EVENT_CLIENT_REREGISTER;
2655 		break;
2656 	default:
2657 		return -EINVAL;
2658 	}
2659 
2660 	return 0;
2661 }
2662 
2663 static void mlx5_ib_handle_event(struct work_struct *_work)
2664 {
2665 	struct mlx5_ib_event_work *work =
2666 		container_of(_work, struct mlx5_ib_event_work, work);
2667 	struct mlx5_ib_dev *ibdev;
2668 	struct ib_event ibev;
2669 	bool fatal = false;
2670 
2671 	if (work->is_slave) {
2672 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
2673 		if (!ibdev)
2674 			goto out;
2675 	} else {
2676 		ibdev = work->dev;
2677 	}
2678 
2679 	switch (work->event) {
2680 	case MLX5_DEV_EVENT_SYS_ERROR:
2681 		ibev.event = IB_EVENT_DEVICE_FATAL;
2682 		mlx5_ib_handle_internal_error(ibdev);
2683 		ibev.element.port_num  = (u8)(unsigned long)work->param;
2684 		fatal = true;
2685 		break;
2686 	case MLX5_EVENT_TYPE_PORT_CHANGE:
2687 		if (handle_port_change(ibdev, work->param, &ibev))
2688 			goto out;
2689 		break;
2690 	case MLX5_EVENT_TYPE_GENERAL_EVENT:
2691 		handle_general_event(ibdev, work->param, &ibev);
2692 		fallthrough;
2693 	default:
2694 		goto out;
2695 	}
2696 
2697 	ibev.device = &ibdev->ib_dev;
2698 
2699 	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
2700 		mlx5_ib_warn(ibdev, "warning: event on port %d\n",  ibev.element.port_num);
2701 		goto out;
2702 	}
2703 
2704 	if (ibdev->ib_active)
2705 		ib_dispatch_event(&ibev);
2706 
2707 	if (fatal)
2708 		ibdev->ib_active = false;
2709 out:
2710 	kfree(work);
2711 }
2712 
2713 static int mlx5_ib_event(struct notifier_block *nb,
2714 			 unsigned long event, void *param)
2715 {
2716 	struct mlx5_ib_event_work *work;
2717 
2718 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
2719 	if (!work)
2720 		return NOTIFY_DONE;
2721 
2722 	INIT_WORK(&work->work, mlx5_ib_handle_event);
2723 	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
2724 	work->is_slave = false;
2725 	work->param = param;
2726 	work->event = event;
2727 
2728 	queue_work(mlx5_ib_event_wq, &work->work);
2729 
2730 	return NOTIFY_OK;
2731 }
2732 
2733 static int mlx5_ib_event_slave_port(struct notifier_block *nb,
2734 				    unsigned long event, void *param)
2735 {
2736 	struct mlx5_ib_event_work *work;
2737 
2738 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
2739 	if (!work)
2740 		return NOTIFY_DONE;
2741 
2742 	INIT_WORK(&work->work, mlx5_ib_handle_event);
2743 	work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
2744 	work->is_slave = true;
2745 	work->param = param;
2746 	work->event = event;
2747 	queue_work(mlx5_ib_event_wq, &work->work);
2748 
2749 	return NOTIFY_OK;
2750 }
2751 
2752 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
2753 {
2754 	struct mlx5_hca_vport_context vport_ctx;
2755 	int err;
2756 	int port;
2757 
2758 	for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
2759 		dev->port_caps[port - 1].has_smi = false;
2760 		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
2761 		    MLX5_CAP_PORT_TYPE_IB) {
2762 			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
2763 				err = mlx5_query_hca_vport_context(dev->mdev, 0,
2764 								   port, 0,
2765 								   &vport_ctx);
2766 				if (err) {
2767 					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
2768 						    port, err);
2769 					return err;
2770 				}
2771 				dev->port_caps[port - 1].has_smi =
2772 					vport_ctx.has_smi;
2773 			} else {
2774 				dev->port_caps[port - 1].has_smi = true;
2775 			}
2776 		}
2777 	}
2778 	return 0;
2779 }
2780 
2781 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
2782 {
2783 	unsigned int port;
2784 
2785 	rdma_for_each_port (&dev->ib_dev, port)
2786 		mlx5_query_ext_port_caps(dev, port);
2787 }
2788 
2789 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
2790 {
2791 	switch (umr_fence_cap) {
2792 	case MLX5_CAP_UMR_FENCE_NONE:
2793 		return MLX5_FENCE_MODE_NONE;
2794 	case MLX5_CAP_UMR_FENCE_SMALL:
2795 		return MLX5_FENCE_MODE_INITIATOR_SMALL;
2796 	default:
2797 		return MLX5_FENCE_MODE_STRONG_ORDERING;
2798 	}
2799 }
2800 
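/*
 * Create the device resources shared by kernel consumers: a PD, a CQ,
 * two XRC domains, an XRC SRQ and a basic SRQ, plus the per-port P_Key
 * change work items.
 */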
2801 static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
2802 {
2803 	struct mlx5_ib_resources *devr = &dev->devr;
2804 	struct ib_srq_init_attr attr;
2805 	struct ib_device *ibdev;
2806 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
2807 	int port;
2808 	int ret = 0;
2809 
2810 	ibdev = &dev->ib_dev;
2811 
2812 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
2813 		return -EOPNOTSUPP;
2814 
2815 	devr->p0 = ib_alloc_pd(ibdev, 0);
2816 	if (IS_ERR(devr->p0))
2817 		return PTR_ERR(devr->p0);
2818 
2819 	devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
2820 	if (IS_ERR(devr->c0)) {
2821 		ret = PTR_ERR(devr->c0);
2822 		goto error1;
2823 	}
2824 
2825 	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
2826 	if (ret)
2827 		goto error2;
2828 
2829 	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
2830 	if (ret)
2831 		goto error3;
2832 
2833 	memset(&attr, 0, sizeof(attr));
2834 	attr.attr.max_sge = 1;
2835 	attr.attr.max_wr = 1;
2836 	attr.srq_type = IB_SRQT_XRC;
2837 	attr.ext.cq = devr->c0;
2838 
2839 	devr->s0 = ib_create_srq(devr->p0, &attr);
2840 	if (IS_ERR(devr->s0)) {
2841 		ret = PTR_ERR(devr->s0);
2842 		goto err_create;
2843 	}
2844 
2845 	memset(&attr, 0, sizeof(attr));
2846 	attr.attr.max_sge = 1;
2847 	attr.attr.max_wr = 1;
2848 	attr.srq_type = IB_SRQT_BASIC;
2849 
2850 	devr->s1 = ib_create_srq(devr->p0, &attr);
2851 	if (IS_ERR(devr->s1)) {
2852 		ret = PTR_ERR(devr->s1);
2853 		goto error6;
2854 	}
2855 
2856 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
2857 		INIT_WORK(&devr->ports[port].pkey_change_work,
2858 			  pkey_change_handler);
2859 
2860 	return 0;
2861 
2862 error6:
2863 	ib_destroy_srq(devr->s0);
2864 err_create:
2865 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2866 error3:
2867 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2868 error2:
2869 	ib_destroy_cq(devr->c0);
2870 error1:
2871 	ib_dealloc_pd(devr->p0);
2872 	return ret;
2873 }
2874 
2875 static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
2876 {
2877 	struct mlx5_ib_resources *devr = &dev->devr;
2878 	int port;
2879 
2880 	/*
2881 	 * Make sure no P_Key change work items are still executing.
2882 	 *
2883 	 * At this stage, the mlx5_ib_event notifier should already be
2884 	 * unregistered, which ensures that no new work items are added.
2885 	 */
2886 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
2887 		cancel_work_sync(&devr->ports[port].pkey_change_work);
2888 
2889 	ib_destroy_srq(devr->s1);
2890 	ib_destroy_srq(devr->s0);
2891 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2892 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2893 	ib_destroy_cq(devr->c0);
2894 	ib_dealloc_pd(devr->p0);
2895 }
2896 
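/*
 * Derive the RDMA core port capability flags from the link layer, the
 * RoCE version and L3 type capabilities, raw packet support and whether
 * GRH is required.
 */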
2897 static u32 get_core_cap_flags(struct ib_device *ibdev,
2898 			      struct mlx5_hca_vport_context *rep)
2899 {
2900 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
2901 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
2902 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
2903 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
2904 	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
2905 	u32 ret = 0;
2906 
2907 	if (rep->grh_required)
2908 		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
2909 
2910 	if (ll == IB_LINK_LAYER_INFINIBAND)
2911 		return ret | RDMA_CORE_PORT_IBA_IB;
2912 
2913 	if (raw_support)
2914 		ret |= RDMA_CORE_PORT_RAW_PACKET;
2915 
2916 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
2917 		return ret;
2918 
2919 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
2920 		return ret;
2921 
2922 	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
2923 		ret |= RDMA_CORE_PORT_IBA_ROCE;
2924 
2925 	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
2926 		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2927 
2928 	return ret;
2929 }
2930 
2931 static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
2932 			       struct ib_port_immutable *immutable)
2933 {
2934 	struct ib_port_attr attr;
2935 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
2936 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
2937 	struct mlx5_hca_vport_context rep = {0};
2938 	int err;
2939 
2940 	err = ib_query_port(ibdev, port_num, &attr);
2941 	if (err)
2942 		return err;
2943 
2944 	if (ll == IB_LINK_LAYER_INFINIBAND) {
2945 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
2946 						   &rep);
2947 		if (err)
2948 			return err;
2949 	}
2950 
2951 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
2952 	immutable->gid_tbl_len = attr.gid_tbl_len;
2953 	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
2954 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2955 
2956 	return 0;
2957 }
2958 
2959 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
2960 				   struct ib_port_immutable *immutable)
2961 {
2962 	struct ib_port_attr attr;
2963 	int err;
2964 
2965 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
2966 
2967 	err = ib_query_port(ibdev, port_num, &attr);
2968 	if (err)
2969 		return err;
2970 
2971 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
2972 	immutable->gid_tbl_len = attr.gid_tbl_len;
2973 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
2974 
2975 	return 0;
2976 }
2977 
2978 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
2979 {
2980 	struct mlx5_ib_dev *dev =
2981 		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
2982 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
2983 		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
2984 		 fw_rev_sub(dev->mdev));
2985 }
2986 
2987 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
2988 {
2989 	struct mlx5_core_dev *mdev = dev->mdev;
2990 	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
2991 								 MLX5_FLOW_NAMESPACE_LAG);
2992 	struct mlx5_flow_table *ft;
2993 	int err;
2994 
2995 	if (!ns || !mlx5_lag_is_active(mdev))
2996 		return 0;
2997 
2998 	err = mlx5_cmd_create_vport_lag(mdev);
2999 	if (err)
3000 		return err;
3001 
3002 	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
3003 	if (IS_ERR(ft)) {
3004 		err = PTR_ERR(ft);
3005 		goto err_destroy_vport_lag;
3006 	}
3007 
3008 	dev->flow_db->lag_demux_ft = ft;
3009 	dev->lag_active = true;
3010 	return 0;
3011 
3012 err_destroy_vport_lag:
3013 	mlx5_cmd_destroy_vport_lag(mdev);
3014 	return err;
3015 }
3016 
3017 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
3018 {
3019 	struct mlx5_core_dev *mdev = dev->mdev;
3020 
3021 	if (dev->lag_active) {
3022 		dev->lag_active = false;
3023 
3024 		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
3025 		dev->flow_db->lag_demux_ft = NULL;
3026 
3027 		mlx5_cmd_destroy_vport_lag(mdev);
3028 	}
3029 }
3030 
3031 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
3032 {
3033 	int err;
3034 
3035 	dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
3036 	err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
3037 	if (err) {
3038 		dev->port[port_num].roce.nb.notifier_call = NULL;
3039 		return err;
3040 	}
3041 
3042 	return 0;
3043 }
3044 
3045 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
3046 {
3047 	if (dev->port[port_num].roce.nb.notifier_call) {
3048 		unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
3049 		dev->port[port_num].roce.nb.notifier_call = NULL;
3050 	}
3051 }
3052 
3053 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
3054 {
3055 	int err;
3056 
3057 	if (!dev->is_rep && dev->profile != &raw_eth_profile) {
3058 		err = mlx5_nic_vport_enable_roce(dev->mdev);
3059 		if (err)
3060 			return err;
3061 	}
3062 
3063 	err = mlx5_eth_lag_init(dev);
3064 	if (err)
3065 		goto err_disable_roce;
3066 
3067 	return 0;
3068 
3069 err_disable_roce:
3070 	if (!dev->is_rep && dev->profile != &raw_eth_profile)
3071 		mlx5_nic_vport_disable_roce(dev->mdev);
3072 
3073 	return err;
3074 }
3075 
3076 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
3077 {
3078 	mlx5_eth_lag_cleanup(dev);
3079 	if (!dev->is_rep && dev->profile != &raw_eth_profile)
3080 		mlx5_nic_vport_disable_roce(dev->mdev);
3081 }
3082 
3083 static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
3084 				 enum rdma_netdev_t type,
3085 				 struct rdma_netdev_alloc_params *params)
3086 {
3087 	if (type != RDMA_NETDEV_IPOIB)
3088 		return -EOPNOTSUPP;
3089 
3090 	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
3091 }
3092 
3093 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
3094 				       size_t count, loff_t *pos)
3095 {
3096 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3097 	char lbuf[20];
3098 	int len;
3099 
3100 	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
3101 	return simple_read_from_buffer(buf, count, pos, lbuf, len);
3102 }
3103 
3104 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
3105 					size_t count, loff_t *pos)
3106 {
3107 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3108 	u32 timeout;
3109 	u32 var;
3110 
3111 	if (kstrtouint_from_user(buf, count, 0, &var))
3112 		return -EFAULT;
3113 
3114 	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
3115 			1000);
3116 	if (timeout != var)
3117 		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
3118 			    timeout);
3119 
3120 	delay_drop->timeout = timeout;
3121 
3122 	return count;
3123 }
3124 
3125 static const struct file_operations fops_delay_drop_timeout = {
3126 	.owner	= THIS_MODULE,
3127 	.open	= simple_open,
3128 	.write	= delay_drop_timeout_write,
3129 	.read	= delay_drop_timeout_read,
3130 };
3131 
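/*
 * Detach a slave port's multiport info from this IB device: remove its
 * notifiers, wait for outstanding users of the slave mdev to drop their
 * references and unaffiliate the vport in firmware.
 */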
3132 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
3133 				      struct mlx5_ib_multiport_info *mpi)
3134 {
3135 	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3136 	struct mlx5_ib_port *port = &ibdev->port[port_num];
3137 	int comps;
3138 	int err;
3139 	int i;
3140 
3141 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
3142 
3143 	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
3144 
3145 	spin_lock(&port->mp.mpi_lock);
3146 	if (!mpi->ibdev) {
3147 		spin_unlock(&port->mp.mpi_lock);
3148 		return;
3149 	}
3150 
3151 	mpi->ibdev = NULL;
3152 
3153 	spin_unlock(&port->mp.mpi_lock);
3154 	if (mpi->mdev_events.notifier_call)
3155 		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
3156 	mpi->mdev_events.notifier_call = NULL;
3157 	mlx5_remove_netdev_notifier(ibdev, port_num);
3158 	spin_lock(&port->mp.mpi_lock);
3159 
3160 	comps = mpi->mdev_refcnt;
3161 	if (comps) {
3162 		mpi->unaffiliate = true;
3163 		init_completion(&mpi->unref_comp);
3164 		spin_unlock(&port->mp.mpi_lock);
3165 
3166 		for (i = 0; i < comps; i++)
3167 			wait_for_completion(&mpi->unref_comp);
3168 
3169 		spin_lock(&port->mp.mpi_lock);
3170 		mpi->unaffiliate = false;
3171 	}
3172 
3173 	port->mp.mpi = NULL;
3174 
3175 	spin_unlock(&port->mp.mpi_lock);
3176 
3177 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
3178 
3179 	mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
3180 	/* Only log an error on failure; cleaning up the pointers and adding
3181 	 * the mpi back to the list are still needed.
3182 	 */
3183 	if (err)
3184 		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
3185 			    port_num + 1);
3186 
3187 	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
3188 }
3189 
3190 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
3191 				    struct mlx5_ib_multiport_info *mpi)
3192 {
3193 	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3194 	int err;
3195 
3196 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
3197 
3198 	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
3199 	if (ibdev->port[port_num].mp.mpi) {
3200 		mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
3201 			    port_num + 1);
3202 		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3203 		return false;
3204 	}
3205 
3206 	ibdev->port[port_num].mp.mpi = mpi;
3207 	mpi->ibdev = ibdev;
3208 	mpi->mdev_events.notifier_call = NULL;
3209 	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3210 
3211 	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3212 	if (err)
3213 		goto unbind;
3214 
3215 	err = mlx5_add_netdev_notifier(ibdev, port_num);
3216 	if (err) {
3217 		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
3218 			    port_num + 1);
3219 		goto unbind;
3220 	}
3221 
3222 	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
3223 	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
3224 
3225 	mlx5_ib_init_cong_debugfs(ibdev, port_num);
3226 
3227 	return true;
3228 
3229 unbind:
3230 	mlx5_ib_unbind_slave_port(ibdev, mpi);
3231 	return false;
3232 }
3233 
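/*
 * On a multiport master device with an Ethernet link layer, bind any
 * unaffiliated slave ports that share our system image GUID and add this
 * device to the global device list.
 */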
3234 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
3235 {
3236 	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3237 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
3238 							  port_num + 1);
3239 	struct mlx5_ib_multiport_info *mpi;
3240 	int err;
3241 	u32 i;
3242 
3243 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3244 		return 0;
3245 
3246 	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
3247 						     &dev->sys_image_guid);
3248 	if (err)
3249 		return err;
3250 
3251 	err = mlx5_nic_vport_enable_roce(dev->mdev);
3252 	if (err)
3253 		return err;
3254 
3255 	mutex_lock(&mlx5_ib_multiport_mutex);
3256 	for (i = 0; i < dev->num_ports; i++) {
3257 		bool bound = false;
3258 
3259 		/* build a stub multiport info struct for the native port. */
3260 		if (i == port_num) {
3261 			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
3262 			if (!mpi) {
3263 				mutex_unlock(&mlx5_ib_multiport_mutex);
3264 				mlx5_nic_vport_disable_roce(dev->mdev);
3265 				return -ENOMEM;
3266 			}
3267 
3268 			mpi->is_master = true;
3269 			mpi->mdev = dev->mdev;
3270 			mpi->sys_image_guid = dev->sys_image_guid;
3271 			dev->port[i].mp.mpi = mpi;
3272 			mpi->ibdev = dev;
3273 			mpi = NULL;
3274 			continue;
3275 		}
3276 
3277 		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
3278 				    list) {
3279 			if (dev->sys_image_guid == mpi->sys_image_guid &&
3280 			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
3281 				bound = mlx5_ib_bind_slave_port(dev, mpi);
3282 			}
3283 
3284 			if (bound) {
3285 				dev_dbg(mpi->mdev->device,
3286 					"removing port from unaffiliated list.\n");
3287 				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
3288 				list_del(&mpi->list);
3289 				break;
3290 			}
3291 		}
3292 		if (!bound)
3293 			mlx5_ib_dbg(dev, "no free port found for port %d\n",
3294 				    i + 1);
3295 	}
3296 
3297 	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
3298 	mutex_unlock(&mlx5_ib_multiport_mutex);
3299 	return err;
3300 }
3301 
3302 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
3303 {
3304 	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3305 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
3306 							  port_num + 1);
3307 	u32 i;
3308 
3309 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3310 		return;
3311 
3312 	mutex_lock(&mlx5_ib_multiport_mutex);
3313 	for (i = 0; i < dev->num_ports; i++) {
3314 		if (dev->port[i].mp.mpi) {
3315 			/* Destroy the native port stub */
3316 			if (i == port_num) {
3317 				kfree(dev->port[i].mp.mpi);
3318 				dev->port[i].mp.mpi = NULL;
3319 			} else {
3320 				mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
3321 					    i + 1);
3322 				list_add_tail(&dev->port[i].mp.mpi->list,
3323 					      &mlx5_ib_unaffiliated_port_list);
3324 				mlx5_ib_unbind_slave_port(dev,
3325 							  dev->port[i].mp.mpi);
3326 			}
3327 		}
3328 	}
3329 
3330 	mlx5_ib_dbg(dev, "removing from devlist\n");
3331 	list_del(&dev->ib_dev_list);
3332 	mutex_unlock(&mlx5_ib_multiport_mutex);
3333 
3334 	mlx5_nic_vport_disable_roce(dev->mdev);
3335 }
3336 
3337 static int mmap_obj_cleanup(struct ib_uobject *uobject,
3338 			    enum rdma_remove_reason why,
3339 			    struct uverbs_attr_bundle *attrs)
3340 {
3341 	struct mlx5_user_mmap_entry *obj = uobject->object;
3342 
3343 	rdma_user_mmap_entry_remove(&obj->rdma_entry);
3344 	return 0;
3345 }
3346 
3347 static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
3348 					    struct mlx5_user_mmap_entry *entry,
3349 					    size_t length)
3350 {
3351 	return rdma_user_mmap_entry_insert_range(
3352 		&c->ibucontext, &entry->rdma_entry, length,
3353 		(MLX5_IB_MMAP_OFFSET_START << 16),
3354 		((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
3355 }
3356 
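/*
 * Reserve a free page from the device's VAR table and expose it through a
 * user mmap entry covering one stride.
 */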
3357 static struct mlx5_user_mmap_entry *
3358 alloc_var_entry(struct mlx5_ib_ucontext *c)
3359 {
3360 	struct mlx5_user_mmap_entry *entry;
3361 	struct mlx5_var_table *var_table;
3362 	u32 page_idx;
3363 	int err;
3364 
3365 	var_table = &to_mdev(c->ibucontext.device)->var_table;
3366 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3367 	if (!entry)
3368 		return ERR_PTR(-ENOMEM);
3369 
3370 	mutex_lock(&var_table->bitmap_lock);
3371 	page_idx = find_first_zero_bit(var_table->bitmap,
3372 				       var_table->num_var_hw_entries);
3373 	if (page_idx >= var_table->num_var_hw_entries) {
3374 		err = -ENOSPC;
3375 		mutex_unlock(&var_table->bitmap_lock);
3376 		goto end;
3377 	}
3378 
3379 	set_bit(page_idx, var_table->bitmap);
3380 	mutex_unlock(&var_table->bitmap_lock);
3381 
3382 	entry->address = var_table->hw_start_addr +
3383 				(page_idx * var_table->stride_size);
3384 	entry->page_idx = page_idx;
3385 	entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
3386 
3387 	err = mlx5_rdma_user_mmap_entry_insert(c, entry,
3388 					       var_table->stride_size);
3389 	if (err)
3390 		goto err_insert;
3391 
3392 	return entry;
3393 
3394 err_insert:
3395 	mutex_lock(&var_table->bitmap_lock);
3396 	clear_bit(page_idx, var_table->bitmap);
3397 	mutex_unlock(&var_table->bitmap_lock);
3398 end:
3399 	kfree(entry);
3400 	return ERR_PTR(err);
3401 }
3402 
3403 static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
3404 	struct uverbs_attr_bundle *attrs)
3405 {
3406 	struct ib_uobject *uobj = uverbs_attr_get_uobject(
3407 		attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
3408 	struct mlx5_ib_ucontext *c;
3409 	struct mlx5_user_mmap_entry *entry;
3410 	u64 mmap_offset;
3411 	u32 length;
3412 	int err;
3413 
3414 	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
3415 	if (IS_ERR(c))
3416 		return PTR_ERR(c);
3417 
3418 	entry = alloc_var_entry(c);
3419 	if (IS_ERR(entry))
3420 		return PTR_ERR(entry);
3421 
3422 	mmap_offset = mlx5_entry_to_mmap_offset(entry);
3423 	length = entry->rdma_entry.npages * PAGE_SIZE;
3424 	uobj->object = entry;
3425 	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
3426 
3427 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
3428 			     &mmap_offset, sizeof(mmap_offset));
3429 	if (err)
3430 		return err;
3431 
3432 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
3433 			     &entry->page_idx, sizeof(entry->page_idx));
3434 	if (err)
3435 		return err;
3436 
3437 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
3438 			     &length, sizeof(length));
3439 	return err;
3440 }
3441 
3442 DECLARE_UVERBS_NAMED_METHOD(
3443 	MLX5_IB_METHOD_VAR_OBJ_ALLOC,
3444 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
3445 			MLX5_IB_OBJECT_VAR,
3446 			UVERBS_ACCESS_NEW,
3447 			UA_MANDATORY),
3448 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
3449 			   UVERBS_ATTR_TYPE(u32),
3450 			   UA_MANDATORY),
3451 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
3452 			   UVERBS_ATTR_TYPE(u32),
3453 			   UA_MANDATORY),
3454 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
3455 			    UVERBS_ATTR_TYPE(u64),
3456 			    UA_MANDATORY));
3457 
3458 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
3459 	MLX5_IB_METHOD_VAR_OBJ_DESTROY,
3460 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
3461 			MLX5_IB_OBJECT_VAR,
3462 			UVERBS_ACCESS_DESTROY,
3463 			UA_MANDATORY));
3464 
3465 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
3466 			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
3467 			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
3468 			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
3469 
3470 static bool var_is_supported(struct ib_device *device)
3471 {
3472 	struct mlx5_ib_dev *dev = to_mdev(device);
3473 
3474 	return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3475 			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
3476 }
3477 
3478 static struct mlx5_user_mmap_entry *
3479 alloc_uar_entry(struct mlx5_ib_ucontext *c,
3480 		enum mlx5_ib_uapi_uar_alloc_type alloc_type)
3481 {
3482 	struct mlx5_user_mmap_entry *entry;
3483 	struct mlx5_ib_dev *dev;
3484 	u32 uar_index;
3485 	int err;
3486 
3487 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3488 	if (!entry)
3489 		return ERR_PTR(-ENOMEM);
3490 
3491 	dev = to_mdev(c->ibucontext.device);
3492 	err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
3493 	if (err)
3494 		goto end;
3495 
3496 	entry->page_idx = uar_index;
3497 	entry->address = uar_index2paddress(dev, uar_index);
3498 	if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
3499 		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
3500 	else
3501 		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
3502 
3503 	err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
3504 	if (err)
3505 		goto err_insert;
3506 
3507 	return entry;
3508 
3509 err_insert:
3510 	mlx5_cmd_free_uar(dev->mdev, uar_index);
3511 end:
3512 	kfree(entry);
3513 	return ERR_PTR(err);
3514 }
3515 
3516 static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
3517 	struct uverbs_attr_bundle *attrs)
3518 {
3519 	struct ib_uobject *uobj = uverbs_attr_get_uobject(
3520 		attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
3521 	enum mlx5_ib_uapi_uar_alloc_type alloc_type;
3522 	struct mlx5_ib_ucontext *c;
3523 	struct mlx5_user_mmap_entry *entry;
3524 	u64 mmap_offset;
3525 	u32 length;
3526 	int err;
3527 
3528 	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
3529 	if (IS_ERR(c))
3530 		return PTR_ERR(c);
3531 
3532 	err = uverbs_get_const(&alloc_type, attrs,
3533 			       MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
3534 	if (err)
3535 		return err;
3536 
3537 	if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
3538 	    alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
3539 		return -EOPNOTSUPP;
3540 
3541 	if (!to_mdev(c->ibucontext.device)->wc_support &&
3542 	    alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
3543 		return -EOPNOTSUPP;
3544 
3545 	entry = alloc_uar_entry(c, alloc_type);
3546 	if (IS_ERR(entry))
3547 		return PTR_ERR(entry);
3548 
3549 	mmap_offset = mlx5_entry_to_mmap_offset(entry);
3550 	length = entry->rdma_entry.npages * PAGE_SIZE;
3551 	uobj->object = entry;
3552 	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
3553 
3554 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
3555 			     &mmap_offset, sizeof(mmap_offset));
3556 	if (err)
3557 		return err;
3558 
3559 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
3560 			     &entry->page_idx, sizeof(entry->page_idx));
3561 	if (err)
3562 		return err;
3563 
3564 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
3565 			     &length, sizeof(length));
3566 	return err;
3567 }
3568 
3569 DECLARE_UVERBS_NAMED_METHOD(
3570 	MLX5_IB_METHOD_UAR_OBJ_ALLOC,
3571 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
3572 			MLX5_IB_OBJECT_UAR,
3573 			UVERBS_ACCESS_NEW,
3574 			UA_MANDATORY),
3575 	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
3576 			     enum mlx5_ib_uapi_uar_alloc_type,
3577 			     UA_MANDATORY),
3578 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
3579 			   UVERBS_ATTR_TYPE(u32),
3580 			   UA_MANDATORY),
3581 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
3582 			   UVERBS_ATTR_TYPE(u32),
3583 			   UA_MANDATORY),
3584 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
3585 			    UVERBS_ATTR_TYPE(u64),
3586 			    UA_MANDATORY));
3587 
3588 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
3589 	MLX5_IB_METHOD_UAR_OBJ_DESTROY,
3590 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
3591 			MLX5_IB_OBJECT_UAR,
3592 			UVERBS_ACCESS_DESTROY,
3593 			UA_MANDATORY));
3594 
3595 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
3596 			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
3597 			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
3598 			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
3599 
3600 ADD_UVERBS_ATTRIBUTES_SIMPLE(
3601 	mlx5_ib_flow_action,
3602 	UVERBS_OBJECT_FLOW_ACTION,
3603 	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
3604 	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
3605 			     enum mlx5_ib_uapi_flow_action_flags));
3606 
3607 ADD_UVERBS_ATTRIBUTES_SIMPLE(
3608 	mlx5_ib_query_context,
3609 	UVERBS_OBJECT_DEVICE,
3610 	UVERBS_METHOD_QUERY_CONTEXT,
3611 	UVERBS_ATTR_PTR_OUT(
3612 		MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
3613 		UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp,
3614 				   dump_fill_mkey),
3615 		UA_MANDATORY));
3616 
3617 static const struct uapi_definition mlx5_ib_defs[] = {
3618 	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
3619 	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
3620 	UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
3621 	UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
3622 	UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
3623 
3624 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
3625 				&mlx5_ib_flow_action),
3626 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
3627 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
3628 				UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
3629 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
3630 	{}
3631 };
3632 
3633 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
3634 {
3635 	mlx5_ib_cleanup_multiport_master(dev);
3636 	WARN_ON(!xa_empty(&dev->odp_mkeys));
3637 	mutex_destroy(&dev->cap_mask_mutex);
3638 	WARN_ON(!xa_empty(&dev->sig_mrs));
3639 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
3640 }
3641 
3642 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
3643 {
3644 	struct mlx5_core_dev *mdev = dev->mdev;
3645 	int err;
3646 	int i;
3647 
3648 	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
3649 	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
3650 	dev->ib_dev.phys_port_cnt = dev->num_ports;
3651 	dev->ib_dev.dev.parent = mdev->device;
3652 	dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
3653 
3654 	for (i = 0; i < dev->num_ports; i++) {
3655 		spin_lock_init(&dev->port[i].mp.mpi_lock);
3656 		rwlock_init(&dev->port[i].roce.netdev_lock);
3657 		dev->port[i].roce.dev = dev;
3658 		dev->port[i].roce.native_port_num = i + 1;
3659 		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
3660 	}
3661 
3662 	err = mlx5_ib_init_multiport_master(dev);
3663 	if (err)
3664 		return err;
3665 
3666 	err = set_has_smi_cap(dev);
3667 	if (err)
3668 		goto err_mp;
3669 
3670 	err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
3671 	if (err)
3672 		goto err_mp;
3673 
3674 	if (mlx5_use_mad_ifc(dev))
3675 		get_ext_port_caps(dev);
3676 
3677 	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
3678 
3679 	mutex_init(&dev->cap_mask_mutex);
3680 	INIT_LIST_HEAD(&dev->qp_list);
3681 	spin_lock_init(&dev->reset_flow_resource_lock);
3682 	xa_init(&dev->odp_mkeys);
3683 	xa_init(&dev->sig_mrs);
3684 	atomic_set(&dev->mkey_var, 0);
3685 
3686 	spin_lock_init(&dev->dm.lock);
3687 	dev->dm.dev = mdev;
3688 	return 0;
3689 
3690 err_mp:
3691 	mlx5_ib_cleanup_multiport_master(dev);
3692 	return err;
3693 }
3694 
3695 static int mlx5_ib_enable_driver(struct ib_device *dev)
3696 {
3697 	struct mlx5_ib_dev *mdev = to_mdev(dev);
3698 	int ret;
3699 
3700 	ret = mlx5_ib_test_wc(mdev);
3701 	mlx5_ib_dbg(mdev, "Write-Combining %s\n",
3702 		    mdev->wc_support ? "supported" : "not supported");
3703 
3704 	return ret;
3705 }
3706 
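/*
 * Note (descriptive): mlx5_ib_dev_ops is the base verbs callback table. The
 * smaller op tables that follow (IPoIB enhanced offloads, SR-IOV, memory
 * windows, XRC, port handling, RoCE WQ/RWQ) are layered on top of it by the
 * profile stages further down, mostly depending on device capabilities.
 */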
3707 static const struct ib_device_ops mlx5_ib_dev_ops = {
3708 	.owner = THIS_MODULE,
3709 	.driver_id = RDMA_DRIVER_MLX5,
3710 	.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION,
3711 
3712 	.add_gid = mlx5_ib_add_gid,
3713 	.alloc_mr = mlx5_ib_alloc_mr,
3714 	.alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
3715 	.alloc_pd = mlx5_ib_alloc_pd,
3716 	.alloc_ucontext = mlx5_ib_alloc_ucontext,
3717 	.attach_mcast = mlx5_ib_mcg_attach,
3718 	.check_mr_status = mlx5_ib_check_mr_status,
3719 	.create_ah = mlx5_ib_create_ah,
3720 	.create_cq = mlx5_ib_create_cq,
3721 	.create_qp = mlx5_ib_create_qp,
3722 	.create_srq = mlx5_ib_create_srq,
3723 	.create_user_ah = mlx5_ib_create_ah,
3724 	.dealloc_pd = mlx5_ib_dealloc_pd,
3725 	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
3726 	.del_gid = mlx5_ib_del_gid,
3727 	.dereg_mr = mlx5_ib_dereg_mr,
3728 	.destroy_ah = mlx5_ib_destroy_ah,
3729 	.destroy_cq = mlx5_ib_destroy_cq,
3730 	.destroy_qp = mlx5_ib_destroy_qp,
3731 	.destroy_srq = mlx5_ib_destroy_srq,
3732 	.detach_mcast = mlx5_ib_mcg_detach,
3733 	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
3734 	.drain_rq = mlx5_ib_drain_rq,
3735 	.drain_sq = mlx5_ib_drain_sq,
3736 	.device_group = &mlx5_attr_group,
3737 	.enable_driver = mlx5_ib_enable_driver,
3738 	.get_dev_fw_str = get_dev_fw_str,
3739 	.get_dma_mr = mlx5_ib_get_dma_mr,
3740 	.get_link_layer = mlx5_ib_port_link_layer,
3741 	.map_mr_sg = mlx5_ib_map_mr_sg,
3742 	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
3743 	.mmap = mlx5_ib_mmap,
3744 	.mmap_free = mlx5_ib_mmap_free,
3745 	.modify_cq = mlx5_ib_modify_cq,
3746 	.modify_device = mlx5_ib_modify_device,
3747 	.modify_port = mlx5_ib_modify_port,
3748 	.modify_qp = mlx5_ib_modify_qp,
3749 	.modify_srq = mlx5_ib_modify_srq,
3750 	.poll_cq = mlx5_ib_poll_cq,
3751 	.post_recv = mlx5_ib_post_recv_nodrain,
3752 	.post_send = mlx5_ib_post_send_nodrain,
3753 	.post_srq_recv = mlx5_ib_post_srq_recv,
3754 	.process_mad = mlx5_ib_process_mad,
3755 	.query_ah = mlx5_ib_query_ah,
3756 	.query_device = mlx5_ib_query_device,
3757 	.query_gid = mlx5_ib_query_gid,
3758 	.query_pkey = mlx5_ib_query_pkey,
3759 	.query_qp = mlx5_ib_query_qp,
3760 	.query_srq = mlx5_ib_query_srq,
3761 	.query_ucontext = mlx5_ib_query_ucontext,
3762 	.reg_user_mr = mlx5_ib_reg_user_mr,
3763 	.reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
3764 	.req_notify_cq = mlx5_ib_arm_cq,
3765 	.rereg_user_mr = mlx5_ib_rereg_user_mr,
3766 	.resize_cq = mlx5_ib_resize_cq,
3767 
3768 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
3769 	INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
3770 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
3771 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
3772 	INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
3773 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
3774 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
3775 };
3776 
3777 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
3778 	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
3779 };
3780 
3781 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
3782 	.get_vf_config = mlx5_ib_get_vf_config,
3783 	.get_vf_guid = mlx5_ib_get_vf_guid,
3784 	.get_vf_stats = mlx5_ib_get_vf_stats,
3785 	.set_vf_guid = mlx5_ib_set_vf_guid,
3786 	.set_vf_link_state = mlx5_ib_set_vf_link_state,
3787 };
3788 
3789 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
3790 	.alloc_mw = mlx5_ib_alloc_mw,
3791 	.dealloc_mw = mlx5_ib_dealloc_mw,
3792 
3793 	INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
3794 };
3795 
3796 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
3797 	.alloc_xrcd = mlx5_ib_alloc_xrcd,
3798 	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
3799 
3800 	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
3801 };
3802 
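/*
 * VAR table sizing, as computed below: the doorbell BAR spans
 * 2^log_doorbell_bar_size 4K pages and is carved into entries of
 * 2^log_doorbell_stride bytes. Illustrative numbers (not taken from any
 * particular device): log_doorbell_bar_size = 12 and log_doorbell_stride = 6
 * give a 16 MiB region split into 262144 entries of 64 bytes each.
 */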
3803 static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
3804 {
3805 	struct mlx5_core_dev *mdev = dev->mdev;
3806 	struct mlx5_var_table *var_table = &dev->var_table;
3807 	u8 log_doorbell_bar_size;
3808 	u8 log_doorbell_stride;
3809 	u64 bar_size;
3810 
3811 	log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3812 					log_doorbell_bar_size);
3813 	log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3814 					log_doorbell_stride);
3815 	var_table->hw_start_addr = dev->mdev->bar_addr +
3816 				MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
3817 					doorbell_bar_offset);
3818 	bar_size = (1ULL << log_doorbell_bar_size) * 4096;
3819 	var_table->stride_size = 1ULL << log_doorbell_stride;
3820 	var_table->num_var_hw_entries = div_u64(bar_size,
3821 						var_table->stride_size);
3822 	mutex_init(&var_table->bitmap_lock);
3823 	var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
3824 					  GFP_KERNEL);
3825 	return (var_table->bitmap) ? 0 : -ENOMEM;
3826 }
3827 
3828 static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
3829 {
3830 	bitmap_free(dev->var_table.bitmap);
3831 }
3832 
3833 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
3834 {
3835 	struct mlx5_core_dev *mdev = dev->mdev;
3836 	int err;
3837 
3838 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
3839 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
3840 		ib_set_device_ops(&dev->ib_dev,
3841 				  &mlx5_ib_dev_ipoib_enhanced_ops);
3842 
3843 	if (mlx5_core_is_pf(mdev))
3844 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
3845 
3846 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
3847 
3848 	if (MLX5_CAP_GEN(mdev, imaicl))
3849 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
3850 
3851 	if (MLX5_CAP_GEN(mdev, xrc))
3852 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
3853 
3854 	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
3855 	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3856 	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
3857 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
3858 
3859 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
3860 
3861 	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
3862 		dev->ib_dev.driver_def = mlx5_ib_defs;
3863 
3864 	err = init_node_data(dev);
3865 	if (err)
3866 		return err;
3867 
3868 	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3869 	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
3870 	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
3871 		mutex_init(&dev->lb.mutex);
3872 
3873 	if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3874 			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
3875 		err = mlx5_ib_init_var_table(dev);
3876 		if (err)
3877 			return err;
3878 	}
3879 
3880 	dev->ib_dev.use_cq_dim = true;
3881 
3882 	return 0;
3883 }
3884 
3885 static const struct ib_device_ops mlx5_ib_dev_port_ops = {
3886 	.get_port_immutable = mlx5_port_immutable,
3887 	.query_port = mlx5_ib_query_port,
3888 };
3889 
3890 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
3891 {
3892 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
3893 	return 0;
3894 }
3895 
3896 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
3897 	.get_port_immutable = mlx5_port_rep_immutable,
3898 	.query_port = mlx5_ib_rep_query_port,
3899 	.query_pkey = mlx5_ib_rep_query_pkey,
3900 };
3901 
3902 static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
3903 {
3904 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
3905 	return 0;
3906 }
3907 
3908 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
3909 	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
3910 	.create_wq = mlx5_ib_create_wq,
3911 	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
3912 	.destroy_wq = mlx5_ib_destroy_wq,
3913 	.get_netdev = mlx5_ib_get_netdev,
3914 	.modify_wq = mlx5_ib_modify_wq,
3915 
3916 	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
3917 			   ib_rwq_ind_tbl),
3918 };
3919 
3920 static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
3921 {
3922 	struct mlx5_core_dev *mdev = dev->mdev;
3923 	enum rdma_link_layer ll;
3924 	int port_type_cap;
3925 	u32 port_num = 0;
3926 	int err;
3927 
3928 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
3929 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
3930 
3931 	if (ll == IB_LINK_LAYER_ETHERNET) {
3932 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
3933 
3934 		port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3935 
3936 		/* Register only for native ports */
3937 		err = mlx5_add_netdev_notifier(dev, port_num);
3938 		if (err)
3939 			return err;
3940 
3941 		err = mlx5_enable_eth(dev);
3942 		if (err)
3943 			goto cleanup;
3944 	}
3945 
3946 	return 0;
3947 cleanup:
3948 	mlx5_remove_netdev_notifier(dev, port_num);
3949 	return err;
3950 }
3951 
3952 static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
3953 {
3954 	struct mlx5_core_dev *mdev = dev->mdev;
3955 	enum rdma_link_layer ll;
3956 	int port_type_cap;
3957 	u32 port_num;
3958 
3959 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
3960 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
3961 
3962 	if (ll == IB_LINK_LAYER_ETHERNET) {
3963 		mlx5_disable_eth(dev);
3964 
3965 		port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3966 		mlx5_remove_netdev_notifier(dev, port_num);
3967 	}
3968 }
3969 
3970 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
3971 {
3972 	mlx5_ib_init_cong_debugfs(dev,
3973 				  mlx5_core_native_port_num(dev->mdev) - 1);
3974 	return 0;
3975 }
3976 
3977 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
3978 {
3979 	mlx5_ib_cleanup_cong_debugfs(dev,
3980 				     mlx5_core_native_port_num(dev->mdev) - 1);
3981 }
3982 
3983 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
3984 {
3985 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
3986 	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
3987 }
3988 
3989 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
3990 {
3991 	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
3992 }
3993 
3994 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
3995 {
3996 	int err;
3997 
3998 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
3999 	if (err)
4000 		return err;
4001 
4002 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4003 	if (err)
4004 		mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4005 
4006 	return err;
4007 }
4008 
4009 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
4010 {
4011 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4012 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4013 }
4014 
4015 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4016 {
4017 	const char *name;
4018 
4019 	if (!mlx5_lag_is_active(dev->mdev))
4020 		name = "mlx5_%d";
4021 	else
4022 		name = "mlx5_bond_%d";
4023 	return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
4024 }
4025 
4026 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
4027 {
4028 	int err;
4029 
4030 	err = mlx5_mr_cache_cleanup(dev);
4031 	if (err)
4032 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4033 
4034 	if (dev->umrc.qp)
4035 		ib_destroy_qp(dev->umrc.qp);
4036 	if (dev->umrc.cq)
4037 		ib_free_cq(dev->umrc.cq);
4038 	if (dev->umrc.pd)
4039 		ib_dealloc_pd(dev->umrc.pd);
4040 }
4041 
4042 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
4043 {
4044 	ib_unregister_device(&dev->ib_dev);
4045 }
4046 
4047 enum {
4048 	MAX_UMR_WR = 128,
4049 };
4050 
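/*
 * Note (descriptive): the init stage below builds the driver-internal UMR
 * resources - a PD, a softirq-polled CQ and a MLX5_IB_QPT_REG_UMR QP that is
 * walked through the INIT -> RTR -> RTS transitions - before initializing
 * the MR cache. Failures unwind through the error labels in reverse order
 * of creation.
 */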
4051 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
4052 {
4053 	struct ib_qp_init_attr *init_attr = NULL;
4054 	struct ib_qp_attr *attr = NULL;
4055 	struct ib_pd *pd;
4056 	struct ib_cq *cq;
4057 	struct ib_qp *qp;
4058 	int ret;
4059 
4060 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
4061 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
4062 	if (!attr || !init_attr) {
4063 		ret = -ENOMEM;
4064 		goto error_0;
4065 	}
4066 
4067 	pd = ib_alloc_pd(&dev->ib_dev, 0);
4068 	if (IS_ERR(pd)) {
4069 		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
4070 		ret = PTR_ERR(pd);
4071 		goto error_0;
4072 	}
4073 
4074 	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
4075 	if (IS_ERR(cq)) {
4076 		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
4077 		ret = PTR_ERR(cq);
4078 		goto error_2;
4079 	}
4080 
4081 	init_attr->send_cq = cq;
4082 	init_attr->recv_cq = cq;
4083 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
4084 	init_attr->cap.max_send_wr = MAX_UMR_WR;
4085 	init_attr->cap.max_send_sge = 1;
4086 	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
4087 	init_attr->port_num = 1;
4088 	qp = ib_create_qp(pd, init_attr);
4089 	if (IS_ERR(qp)) {
4090 		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
4091 		ret = PTR_ERR(qp);
4092 		goto error_3;
4093 	}
4094 
4095 	attr->qp_state = IB_QPS_INIT;
4096 	attr->port_num = 1;
4097 	ret = ib_modify_qp(qp, attr,
4098 			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
4099 	if (ret) {
4100 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
4101 		goto error_4;
4102 	}
4103 
4104 	memset(attr, 0, sizeof(*attr));
4105 	attr->qp_state = IB_QPS_RTR;
4106 	attr->path_mtu = IB_MTU_256;
4107 
4108 	ret = ib_modify_qp(qp, attr, IB_QP_STATE);
4109 	if (ret) {
4110 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
4111 		goto error_4;
4112 	}
4113 
4114 	memset(attr, 0, sizeof(*attr));
4115 	attr->qp_state = IB_QPS_RTS;
4116 	ret = ib_modify_qp(qp, attr, IB_QP_STATE);
4117 	if (ret) {
4118 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
4119 		goto error_4;
4120 	}
4121 
4122 	dev->umrc.qp = qp;
4123 	dev->umrc.cq = cq;
4124 	dev->umrc.pd = pd;
4125 
4126 	sema_init(&dev->umrc.sem, MAX_UMR_WR);
4127 	ret = mlx5_mr_cache_init(dev);
4128 	if (ret) {
4129 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4130 		goto error_4;
4131 	}
4132 
4133 	kfree(attr);
4134 	kfree(init_attr);
4135 
4136 	return 0;
4137 
4138 error_4:
4139 	ib_destroy_qp(qp);
4140 	dev->umrc.qp = NULL;
4141 
4142 error_3:
4143 	ib_free_cq(cq);
4144 	dev->umrc.cq = NULL;
4145 
4146 error_2:
4147 	ib_dealloc_pd(pd);
4148 	dev->umrc.pd = NULL;
4149 
4150 error_0:
4151 	kfree(attr);
4152 	kfree(init_attr);
4153 	return ret;
4154 }
4155 
4156 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
4157 {
4158 	struct dentry *root;
4159 
4160 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4161 		return 0;
4162 
4163 	mutex_init(&dev->delay_drop.lock);
4164 	dev->delay_drop.dev = dev;
4165 	dev->delay_drop.activate = false;
4166 	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
4167 	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
4168 	atomic_set(&dev->delay_drop.rqs_cnt, 0);
4169 	atomic_set(&dev->delay_drop.events_cnt, 0);
4170 
4171 	if (!mlx5_debugfs_root)
4172 		return 0;
4173 
4174 	root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root);
4175 	dev->delay_drop.dir_debugfs = root;
4176 
4177 	debugfs_create_atomic_t("num_timeout_events", 0400, root,
4178 				&dev->delay_drop.events_cnt);
4179 	debugfs_create_atomic_t("num_rqs", 0400, root,
4180 				&dev->delay_drop.rqs_cnt);
4181 	debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
4182 			    &fops_delay_drop_timeout);
4183 	return 0;
4184 }
4185 
4186 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
4187 {
4188 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4189 		return;
4190 
4191 	cancel_work_sync(&dev->delay_drop.delay_drop_work);
4192 	if (!dev->delay_drop.dir_debugfs)
4193 		return;
4194 
4195 	debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
4196 	dev->delay_drop.dir_debugfs = NULL;
4197 }
4198 
4199 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
4200 {
4201 	dev->mdev_events.notifier_call = mlx5_ib_event;
4202 	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
4203 	return 0;
4204 }
4205 
4206 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
4207 {
4208 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
4209 }
4210 
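/*
 * Note (descriptive): device setup and teardown are driven by a profile,
 * an ordered array of stages with optional init and cleanup callbacks.
 * __mlx5_ib_add() runs the init callbacks in order; __mlx5_ib_remove()
 * runs the cleanup callbacks of the first @stage stages in reverse order.
 */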
4211 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
4212 		      const struct mlx5_ib_profile *profile,
4213 		      int stage)
4214 {
4215 	dev->ib_active = false;
4216 
4217 	/* Clean up the initialized stages in reverse order */
4218 	while (stage) {
4219 		stage--;
4220 		if (profile->stage[stage].cleanup)
4221 			profile->stage[stage].cleanup(dev);
4222 	}
4223 
4224 	kfree(dev->port);
4225 	ib_dealloc_device(&dev->ib_dev);
4226 }
4227 
4228 int __mlx5_ib_add(struct mlx5_ib_dev *dev,
4229 		  const struct mlx5_ib_profile *profile)
4230 {
4231 	int err;
4232 	int i;
4233 
4234 	dev->profile = profile;
4235 
4236 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
4237 		if (profile->stage[i].init) {
4238 			err = profile->stage[i].init(dev);
4239 			if (err)
4240 				goto err_out;
4241 		}
4242 	}
4243 
4244 	dev->ib_active = true;
4245 	return 0;
4246 
4247 err_out:
4248 	/* Clean up stages which were initialized */
4249 	while (i) {
4250 		i--;
4251 		if (profile->stage[i].cleanup)
4252 			profile->stage[i].cleanup(dev);
4253 	}
4254 	return err;
4255 }
4256 
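/*
 * Note (descriptive): pf_profile is the full device profile. raw_eth_profile
 * below is selected by mlx5r_probe() when the link layer is Ethernet but
 * RoCE is not enabled; it uses the same stage list minus the ODP and
 * delay-drop stages and a raw-Ethernet port callback stage.
 */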
4257 static const struct mlx5_ib_profile pf_profile = {
4258 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
4259 		     mlx5_ib_stage_init_init,
4260 		     mlx5_ib_stage_init_cleanup),
4261 	STAGE_CREATE(MLX5_IB_STAGE_FS,
4262 		     mlx5_ib_fs_init,
4263 		     mlx5_ib_fs_cleanup),
4264 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
4265 		     mlx5_ib_stage_caps_init,
4266 		     mlx5_ib_stage_caps_cleanup),
4267 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
4268 		     mlx5_ib_stage_non_default_cb,
4269 		     NULL),
4270 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
4271 		     mlx5_ib_roce_init,
4272 		     mlx5_ib_roce_cleanup),
4273 	STAGE_CREATE(MLX5_IB_STAGE_QP,
4274 		     mlx5_init_qp_table,
4275 		     mlx5_cleanup_qp_table),
4276 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
4277 		     mlx5_init_srq_table,
4278 		     mlx5_cleanup_srq_table),
4279 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
4280 		     mlx5_ib_dev_res_init,
4281 		     mlx5_ib_dev_res_cleanup),
4282 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
4283 		     mlx5_ib_stage_dev_notifier_init,
4284 		     mlx5_ib_stage_dev_notifier_cleanup),
4285 	STAGE_CREATE(MLX5_IB_STAGE_ODP,
4286 		     mlx5_ib_odp_init_one,
4287 		     mlx5_ib_odp_cleanup_one),
4288 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
4289 		     mlx5_ib_counters_init,
4290 		     mlx5_ib_counters_cleanup),
4291 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
4292 		     mlx5_ib_stage_cong_debugfs_init,
4293 		     mlx5_ib_stage_cong_debugfs_cleanup),
4294 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
4295 		     mlx5_ib_stage_uar_init,
4296 		     mlx5_ib_stage_uar_cleanup),
4297 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
4298 		     mlx5_ib_stage_bfrag_init,
4299 		     mlx5_ib_stage_bfrag_cleanup),
4300 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
4301 		     NULL,
4302 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
4303 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
4304 		     mlx5_ib_devx_init,
4305 		     mlx5_ib_devx_cleanup),
4306 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
4307 		     mlx5_ib_stage_ib_reg_init,
4308 		     mlx5_ib_stage_ib_reg_cleanup),
4309 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
4310 		     mlx5_ib_stage_post_ib_reg_umr_init,
4311 		     NULL),
4312 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
4313 		     mlx5_ib_stage_delay_drop_init,
4314 		     mlx5_ib_stage_delay_drop_cleanup),
4315 	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
4316 		     mlx5_ib_restrack_init,
4317 		     NULL),
4318 };
4319 
4320 const struct mlx5_ib_profile raw_eth_profile = {
4321 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
4322 		     mlx5_ib_stage_init_init,
4323 		     mlx5_ib_stage_init_cleanup),
4324 	STAGE_CREATE(MLX5_IB_STAGE_FS,
4325 		     mlx5_ib_fs_init,
4326 		     mlx5_ib_fs_cleanup),
4327 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
4328 		     mlx5_ib_stage_caps_init,
4329 		     mlx5_ib_stage_caps_cleanup),
4330 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
4331 		     mlx5_ib_stage_raw_eth_non_default_cb,
4332 		     NULL),
4333 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
4334 		     mlx5_ib_roce_init,
4335 		     mlx5_ib_roce_cleanup),
4336 	STAGE_CREATE(MLX5_IB_STAGE_QP,
4337 		     mlx5_init_qp_table,
4338 		     mlx5_cleanup_qp_table),
4339 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
4340 		     mlx5_init_srq_table,
4341 		     mlx5_cleanup_srq_table),
4342 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
4343 		     mlx5_ib_dev_res_init,
4344 		     mlx5_ib_dev_res_cleanup),
4345 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
4346 		     mlx5_ib_stage_dev_notifier_init,
4347 		     mlx5_ib_stage_dev_notifier_cleanup),
4348 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
4349 		     mlx5_ib_counters_init,
4350 		     mlx5_ib_counters_cleanup),
4351 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
4352 		     mlx5_ib_stage_cong_debugfs_init,
4353 		     mlx5_ib_stage_cong_debugfs_cleanup),
4354 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
4355 		     mlx5_ib_stage_uar_init,
4356 		     mlx5_ib_stage_uar_cleanup),
4357 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
4358 		     mlx5_ib_stage_bfrag_init,
4359 		     mlx5_ib_stage_bfrag_cleanup),
4360 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
4361 		     NULL,
4362 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
4363 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
4364 		     mlx5_ib_devx_init,
4365 		     mlx5_ib_devx_cleanup),
4366 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
4367 		     mlx5_ib_stage_ib_reg_init,
4368 		     mlx5_ib_stage_ib_reg_cleanup),
4369 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
4370 		     mlx5_ib_stage_post_ib_reg_umr_init,
4371 		     NULL),
4372 	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
4373 		     mlx5_ib_restrack_init,
4374 		     NULL),
4375 };
4376 
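/*
 * Note (descriptive): the ".multiport" auxiliary device represents a slave
 * port of a multi-port HCA. Its probe routine tries to bind the port to an
 * already registered IB device with a matching system image GUID; if none
 * exists yet, the port is parked on the unaffiliated list until one appears.
 */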
4377 static int mlx5r_mp_probe(struct auxiliary_device *adev,
4378 			  const struct auxiliary_device_id *id)
4379 {
4380 	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
4381 	struct mlx5_core_dev *mdev = idev->mdev;
4382 	struct mlx5_ib_multiport_info *mpi;
4383 	struct mlx5_ib_dev *dev;
4384 	bool bound = false;
4385 	int err;
4386 
4387 	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
4388 	if (!mpi)
4389 		return -ENOMEM;
4390 
4391 	mpi->mdev = mdev;
4392 	err = mlx5_query_nic_vport_system_image_guid(mdev,
4393 						     &mpi->sys_image_guid);
4394 	if (err) {
4395 		kfree(mpi);
4396 		return err;
4397 	}
4398 
4399 	mutex_lock(&mlx5_ib_multiport_mutex);
4400 	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
4401 		if (dev->sys_image_guid == mpi->sys_image_guid)
4402 			bound = mlx5_ib_bind_slave_port(dev, mpi);
4403 
4404 		if (bound) {
4405 			rdma_roce_rescan_device(&dev->ib_dev);
4406 			mpi->ibdev->ib_active = true;
4407 			break;
4408 		}
4409 	}
4410 
4411 	if (!bound) {
4412 		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
4413 		dev_dbg(mdev->device,
4414 			"no suitable IB device found to bind to, added to unaffiliated list.\n");
4415 	}
4416 	mutex_unlock(&mlx5_ib_multiport_mutex);
4417 
4418 	dev_set_drvdata(&adev->dev, mpi);
4419 	return 0;
4420 }
4421 
4422 static void mlx5r_mp_remove(struct auxiliary_device *adev)
4423 {
4424 	struct mlx5_ib_multiport_info *mpi;
4425 
4426 	mpi = dev_get_drvdata(&adev->dev);
4427 	mutex_lock(&mlx5_ib_multiport_mutex);
4428 	if (mpi->ibdev)
4429 		mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
4430 	else
4431 		list_del(&mpi->list);
4432 	mutex_unlock(&mlx5_ib_multiport_mutex);
4433 	kfree(mpi);
4434 }
4435 
4436 static int mlx5r_probe(struct auxiliary_device *adev,
4437 		       const struct auxiliary_device_id *id)
4438 {
4439 	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
4440 	struct mlx5_core_dev *mdev = idev->mdev;
4441 	const struct mlx5_ib_profile *profile;
4442 	int port_type_cap, num_ports, ret;
4443 	enum rdma_link_layer ll;
4444 	struct mlx5_ib_dev *dev;
4445 
4446 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4447 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4448 
4449 	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
4450 			MLX5_CAP_GEN(mdev, num_vhca_ports));
4451 	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
4452 	if (!dev)
4453 		return -ENOMEM;
4454 	dev->port = kcalloc(num_ports, sizeof(*dev->port),
4455 			     GFP_KERNEL);
4456 	if (!dev->port) {
4457 		ib_dealloc_device(&dev->ib_dev);
4458 		return -ENOMEM;
4459 	}
4460 
4461 	dev->mdev = mdev;
4462 	dev->num_ports = num_ports;
4463 
4464 	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
4465 		profile = &raw_eth_profile;
4466 	else
4467 		profile = &pf_profile;
4468 
4469 	ret = __mlx5_ib_add(dev, profile);
4470 	if (ret) {
4471 		kfree(dev->port);
4472 		ib_dealloc_device(&dev->ib_dev);
4473 		return ret;
4474 	}
4475 
4476 	dev_set_drvdata(&adev->dev, dev);
4477 	return 0;
4478 }
4479 
4480 static void mlx5r_remove(struct auxiliary_device *adev)
4481 {
4482 	struct mlx5_ib_dev *dev;
4483 
4484 	dev = dev_get_drvdata(&adev->dev);
4485 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
4486 }
4487 
4488 static const struct auxiliary_device_id mlx5r_mp_id_table[] = {
4489 	{ .name = MLX5_ADEV_NAME ".multiport", },
4490 	{},
4491 };
4492 
4493 static const struct auxiliary_device_id mlx5r_id_table[] = {
4494 	{ .name = MLX5_ADEV_NAME ".rdma", },
4495 	{},
4496 };
4497 
4498 MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table);
4499 MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table);
4500 
4501 static struct auxiliary_driver mlx5r_mp_driver = {
4502 	.name = "multiport",
4503 	.probe = mlx5r_mp_probe,
4504 	.remove = mlx5r_mp_remove,
4505 	.id_table = mlx5r_mp_id_table,
4506 };
4507 
4508 static struct auxiliary_driver mlx5r_driver = {
4509 	.name = "rdma",
4510 	.probe = mlx5r_probe,
4511 	.remove = mlx5r_remove,
4512 	.id_table = mlx5r_id_table,
4513 };
4514 
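/*
 * Note (descriptive): module init below sets up the XLT emergency page, the
 * event workqueue, ODP, the representor infrastructure and the two auxiliary
 * drivers, in that order; the error labels and mlx5_ib_cleanup() undo these
 * steps essentially in reverse.
 */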
4515 static int __init mlx5_ib_init(void)
4516 {
4517 	int ret;
4518 
4519 	xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
4520 	if (!xlt_emergency_page)
4521 		return -ENOMEM;
4522 
4523 	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
4524 	if (!mlx5_ib_event_wq) {
4525 		free_page((unsigned long)xlt_emergency_page);
4526 		return -ENOMEM;
4527 	}
4528 
4529 	mlx5_ib_odp_init();
4530 	ret = mlx5r_rep_init();
4531 	if (ret)
4532 		goto rep_err;
4533 	ret = auxiliary_driver_register(&mlx5r_mp_driver);
4534 	if (ret)
4535 		goto mp_err;
4536 	ret = auxiliary_driver_register(&mlx5r_driver);
4537 	if (ret)
4538 		goto drv_err;
4539 	return 0;
4540 
4541 drv_err:
4542 	auxiliary_driver_unregister(&mlx5r_mp_driver);
4543 mp_err:
4544 	mlx5r_rep_cleanup();
4545 rep_err:
4546 	destroy_workqueue(mlx5_ib_event_wq);
4547 	free_page((unsigned long)xlt_emergency_page);
4548 	return ret;
4549 }
4550 
4551 static void __exit mlx5_ib_cleanup(void)
4552 {
4553 	auxiliary_driver_unregister(&mlx5r_driver);
4554 	auxiliary_driver_unregister(&mlx5r_mp_driver);
4555 	mlx5r_rep_cleanup();
4556 
4557 	destroy_workqueue(mlx5_ib_event_wq);
4558 	free_page((unsigned long)xlt_emergency_page);
4559 }
4560 
4561 module_init(mlx5_ib_init);
4562 module_exit(mlx5_ib_cleanup);
4563