xref: /openbmc/linux/drivers/infiniband/core/verbs.c (revision eaa163ca)
1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38 
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
44 #include <linux/in.h>
45 #include <linux/in6.h>
46 #include <net/addrconf.h>
47 #include <linux/security.h>
48 
49 #include <rdma/ib_verbs.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/rw.h>
53 #include <rdma/lag.h>
54 
55 #include "core_priv.h"
56 #include <trace/events/rdma_core.h>
57 
58 static int ib_resolve_eth_dmac(struct ib_device *device,
59 			       struct rdma_ah_attr *ah_attr);
60 
61 static const char * const ib_events[] = {
62 	[IB_EVENT_CQ_ERR]		= "CQ error",
63 	[IB_EVENT_QP_FATAL]		= "QP fatal error",
64 	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
65 	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
66 	[IB_EVENT_COMM_EST]		= "communication established",
67 	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
68 	[IB_EVENT_PATH_MIG]		= "path migration successful",
69 	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
70 	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
71 	[IB_EVENT_PORT_ACTIVE]		= "port active",
72 	[IB_EVENT_PORT_ERR]		= "port error",
73 	[IB_EVENT_LID_CHANGE]		= "LID change",
74 	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
75 	[IB_EVENT_SM_CHANGE]		= "SM change",
76 	[IB_EVENT_SRQ_ERR]		= "SRQ error",
77 	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
78 	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
79 	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
80 	[IB_EVENT_GID_CHANGE]		= "GID changed",
81 };
82 
83 const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
84 {
85 	size_t index = event;
86 
87 	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
88 			ib_events[index] : "unrecognized event";
89 }
90 EXPORT_SYMBOL(ib_event_msg);
91 
92 static const char * const wc_statuses[] = {
93 	[IB_WC_SUCCESS]			= "success",
94 	[IB_WC_LOC_LEN_ERR]		= "local length error",
95 	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
96 	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
97 	[IB_WC_LOC_PROT_ERR]		= "local protection error",
98 	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
99 	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
100 	[IB_WC_BAD_RESP_ERR]		= "bad response error",
101 	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
102 	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
103 	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
104 	[IB_WC_REM_OP_ERR]		= "remote operation error",
105 	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
106 	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
107 	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
108 	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
109 	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
110 	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
111 	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
112 	[IB_WC_FATAL_ERR]		= "fatal error",
113 	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
114 	[IB_WC_GENERAL_ERR]		= "general error",
115 };
116 
117 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
118 {
119 	size_t index = status;
120 
121 	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
122 			wc_statuses[index] : "unrecognized status";
123 }
124 EXPORT_SYMBOL(ib_wc_status_msg);
125 
126 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
127 {
128 	switch (rate) {
129 	case IB_RATE_2_5_GBPS: return   1;
130 	case IB_RATE_5_GBPS:   return   2;
131 	case IB_RATE_10_GBPS:  return   4;
132 	case IB_RATE_20_GBPS:  return   8;
133 	case IB_RATE_30_GBPS:  return  12;
134 	case IB_RATE_40_GBPS:  return  16;
135 	case IB_RATE_60_GBPS:  return  24;
136 	case IB_RATE_80_GBPS:  return  32;
137 	case IB_RATE_120_GBPS: return  48;
138 	case IB_RATE_14_GBPS:  return   6;
139 	case IB_RATE_56_GBPS:  return  22;
140 	case IB_RATE_112_GBPS: return  45;
141 	case IB_RATE_168_GBPS: return  67;
142 	case IB_RATE_25_GBPS:  return  10;
143 	case IB_RATE_100_GBPS: return  40;
144 	case IB_RATE_200_GBPS: return  80;
145 	case IB_RATE_300_GBPS: return 120;
146 	case IB_RATE_28_GBPS:  return  11;
147 	case IB_RATE_50_GBPS:  return  20;
148 	case IB_RATE_400_GBPS: return 160;
149 	case IB_RATE_600_GBPS: return 240;
150 	default:	       return  -1;
151 	}
152 }
153 EXPORT_SYMBOL(ib_rate_to_mult);
154 
155 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
156 {
157 	switch (mult) {
158 	case 1:   return IB_RATE_2_5_GBPS;
159 	case 2:   return IB_RATE_5_GBPS;
160 	case 4:   return IB_RATE_10_GBPS;
161 	case 8:   return IB_RATE_20_GBPS;
162 	case 12:  return IB_RATE_30_GBPS;
163 	case 16:  return IB_RATE_40_GBPS;
164 	case 24:  return IB_RATE_60_GBPS;
165 	case 32:  return IB_RATE_80_GBPS;
166 	case 48:  return IB_RATE_120_GBPS;
167 	case 6:   return IB_RATE_14_GBPS;
168 	case 22:  return IB_RATE_56_GBPS;
169 	case 45:  return IB_RATE_112_GBPS;
170 	case 67:  return IB_RATE_168_GBPS;
171 	case 10:  return IB_RATE_25_GBPS;
172 	case 40:  return IB_RATE_100_GBPS;
173 	case 80:  return IB_RATE_200_GBPS;
174 	case 120: return IB_RATE_300_GBPS;
175 	case 11:  return IB_RATE_28_GBPS;
176 	case 20:  return IB_RATE_50_GBPS;
177 	case 160: return IB_RATE_400_GBPS;
178 	case 240: return IB_RATE_600_GBPS;
179 	default:  return IB_RATE_PORT_CURRENT;
180 	}
181 }
182 EXPORT_SYMBOL(mult_to_ib_rate);
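/*
 * Example (illustrative sketch only): the two helpers above are inverses for
 * the rates they both know about, so a hypothetical caller could round-trip
 * a rate through the 2.5 Gb/s multiplier:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	(returns 16)
 *	enum ib_rate rate = mult_to_ib_rate(mult);	(IB_RATE_40_GBPS again)
 *
 * Unknown multipliers fall back to IB_RATE_PORT_CURRENT, and unknown rates
 * make ib_rate_to_mult() return -1.
 */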
183 
184 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
185 {
186 	switch (rate) {
187 	case IB_RATE_2_5_GBPS: return 2500;
188 	case IB_RATE_5_GBPS:   return 5000;
189 	case IB_RATE_10_GBPS:  return 10000;
190 	case IB_RATE_20_GBPS:  return 20000;
191 	case IB_RATE_30_GBPS:  return 30000;
192 	case IB_RATE_40_GBPS:  return 40000;
193 	case IB_RATE_60_GBPS:  return 60000;
194 	case IB_RATE_80_GBPS:  return 80000;
195 	case IB_RATE_120_GBPS: return 120000;
196 	case IB_RATE_14_GBPS:  return 14062;
197 	case IB_RATE_56_GBPS:  return 56250;
198 	case IB_RATE_112_GBPS: return 112500;
199 	case IB_RATE_168_GBPS: return 168750;
200 	case IB_RATE_25_GBPS:  return 25781;
201 	case IB_RATE_100_GBPS: return 103125;
202 	case IB_RATE_200_GBPS: return 206250;
203 	case IB_RATE_300_GBPS: return 309375;
204 	case IB_RATE_28_GBPS:  return 28125;
205 	case IB_RATE_50_GBPS:  return 53125;
206 	case IB_RATE_400_GBPS: return 425000;
207 	case IB_RATE_600_GBPS: return 637500;
208 	default:	       return -1;
209 	}
210 }
211 EXPORT_SYMBOL(ib_rate_to_mbps);
212 
213 __attribute_const__ enum rdma_transport_type
214 rdma_node_get_transport(unsigned int node_type)
215 {
216 
217 	if (node_type == RDMA_NODE_USNIC)
218 		return RDMA_TRANSPORT_USNIC;
219 	if (node_type == RDMA_NODE_USNIC_UDP)
220 		return RDMA_TRANSPORT_USNIC_UDP;
221 	if (node_type == RDMA_NODE_RNIC)
222 		return RDMA_TRANSPORT_IWARP;
223 	if (node_type == RDMA_NODE_UNSPECIFIED)
224 		return RDMA_TRANSPORT_UNSPECIFIED;
225 
226 	return RDMA_TRANSPORT_IB;
227 }
228 EXPORT_SYMBOL(rdma_node_get_transport);
229 
230 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
231 {
232 	enum rdma_transport_type lt;
233 	if (device->ops.get_link_layer)
234 		return device->ops.get_link_layer(device, port_num);
235 
236 	lt = rdma_node_get_transport(device->node_type);
237 	if (lt == RDMA_TRANSPORT_IB)
238 		return IB_LINK_LAYER_INFINIBAND;
239 
240 	return IB_LINK_LAYER_ETHERNET;
241 }
242 EXPORT_SYMBOL(rdma_port_get_link_layer);
243 
244 /* Protection domains */
245 
246 /**
247  * ib_alloc_pd - Allocates an unused protection domain.
248  * @device: The device on which to allocate the protection domain.
249  * @flags: protection domain flags
250  * @caller: caller's build-time module name
251  *
252  * A protection domain object provides an association between QPs, shared
253  * receive queues, address handles, memory regions, and memory windows.
254  *
255  * Every PD has a local_dma_lkey which can be used as the lkey value for local
256  * memory operations.
257  */
258 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
259 		const char *caller)
260 {
261 	struct ib_pd *pd;
262 	int mr_access_flags = 0;
263 	int ret;
264 
265 	pd = rdma_zalloc_drv_obj(device, ib_pd);
266 	if (!pd)
267 		return ERR_PTR(-ENOMEM);
268 
269 	pd->device = device;
270 	pd->uobject = NULL;
271 	pd->__internal_mr = NULL;
272 	atomic_set(&pd->usecnt, 0);
273 	pd->flags = flags;
274 
275 	pd->res.type = RDMA_RESTRACK_PD;
276 	rdma_restrack_set_task(&pd->res, caller);
277 
278 	ret = device->ops.alloc_pd(pd, NULL);
279 	if (ret) {
280 		kfree(pd);
281 		return ERR_PTR(ret);
282 	}
283 	rdma_restrack_kadd(&pd->res);
284 
285 	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
286 		pd->local_dma_lkey = device->local_dma_lkey;
287 	else
288 		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
289 
290 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
291 		pr_warn("%s: enabling unsafe global rkey\n", caller);
292 		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
293 	}
294 
295 	if (mr_access_flags) {
296 		struct ib_mr *mr;
297 
298 		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
299 		if (IS_ERR(mr)) {
300 			ib_dealloc_pd(pd);
301 			return ERR_CAST(mr);
302 		}
303 
304 		mr->device	= pd->device;
305 		mr->pd		= pd;
306 		mr->type        = IB_MR_TYPE_DMA;
307 		mr->uobject	= NULL;
308 		mr->need_inval	= false;
309 
310 		pd->__internal_mr = mr;
311 
312 		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
313 			pd->local_dma_lkey = pd->__internal_mr->lkey;
314 
315 		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
316 			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
317 	}
318 
319 	return pd;
320 }
321 EXPORT_SYMBOL(__ib_alloc_pd);
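/*
 * Example (illustrative sketch only): kernel ULPs normally reach this through
 * the ib_alloc_pd() wrapper macro, which supplies KBUILD_MODNAME as @caller.
 * 'device' is assumed to be a valid struct ib_device from the client's add
 * callback:
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */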
322 
323 /**
324  * ib_dealloc_pd_user - Deallocates a protection domain.
325  * @pd: The protection domain to deallocate.
326  * @udata: Valid user data or NULL for kernel object
327  *
328  * It is an error to call this function while any resources in the pd still
329  * exist.  The caller is responsible to synchronously destroy them and
330  * guarantee no new allocations will happen.
331  */
332 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
333 {
334 	int ret;
335 
336 	if (pd->__internal_mr) {
337 		ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
338 		WARN_ON(ret);
339 		pd->__internal_mr = NULL;
340 	}
341 
342 	/* uverbs manipulates usecnt with proper locking, while the kabi
343 	   requires the caller to guarantee we can't race here. */
344 	WARN_ON(atomic_read(&pd->usecnt));
345 
346 	rdma_restrack_del(&pd->res);
347 	pd->device->ops.dealloc_pd(pd, udata);
348 	kfree(pd);
349 }
350 EXPORT_SYMBOL(ib_dealloc_pd_user);
351 
352 /* Address handles */
353 
354 /**
355  * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
356  * @dest:       Pointer to destination ah_attr. Contents of the destination
357  *              pointer is assumed to be invalid and its attributes are overwritten.
358  * @src:        Pointer to source ah_attr.
359  */
360 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
361 		       const struct rdma_ah_attr *src)
362 {
363 	*dest = *src;
364 	if (dest->grh.sgid_attr)
365 		rdma_hold_gid_attr(dest->grh.sgid_attr);
366 }
367 EXPORT_SYMBOL(rdma_copy_ah_attr);
368 
369 /**
370  * rdma_replace_ah_attr - Replace valid ah_attr with a new one.
371  * @old:        Pointer to existing ah_attr which needs to be replaced.
372  *              old is assumed to be valid or zero'd
373  * @new:        Pointer to the new ah_attr.
374  *
375  * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
376  * the old ah_attr is valid; after that it copies the new attribute and holds
377  * a reference to its SGID attribute.
378  */
379 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
380 			  const struct rdma_ah_attr *new)
381 {
382 	rdma_destroy_ah_attr(old);
383 	*old = *new;
384 	if (old->grh.sgid_attr)
385 		rdma_hold_gid_attr(old->grh.sgid_attr);
386 }
387 EXPORT_SYMBOL(rdma_replace_ah_attr);
388 
389 /**
390  * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
391  * @dest:       Pointer to destination ah_attr to copy to.
392  *              dest is assumed to be valid or zero'd
393  * @src:        Pointer to the source ah_attr.
394  *
395  * rdma_move_ah_attr() first releases any reference in the destination ah_attr
396  * if it is valid. This also transfers ownership of internal references from
397  * src to dest, making src invalid in the process. No new reference of the src
398  * ah_attr is taken.
399  */
400 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
401 {
402 	rdma_destroy_ah_attr(dest);
403 	*dest = *src;
404 	src->grh.sgid_attr = NULL;
405 }
406 EXPORT_SYMBOL(rdma_move_ah_attr);
407 
408 /*
409  * Validate that the rdma_ah_attr is valid for the device before passing it
410  * off to the driver.
411  */
412 static int rdma_check_ah_attr(struct ib_device *device,
413 			      struct rdma_ah_attr *ah_attr)
414 {
415 	if (!rdma_is_port_valid(device, ah_attr->port_num))
416 		return -EINVAL;
417 
418 	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
419 	     ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
420 	    !(ah_attr->ah_flags & IB_AH_GRH))
421 		return -EINVAL;
422 
423 	if (ah_attr->grh.sgid_attr) {
424 		/*
425 		 * Make sure the passed sgid_attr is consistent with the
426 		 * parameters
427 		 */
428 		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
429 		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
430 			return -EINVAL;
431 	}
432 	return 0;
433 }
434 
435 /*
436  * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
437  * On success the caller is responsible to call rdma_unfill_sgid_attr().
438  */
439 static int rdma_fill_sgid_attr(struct ib_device *device,
440 			       struct rdma_ah_attr *ah_attr,
441 			       const struct ib_gid_attr **old_sgid_attr)
442 {
443 	const struct ib_gid_attr *sgid_attr;
444 	struct ib_global_route *grh;
445 	int ret;
446 
447 	*old_sgid_attr = ah_attr->grh.sgid_attr;
448 
449 	ret = rdma_check_ah_attr(device, ah_attr);
450 	if (ret)
451 		return ret;
452 
453 	if (!(ah_attr->ah_flags & IB_AH_GRH))
454 		return 0;
455 
456 	grh = rdma_ah_retrieve_grh(ah_attr);
457 	if (grh->sgid_attr)
458 		return 0;
459 
460 	sgid_attr =
461 		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
462 	if (IS_ERR(sgid_attr))
463 		return PTR_ERR(sgid_attr);
464 
465 	/* Move ownership of the kref into the ah_attr */
466 	grh->sgid_attr = sgid_attr;
467 	return 0;
468 }
469 
470 static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
471 				  const struct ib_gid_attr *old_sgid_attr)
472 {
473 	/*
474 	 * Fill didn't change anything, the caller retains ownership of
475 	 * whatever it passed
476 	 */
477 	if (ah_attr->grh.sgid_attr == old_sgid_attr)
478 		return;
479 
480 	/*
481 	 * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller
482 	 * doesn't see any change in the rdma_ah_attr. If we get here
483 	 * old_sgid_attr is NULL.
484 	 */
485 	rdma_destroy_ah_attr(ah_attr);
486 }
487 
488 static const struct ib_gid_attr *
489 rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
490 		      const struct ib_gid_attr *old_attr)
491 {
492 	if (old_attr)
493 		rdma_put_gid_attr(old_attr);
494 	if (ah_attr->ah_flags & IB_AH_GRH) {
495 		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
496 		return ah_attr->grh.sgid_attr;
497 	}
498 	return NULL;
499 }
500 
501 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
502 				     struct rdma_ah_attr *ah_attr,
503 				     u32 flags,
504 				     struct ib_udata *udata,
505 				     struct net_device *xmit_slave)
506 {
507 	struct rdma_ah_init_attr init_attr = {};
508 	struct ib_device *device = pd->device;
509 	struct ib_ah *ah;
510 	int ret;
511 
512 	might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
513 
514 	if (!device->ops.create_ah)
515 		return ERR_PTR(-EOPNOTSUPP);
516 
517 	ah = rdma_zalloc_drv_obj_gfp(
518 		device, ib_ah,
519 		(flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
520 	if (!ah)
521 		return ERR_PTR(-ENOMEM);
522 
523 	ah->device = device;
524 	ah->pd = pd;
525 	ah->type = ah_attr->type;
526 	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
527 	init_attr.ah_attr = ah_attr;
528 	init_attr.flags = flags;
529 	init_attr.xmit_slave = xmit_slave;
530 
531 	ret = device->ops.create_ah(ah, &init_attr, udata);
532 	if (ret) {
533 		kfree(ah);
534 		return ERR_PTR(ret);
535 	}
536 
537 	atomic_inc(&pd->usecnt);
538 	return ah;
539 }
540 
541 /**
542  * rdma_create_ah - Creates an address handle for the
543  * given address vector.
544  * @pd: The protection domain associated with the address handle.
545  * @ah_attr: The attributes of the address vector.
546  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
547  *
548  * On success it returns a newly allocated address handle; on error an ERR_PTR.
549  * The address handle is used to reference a local or global destination
550  * in all UD QP post sends.
551  */
552 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
553 			     u32 flags)
554 {
555 	const struct ib_gid_attr *old_sgid_attr;
556 	struct net_device *slave;
557 	struct ib_ah *ah;
558 	int ret;
559 
560 	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
561 	if (ret)
562 		return ERR_PTR(ret);
563 	slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
564 					   (flags & RDMA_CREATE_AH_SLEEPABLE) ?
565 					   GFP_KERNEL : GFP_ATOMIC);
566 	if (IS_ERR(slave)) {
567 		rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
568 		return (void *)slave;
569 	}
570 	ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
571 	rdma_lag_put_ah_roce_slave(slave);
572 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
573 	return ah;
574 }
575 EXPORT_SYMBOL(rdma_create_ah);
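/*
 * Example (illustrative sketch only): building an IB address vector by hand
 * and turning it into an AH. 'pd', 'port_num' and 'dlid' are assumed to be
 * supplied by the caller:
 *
 *	struct rdma_ah_attr ah_attr = {};
 *	struct ib_ah *ah;
 *
 *	ah_attr.type = rdma_ah_find_type(pd->device, port_num);
 *	rdma_ah_set_dlid(&ah_attr, dlid);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */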
576 
577 /**
578  * rdma_create_user_ah - Creates an address handle for the
579  * given address vector.
580  * It resolves destination mac address for ah attribute of RoCE type.
581  * @pd: The protection domain associated with the address handle.
582  * @ah_attr: The attributes of the address vector.
583  * @udata: pointer to user's input output buffer information needed by
584  *         provider driver.
585  *
586  * On success it returns a newly allocated address handle; on error an ERR_PTR.
587  * The address handle is used to reference a local or global destination
588  * in all UD QP post sends.
589  */
590 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
591 				  struct rdma_ah_attr *ah_attr,
592 				  struct ib_udata *udata)
593 {
594 	const struct ib_gid_attr *old_sgid_attr;
595 	struct ib_ah *ah;
596 	int err;
597 
598 	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
599 	if (err)
600 		return ERR_PTR(err);
601 
602 	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
603 		err = ib_resolve_eth_dmac(pd->device, ah_attr);
604 		if (err) {
605 			ah = ERR_PTR(err);
606 			goto out;
607 		}
608 	}
609 
610 	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
611 			     udata, NULL);
612 
613 out:
614 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
615 	return ah;
616 }
617 EXPORT_SYMBOL(rdma_create_user_ah);
618 
619 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
620 {
621 	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
622 	struct iphdr ip4h_checked;
623 	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
624 
625 	/* If it's IPv6, the version must be 6, otherwise, the first
626 	 * 20 bytes (before the IPv4 header) are garbled.
627 	 */
628 	if (ip6h->version != 6)
629 		return (ip4h->version == 4) ? 4 : 0;
630 	/* version may be 6 or 4 because the first 20 bytes could be garbled */
631 
632 	/* RoCE v2 requires no options, thus header length
633 	 * must be 5 words
634 	 */
635 	if (ip4h->ihl != 5)
636 		return 6;
637 
638 	/* Verify checksum.
639 	 * We can't write on scattered buffers so we need to copy to
640 	 * temp buffer.
641 	 */
642 	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
643 	ip4h_checked.check = 0;
644 	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
645 	/* if IPv4 header checksum is OK, believe it */
646 	if (ip4h->check == ip4h_checked.check)
647 		return 4;
648 	return 6;
649 }
650 EXPORT_SYMBOL(ib_get_rdma_header_version);
651 
652 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
653 						     u8 port_num,
654 						     const struct ib_grh *grh)
655 {
656 	int grh_version;
657 
658 	if (rdma_protocol_ib(device, port_num))
659 		return RDMA_NETWORK_IB;
660 
661 	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
662 
663 	if (grh_version == 4)
664 		return RDMA_NETWORK_IPV4;
665 
666 	if (grh->next_hdr == IPPROTO_UDP)
667 		return RDMA_NETWORK_IPV6;
668 
669 	return RDMA_NETWORK_ROCE_V1;
670 }
671 
672 struct find_gid_index_context {
673 	u16 vlan_id;
674 	enum ib_gid_type gid_type;
675 };
676 
677 static bool find_gid_index(const union ib_gid *gid,
678 			   const struct ib_gid_attr *gid_attr,
679 			   void *context)
680 {
681 	struct find_gid_index_context *ctx = context;
682 	u16 vlan_id = 0xffff;
683 	int ret;
684 
685 	if (ctx->gid_type != gid_attr->gid_type)
686 		return false;
687 
688 	ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
689 	if (ret)
690 		return false;
691 
692 	return ctx->vlan_id == vlan_id;
693 }
694 
695 static const struct ib_gid_attr *
696 get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
697 		       u16 vlan_id, const union ib_gid *sgid,
698 		       enum ib_gid_type gid_type)
699 {
700 	struct find_gid_index_context context = {.vlan_id = vlan_id,
701 						 .gid_type = gid_type};
702 
703 	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
704 				       &context);
705 }
706 
707 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
708 			      enum rdma_network_type net_type,
709 			      union ib_gid *sgid, union ib_gid *dgid)
710 {
711 	struct sockaddr_in  src_in;
712 	struct sockaddr_in  dst_in;
713 	__be32 src_saddr, dst_saddr;
714 
715 	if (!sgid || !dgid)
716 		return -EINVAL;
717 
718 	if (net_type == RDMA_NETWORK_IPV4) {
719 		memcpy(&src_in.sin_addr.s_addr,
720 		       &hdr->roce4grh.saddr, 4);
721 		memcpy(&dst_in.sin_addr.s_addr,
722 		       &hdr->roce4grh.daddr, 4);
723 		src_saddr = src_in.sin_addr.s_addr;
724 		dst_saddr = dst_in.sin_addr.s_addr;
725 		ipv6_addr_set_v4mapped(src_saddr,
726 				       (struct in6_addr *)sgid);
727 		ipv6_addr_set_v4mapped(dst_saddr,
728 				       (struct in6_addr *)dgid);
729 		return 0;
730 	} else if (net_type == RDMA_NETWORK_IPV6 ||
731 		   net_type == RDMA_NETWORK_IB) {
732 		*dgid = hdr->ibgrh.dgid;
733 		*sgid = hdr->ibgrh.sgid;
734 		return 0;
735 	} else {
736 		return -EINVAL;
737 	}
738 }
739 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
740 
741 /* Resolve destination mac address and hop limit for unicast destination
742  * GID entry, considering the source GID entry as well.
743  * The ah_attr must have a valid port_num and sgid_index.
744  */
745 static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
746 				       struct rdma_ah_attr *ah_attr)
747 {
748 	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
749 	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
750 	int hop_limit = 0xff;
751 	int ret = 0;
752 
753 	/* If destination is link local and source GID is RoCEv1,
754 	 * IP stack is not used.
755 	 */
756 	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
757 	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
758 		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
759 				ah_attr->roce.dmac);
760 		return ret;
761 	}
762 
763 	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
764 					   ah_attr->roce.dmac,
765 					   sgid_attr, &hop_limit);
766 
767 	grh->hop_limit = hop_limit;
768 	return ret;
769 }
770 
771 /*
772  * This function initializes address handle attributes from the incoming packet.
773  * The incoming packet carries the dgid of the receiver node on which this
774  * code is executing, and the sgid contains the GID of the sender.
775  *
776  * When resolving the destination mac address, the received dgid is used as
777  * the sgid, and the received sgid is used as the dgid, because the sgid
778  * holds the destination GID to respond to.
779  *
780  * On success the caller is responsible to call rdma_destroy_ah_attr on the
781  * attr.
782  */
783 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
784 			    const struct ib_wc *wc, const struct ib_grh *grh,
785 			    struct rdma_ah_attr *ah_attr)
786 {
787 	u32 flow_class;
788 	int ret;
789 	enum rdma_network_type net_type = RDMA_NETWORK_IB;
790 	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
791 	const struct ib_gid_attr *sgid_attr;
792 	int hoplimit = 0xff;
793 	union ib_gid dgid;
794 	union ib_gid sgid;
795 
796 	might_sleep();
797 
798 	memset(ah_attr, 0, sizeof *ah_attr);
799 	ah_attr->type = rdma_ah_find_type(device, port_num);
800 	if (rdma_cap_eth_ah(device, port_num)) {
801 		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
802 			net_type = wc->network_hdr_type;
803 		else
804 			net_type = ib_get_net_type_by_grh(device, port_num, grh);
805 		gid_type = ib_network_to_gid_type(net_type);
806 	}
807 	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
808 					&sgid, &dgid);
809 	if (ret)
810 		return ret;
811 
812 	rdma_ah_set_sl(ah_attr, wc->sl);
813 	rdma_ah_set_port_num(ah_attr, port_num);
814 
815 	if (rdma_protocol_roce(device, port_num)) {
816 		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
817 				wc->vlan_id : 0xffff;
818 
819 		if (!(wc->wc_flags & IB_WC_GRH))
820 			return -EPROTOTYPE;
821 
822 		sgid_attr = get_sgid_attr_from_eth(device, port_num,
823 						   vlan_id, &dgid,
824 						   gid_type);
825 		if (IS_ERR(sgid_attr))
826 			return PTR_ERR(sgid_attr);
827 
828 		flow_class = be32_to_cpu(grh->version_tclass_flow);
829 		rdma_move_grh_sgid_attr(ah_attr,
830 					&sgid,
831 					flow_class & 0xFFFFF,
832 					hoplimit,
833 					(flow_class >> 20) & 0xFF,
834 					sgid_attr);
835 
836 		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
837 		if (ret)
838 			rdma_destroy_ah_attr(ah_attr);
839 
840 		return ret;
841 	} else {
842 		rdma_ah_set_dlid(ah_attr, wc->slid);
843 		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
844 
845 		if ((wc->wc_flags & IB_WC_GRH) == 0)
846 			return 0;
847 
848 		if (dgid.global.interface_id !=
849 					cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
850 			sgid_attr = rdma_find_gid_by_port(
851 				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
852 		} else
853 			sgid_attr = rdma_get_gid_attr(device, port_num, 0);
854 
855 		if (IS_ERR(sgid_attr))
856 			return PTR_ERR(sgid_attr);
857 		flow_class = be32_to_cpu(grh->version_tclass_flow);
858 		rdma_move_grh_sgid_attr(ah_attr,
859 					&sgid,
860 					flow_class & 0xFFFFF,
861 					hoplimit,
862 					(flow_class >> 20) & 0xFF,
863 					sgid_attr);
864 
865 		return 0;
866 	}
867 }
868 EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
869 
870 /**
871  * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
872  * of the reference
873  *
874  * @attr:	Pointer to AH attribute structure
875  * @dgid:	Destination GID
876  * @flow_label:	Flow label
877  * @hop_limit:	Hop limit
878  * @traffic_class: traffic class
879  * @sgid_attr:	Pointer to SGID attribute
880  *
881  * This takes ownership of the sgid_attr reference. The caller must ensure
882  * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
883  * calling this function.
884  */
885 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
886 			     u32 flow_label, u8 hop_limit, u8 traffic_class,
887 			     const struct ib_gid_attr *sgid_attr)
888 {
889 	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
890 			traffic_class);
891 	attr->grh.sgid_attr = sgid_attr;
892 }
893 EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
894 
895 /**
896  * rdma_destroy_ah_attr - Release reference to SGID attribute of
897  * ah attribute.
898  * @ah_attr: Pointer to ah attribute
899  *
900  * Release reference to the SGID attribute of the ah attribute if it is
901  * non-NULL. It is safe to call this multiple times, and safe to call it on
902  * a zero initialized ah_attr.
903  */
904 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
905 {
906 	if (ah_attr->grh.sgid_attr) {
907 		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
908 		ah_attr->grh.sgid_attr = NULL;
909 	}
910 }
911 EXPORT_SYMBOL(rdma_destroy_ah_attr);
912 
913 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
914 				   const struct ib_grh *grh, u8 port_num)
915 {
916 	struct rdma_ah_attr ah_attr;
917 	struct ib_ah *ah;
918 	int ret;
919 
920 	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
921 	if (ret)
922 		return ERR_PTR(ret);
923 
924 	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
925 
926 	rdma_destroy_ah_attr(&ah_attr);
927 	return ah;
928 }
929 EXPORT_SYMBOL(ib_create_ah_from_wc);
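/*
 * Example (illustrative sketch only): a UD responder can build a reply AH
 * directly from a received completion. 'pd', 'wc', 'grh' and 'port_num' are
 * assumed to come from the caller's receive path:
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply on the UD QP using 'ah', then ...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */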
930 
931 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
932 {
933 	const struct ib_gid_attr *old_sgid_attr;
934 	int ret;
935 
936 	if (ah->type != ah_attr->type)
937 		return -EINVAL;
938 
939 	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
940 	if (ret)
941 		return ret;
942 
943 	ret = ah->device->ops.modify_ah ?
944 		ah->device->ops.modify_ah(ah, ah_attr) :
945 		-EOPNOTSUPP;
946 
947 	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
948 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
949 	return ret;
950 }
951 EXPORT_SYMBOL(rdma_modify_ah);
952 
953 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
954 {
955 	ah_attr->grh.sgid_attr = NULL;
956 
957 	return ah->device->ops.query_ah ?
958 		ah->device->ops.query_ah(ah, ah_attr) :
959 		-EOPNOTSUPP;
960 }
961 EXPORT_SYMBOL(rdma_query_ah);
962 
963 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
964 {
965 	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
966 	struct ib_pd *pd;
967 
968 	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
969 
970 	pd = ah->pd;
971 
972 	ah->device->ops.destroy_ah(ah, flags);
973 	atomic_dec(&pd->usecnt);
974 	if (sgid_attr)
975 		rdma_put_gid_attr(sgid_attr);
976 
977 	kfree(ah);
978 	return 0;
979 }
980 EXPORT_SYMBOL(rdma_destroy_ah_user);
981 
982 /* Shared receive queues */
983 
984 /**
985  * ib_create_srq_user - Creates a SRQ associated with the specified protection
986  *   domain.
987  * @pd: The protection domain associated with the SRQ.
988  * @srq_init_attr: A list of initial attributes required to create the
989  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
990  *   the actual capabilities of the created SRQ.
991  * @uobject: uobject pointer if this is not a kernel SRQ
992  * @udata: udata pointer if this is not a kernel SRQ
993  *
994  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
995  * requested size of the SRQ, and set to the actual values allocated
996  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
997  * will always be at least as large as the requested values.
998  */
999 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
1000 				  struct ib_srq_init_attr *srq_init_attr,
1001 				  struct ib_usrq_object *uobject,
1002 				  struct ib_udata *udata)
1003 {
1004 	struct ib_srq *srq;
1005 	int ret;
1006 
1007 	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
1008 	if (!srq)
1009 		return ERR_PTR(-ENOMEM);
1010 
1011 	srq->device = pd->device;
1012 	srq->pd = pd;
1013 	srq->event_handler = srq_init_attr->event_handler;
1014 	srq->srq_context = srq_init_attr->srq_context;
1015 	srq->srq_type = srq_init_attr->srq_type;
1016 	srq->uobject = uobject;
1017 
1018 	if (ib_srq_has_cq(srq->srq_type)) {
1019 		srq->ext.cq = srq_init_attr->ext.cq;
1020 		atomic_inc(&srq->ext.cq->usecnt);
1021 	}
1022 	if (srq->srq_type == IB_SRQT_XRC) {
1023 		srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
1024 		atomic_inc(&srq->ext.xrc.xrcd->usecnt);
1025 	}
1026 	atomic_inc(&pd->usecnt);
1027 
1028 	ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
1029 	if (ret) {
1030 		atomic_dec(&srq->pd->usecnt);
1031 		if (srq->srq_type == IB_SRQT_XRC)
1032 			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1033 		if (ib_srq_has_cq(srq->srq_type))
1034 			atomic_dec(&srq->ext.cq->usecnt);
1035 		kfree(srq);
1036 		return ERR_PTR(ret);
1037 	}
1038 
1039 	return srq;
1040 }
1041 EXPORT_SYMBOL(ib_create_srq_user);
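/*
 * Example (illustrative sketch only): a kernel consumer goes through the
 * ib_create_srq() wrapper, which passes NULL for uobject and udata. The queue
 * sizes below are arbitrary and are updated to the actual capabilities on
 * return:
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.srq_type = IB_SRQT_BASIC,
 *		.attr = { .max_wr = 128, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	...
 *	ib_destroy_srq(srq);
 */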
1042 
1043 int ib_modify_srq(struct ib_srq *srq,
1044 		  struct ib_srq_attr *srq_attr,
1045 		  enum ib_srq_attr_mask srq_attr_mask)
1046 {
1047 	return srq->device->ops.modify_srq ?
1048 		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
1049 					    NULL) : -EOPNOTSUPP;
1050 }
1051 EXPORT_SYMBOL(ib_modify_srq);
1052 
1053 int ib_query_srq(struct ib_srq *srq,
1054 		 struct ib_srq_attr *srq_attr)
1055 {
1056 	return srq->device->ops.query_srq ?
1057 		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1058 }
1059 EXPORT_SYMBOL(ib_query_srq);
1060 
1061 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1062 {
1063 	if (atomic_read(&srq->usecnt))
1064 		return -EBUSY;
1065 
1066 	srq->device->ops.destroy_srq(srq, udata);
1067 
1068 	atomic_dec(&srq->pd->usecnt);
1069 	if (srq->srq_type == IB_SRQT_XRC)
1070 		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1071 	if (ib_srq_has_cq(srq->srq_type))
1072 		atomic_dec(&srq->ext.cq->usecnt);
1073 	kfree(srq);
1074 
1075 	return 0;
1076 }
1077 EXPORT_SYMBOL(ib_destroy_srq_user);
1078 
1079 /* Queue pairs */
1080 
1081 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
1082 {
1083 	struct ib_qp *qp = context;
1084 	unsigned long flags;
1085 
1086 	spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
1087 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1088 		if (event->element.qp->event_handler)
1089 			event->element.qp->event_handler(event, event->element.qp->qp_context);
1090 	spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
1091 }
1092 
1093 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
1094 {
1095 	mutex_lock(&xrcd->tgt_qp_mutex);
1096 	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
1097 	mutex_unlock(&xrcd->tgt_qp_mutex);
1098 }
1099 
1100 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
1101 				  void (*event_handler)(struct ib_event *, void *),
1102 				  void *qp_context)
1103 {
1104 	struct ib_qp *qp;
1105 	unsigned long flags;
1106 	int err;
1107 
1108 	qp = kzalloc(sizeof *qp, GFP_KERNEL);
1109 	if (!qp)
1110 		return ERR_PTR(-ENOMEM);
1111 
1112 	qp->real_qp = real_qp;
1113 	err = ib_open_shared_qp_security(qp, real_qp->device);
1114 	if (err) {
1115 		kfree(qp);
1116 		return ERR_PTR(err);
1117 	}
1118 
1119 	qp->real_qp = real_qp;
1120 	atomic_inc(&real_qp->usecnt);
1121 	qp->device = real_qp->device;
1122 	qp->event_handler = event_handler;
1123 	qp->qp_context = qp_context;
1124 	qp->qp_num = real_qp->qp_num;
1125 	qp->qp_type = real_qp->qp_type;
1126 
1127 	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1128 	list_add(&qp->open_list, &real_qp->open_list);
1129 	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1130 
1131 	return qp;
1132 }
1133 
1134 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1135 			 struct ib_qp_open_attr *qp_open_attr)
1136 {
1137 	struct ib_qp *qp, *real_qp;
1138 
1139 	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
1140 		return ERR_PTR(-EINVAL);
1141 
1142 	qp = ERR_PTR(-EINVAL);
1143 	mutex_lock(&xrcd->tgt_qp_mutex);
1144 	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
1145 		if (real_qp->qp_num == qp_open_attr->qp_num) {
1146 			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1147 					  qp_open_attr->qp_context);
1148 			break;
1149 		}
1150 	}
1151 	mutex_unlock(&xrcd->tgt_qp_mutex);
1152 	return qp;
1153 }
1154 EXPORT_SYMBOL(ib_open_qp);
1155 
1156 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1157 					struct ib_qp_init_attr *qp_init_attr)
1158 {
1159 	struct ib_qp *real_qp = qp;
1160 
1161 	qp->event_handler = __ib_shared_qp_event_handler;
1162 	qp->qp_context = qp;
1163 	qp->pd = NULL;
1164 	qp->send_cq = qp->recv_cq = NULL;
1165 	qp->srq = NULL;
1166 	qp->xrcd = qp_init_attr->xrcd;
1167 	atomic_inc(&qp_init_attr->xrcd->usecnt);
1168 	INIT_LIST_HEAD(&qp->open_list);
1169 
1170 	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1171 			  qp_init_attr->qp_context);
1172 	if (IS_ERR(qp))
1173 		return qp;
1174 
1175 	__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
1176 	return qp;
1177 }
1178 
1179 /**
1180  * ib_create_qp - Creates a kernel QP associated with the specified protection
1181  *   domain.
1182  * @pd: The protection domain associated with the QP.
1183  * @qp_init_attr: A list of initial attributes required to create the
1184  *   QP.  If QP creation succeeds, then the attributes are updated to
1185  *   the actual capabilities of the created QP.
1186  *
1187  * NOTE: for user qp use ib_create_qp_user with valid udata!
1188  */
1189 struct ib_qp *ib_create_qp(struct ib_pd *pd,
1190 			   struct ib_qp_init_attr *qp_init_attr)
1191 {
1192 	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
1193 	struct ib_qp *qp;
1194 	int ret;
1195 
1196 	if (qp_init_attr->rwq_ind_tbl &&
1197 	    (qp_init_attr->recv_cq ||
1198 	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
1199 	    qp_init_attr->cap.max_recv_sge))
1200 		return ERR_PTR(-EINVAL);
1201 
1202 	if ((qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) &&
1203 	    !(device->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER))
1204 		return ERR_PTR(-EINVAL);
1205 
1206 	/*
1207 	 * If the caller is using the RDMA API, calculate the resources
1208 	 * needed for the RDMA READ/WRITE operations.
1209 	 *
1210 	 * Note that these callers need to pass in a port number.
1211 	 */
1212 	if (qp_init_attr->cap.max_rdma_ctxs)
1213 		rdma_rw_init_qp(device, qp_init_attr);
1214 
1215 	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
1216 	if (IS_ERR(qp))
1217 		return qp;
1218 
1219 	ret = ib_create_qp_security(qp, device);
1220 	if (ret)
1221 		goto err;
1222 
1223 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
1224 		struct ib_qp *xrc_qp =
1225 			create_xrc_qp_user(qp, qp_init_attr);
1226 
1227 		if (IS_ERR(xrc_qp)) {
1228 			ret = PTR_ERR(xrc_qp);
1229 			goto err;
1230 		}
1231 		return xrc_qp;
1232 	}
1233 
1234 	qp->event_handler = qp_init_attr->event_handler;
1235 	qp->qp_context = qp_init_attr->qp_context;
1236 	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
1237 		qp->recv_cq = NULL;
1238 		qp->srq = NULL;
1239 	} else {
1240 		qp->recv_cq = qp_init_attr->recv_cq;
1241 		if (qp_init_attr->recv_cq)
1242 			atomic_inc(&qp_init_attr->recv_cq->usecnt);
1243 		qp->srq = qp_init_attr->srq;
1244 		if (qp->srq)
1245 			atomic_inc(&qp_init_attr->srq->usecnt);
1246 	}
1247 
1248 	qp->send_cq = qp_init_attr->send_cq;
1249 	qp->xrcd    = NULL;
1250 
1251 	atomic_inc(&pd->usecnt);
1252 	if (qp_init_attr->send_cq)
1253 		atomic_inc(&qp_init_attr->send_cq->usecnt);
1254 	if (qp_init_attr->rwq_ind_tbl)
1255 		atomic_inc(&qp->rwq_ind_tbl->usecnt);
1256 
1257 	if (qp_init_attr->cap.max_rdma_ctxs) {
1258 		ret = rdma_rw_init_mrs(qp, qp_init_attr);
1259 		if (ret)
1260 			goto err;
1261 	}
1262 
1263 	/*
1264 	 * Note: all hw drivers guarantee that max_send_sge is lower than
1265 	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1266 	 * max_send_sge <= max_sge_rd.
1267 	 */
1268 	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1269 	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1270 				 device->attrs.max_sge_rd);
1271 	if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1272 		qp->integrity_en = true;
1273 
1274 	return qp;
1275 
1276 err:
1277 	ib_destroy_qp(qp);
1278 	return ERR_PTR(ret);
1279 
1280 }
1281 EXPORT_SYMBOL(ib_create_qp);
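/*
 * Example (illustrative sketch only): creating a kernel RC QP on an existing
 * PD and CQ. The capability values are arbitrary and are updated to the
 * actual values allocated by the driver:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	...
 *	ib_destroy_qp(qp);
 */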
1282 
1283 static const struct {
1284 	int			valid;
1285 	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
1286 	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
1287 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1288 	[IB_QPS_RESET] = {
1289 		[IB_QPS_RESET] = { .valid = 1 },
1290 		[IB_QPS_INIT]  = {
1291 			.valid = 1,
1292 			.req_param = {
1293 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1294 						IB_QP_PORT			|
1295 						IB_QP_QKEY),
1296 				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
1297 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
1298 						IB_QP_PORT			|
1299 						IB_QP_ACCESS_FLAGS),
1300 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
1301 						IB_QP_PORT			|
1302 						IB_QP_ACCESS_FLAGS),
1303 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1304 						IB_QP_PORT			|
1305 						IB_QP_ACCESS_FLAGS),
1306 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1307 						IB_QP_PORT			|
1308 						IB_QP_ACCESS_FLAGS),
1309 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1310 						IB_QP_QKEY),
1311 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1312 						IB_QP_QKEY),
1313 			}
1314 		},
1315 	},
1316 	[IB_QPS_INIT]  = {
1317 		[IB_QPS_RESET] = { .valid = 1 },
1318 		[IB_QPS_ERR] =   { .valid = 1 },
1319 		[IB_QPS_INIT]  = {
1320 			.valid = 1,
1321 			.opt_param = {
1322 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1323 						IB_QP_PORT			|
1324 						IB_QP_QKEY),
1325 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
1326 						IB_QP_PORT			|
1327 						IB_QP_ACCESS_FLAGS),
1328 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
1329 						IB_QP_PORT			|
1330 						IB_QP_ACCESS_FLAGS),
1331 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1332 						IB_QP_PORT			|
1333 						IB_QP_ACCESS_FLAGS),
1334 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1335 						IB_QP_PORT			|
1336 						IB_QP_ACCESS_FLAGS),
1337 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1338 						IB_QP_QKEY),
1339 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1340 						IB_QP_QKEY),
1341 			}
1342 		},
1343 		[IB_QPS_RTR]   = {
1344 			.valid = 1,
1345 			.req_param = {
1346 				[IB_QPT_UC]  = (IB_QP_AV			|
1347 						IB_QP_PATH_MTU			|
1348 						IB_QP_DEST_QPN			|
1349 						IB_QP_RQ_PSN),
1350 				[IB_QPT_RC]  = (IB_QP_AV			|
1351 						IB_QP_PATH_MTU			|
1352 						IB_QP_DEST_QPN			|
1353 						IB_QP_RQ_PSN			|
1354 						IB_QP_MAX_DEST_RD_ATOMIC	|
1355 						IB_QP_MIN_RNR_TIMER),
1356 				[IB_QPT_XRC_INI] = (IB_QP_AV			|
1357 						IB_QP_PATH_MTU			|
1358 						IB_QP_DEST_QPN			|
1359 						IB_QP_RQ_PSN),
1360 				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
1361 						IB_QP_PATH_MTU			|
1362 						IB_QP_DEST_QPN			|
1363 						IB_QP_RQ_PSN			|
1364 						IB_QP_MAX_DEST_RD_ATOMIC	|
1365 						IB_QP_MIN_RNR_TIMER),
1366 			},
1367 			.opt_param = {
1368 				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1369 						 IB_QP_QKEY),
1370 				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
1371 						 IB_QP_ACCESS_FLAGS		|
1372 						 IB_QP_PKEY_INDEX),
1373 				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
1374 						 IB_QP_ACCESS_FLAGS		|
1375 						 IB_QP_PKEY_INDEX),
1376 				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
1377 						 IB_QP_ACCESS_FLAGS		|
1378 						 IB_QP_PKEY_INDEX),
1379 				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
1380 						 IB_QP_ACCESS_FLAGS		|
1381 						 IB_QP_PKEY_INDEX),
1382 				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1383 						 IB_QP_QKEY),
1384 				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1385 						 IB_QP_QKEY),
1386 			 },
1387 		},
1388 	},
1389 	[IB_QPS_RTR]   = {
1390 		[IB_QPS_RESET] = { .valid = 1 },
1391 		[IB_QPS_ERR] =   { .valid = 1 },
1392 		[IB_QPS_RTS]   = {
1393 			.valid = 1,
1394 			.req_param = {
1395 				[IB_QPT_UD]  = IB_QP_SQ_PSN,
1396 				[IB_QPT_UC]  = IB_QP_SQ_PSN,
1397 				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
1398 						IB_QP_RETRY_CNT			|
1399 						IB_QP_RNR_RETRY			|
1400 						IB_QP_SQ_PSN			|
1401 						IB_QP_MAX_QP_RD_ATOMIC),
1402 				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
1403 						IB_QP_RETRY_CNT			|
1404 						IB_QP_RNR_RETRY			|
1405 						IB_QP_SQ_PSN			|
1406 						IB_QP_MAX_QP_RD_ATOMIC),
1407 				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
1408 						IB_QP_SQ_PSN),
1409 				[IB_QPT_SMI] = IB_QP_SQ_PSN,
1410 				[IB_QPT_GSI] = IB_QP_SQ_PSN,
1411 			},
1412 			.opt_param = {
1413 				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
1414 						 IB_QP_QKEY),
1415 				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
1416 						 IB_QP_ALT_PATH			|
1417 						 IB_QP_ACCESS_FLAGS		|
1418 						 IB_QP_PATH_MIG_STATE),
1419 				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
1420 						 IB_QP_ALT_PATH			|
1421 						 IB_QP_ACCESS_FLAGS		|
1422 						 IB_QP_MIN_RNR_TIMER		|
1423 						 IB_QP_PATH_MIG_STATE),
1424 				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1425 						 IB_QP_ALT_PATH			|
1426 						 IB_QP_ACCESS_FLAGS		|
1427 						 IB_QP_PATH_MIG_STATE),
1428 				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1429 						 IB_QP_ALT_PATH			|
1430 						 IB_QP_ACCESS_FLAGS		|
1431 						 IB_QP_MIN_RNR_TIMER		|
1432 						 IB_QP_PATH_MIG_STATE),
1433 				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
1434 						 IB_QP_QKEY),
1435 				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
1436 						 IB_QP_QKEY),
1437 				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1438 			 }
1439 		}
1440 	},
1441 	[IB_QPS_RTS]   = {
1442 		[IB_QPS_RESET] = { .valid = 1 },
1443 		[IB_QPS_ERR] =   { .valid = 1 },
1444 		[IB_QPS_RTS]   = {
1445 			.valid = 1,
1446 			.opt_param = {
1447 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1448 						IB_QP_QKEY),
1449 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1450 						IB_QP_ACCESS_FLAGS		|
1451 						IB_QP_ALT_PATH			|
1452 						IB_QP_PATH_MIG_STATE),
1453 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1454 						IB_QP_ACCESS_FLAGS		|
1455 						IB_QP_ALT_PATH			|
1456 						IB_QP_PATH_MIG_STATE		|
1457 						IB_QP_MIN_RNR_TIMER),
1458 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1459 						IB_QP_ACCESS_FLAGS		|
1460 						IB_QP_ALT_PATH			|
1461 						IB_QP_PATH_MIG_STATE),
1462 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1463 						IB_QP_ACCESS_FLAGS		|
1464 						IB_QP_ALT_PATH			|
1465 						IB_QP_PATH_MIG_STATE		|
1466 						IB_QP_MIN_RNR_TIMER),
1467 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1468 						IB_QP_QKEY),
1469 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1470 						IB_QP_QKEY),
1471 				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1472 			}
1473 		},
1474 		[IB_QPS_SQD]   = {
1475 			.valid = 1,
1476 			.opt_param = {
1477 				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1478 				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1479 				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1480 				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1481 				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1482 				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1483 				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1484 			}
1485 		},
1486 	},
1487 	[IB_QPS_SQD]   = {
1488 		[IB_QPS_RESET] = { .valid = 1 },
1489 		[IB_QPS_ERR] =   { .valid = 1 },
1490 		[IB_QPS_RTS]   = {
1491 			.valid = 1,
1492 			.opt_param = {
1493 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1494 						IB_QP_QKEY),
1495 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1496 						IB_QP_ALT_PATH			|
1497 						IB_QP_ACCESS_FLAGS		|
1498 						IB_QP_PATH_MIG_STATE),
1499 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1500 						IB_QP_ALT_PATH			|
1501 						IB_QP_ACCESS_FLAGS		|
1502 						IB_QP_MIN_RNR_TIMER		|
1503 						IB_QP_PATH_MIG_STATE),
1504 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1505 						IB_QP_ALT_PATH			|
1506 						IB_QP_ACCESS_FLAGS		|
1507 						IB_QP_PATH_MIG_STATE),
1508 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1509 						IB_QP_ALT_PATH			|
1510 						IB_QP_ACCESS_FLAGS		|
1511 						IB_QP_MIN_RNR_TIMER		|
1512 						IB_QP_PATH_MIG_STATE),
1513 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1514 						IB_QP_QKEY),
1515 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1516 						IB_QP_QKEY),
1517 			}
1518 		},
1519 		[IB_QPS_SQD]   = {
1520 			.valid = 1,
1521 			.opt_param = {
1522 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1523 						IB_QP_QKEY),
1524 				[IB_QPT_UC]  = (IB_QP_AV			|
1525 						IB_QP_ALT_PATH			|
1526 						IB_QP_ACCESS_FLAGS		|
1527 						IB_QP_PKEY_INDEX		|
1528 						IB_QP_PATH_MIG_STATE),
1529 				[IB_QPT_RC]  = (IB_QP_PORT			|
1530 						IB_QP_AV			|
1531 						IB_QP_TIMEOUT			|
1532 						IB_QP_RETRY_CNT			|
1533 						IB_QP_RNR_RETRY			|
1534 						IB_QP_MAX_QP_RD_ATOMIC		|
1535 						IB_QP_MAX_DEST_RD_ATOMIC	|
1536 						IB_QP_ALT_PATH			|
1537 						IB_QP_ACCESS_FLAGS		|
1538 						IB_QP_PKEY_INDEX		|
1539 						IB_QP_MIN_RNR_TIMER		|
1540 						IB_QP_PATH_MIG_STATE),
1541 				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
1542 						IB_QP_AV			|
1543 						IB_QP_TIMEOUT			|
1544 						IB_QP_RETRY_CNT			|
1545 						IB_QP_RNR_RETRY			|
1546 						IB_QP_MAX_QP_RD_ATOMIC		|
1547 						IB_QP_ALT_PATH			|
1548 						IB_QP_ACCESS_FLAGS		|
1549 						IB_QP_PKEY_INDEX		|
1550 						IB_QP_PATH_MIG_STATE),
1551 				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
1552 						IB_QP_AV			|
1553 						IB_QP_TIMEOUT			|
1554 						IB_QP_MAX_DEST_RD_ATOMIC	|
1555 						IB_QP_ALT_PATH			|
1556 						IB_QP_ACCESS_FLAGS		|
1557 						IB_QP_PKEY_INDEX		|
1558 						IB_QP_MIN_RNR_TIMER		|
1559 						IB_QP_PATH_MIG_STATE),
1560 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1561 						IB_QP_QKEY),
1562 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1563 						IB_QP_QKEY),
1564 			}
1565 		}
1566 	},
1567 	[IB_QPS_SQE]   = {
1568 		[IB_QPS_RESET] = { .valid = 1 },
1569 		[IB_QPS_ERR] =   { .valid = 1 },
1570 		[IB_QPS_RTS]   = {
1571 			.valid = 1,
1572 			.opt_param = {
1573 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1574 						IB_QP_QKEY),
1575 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1576 						IB_QP_ACCESS_FLAGS),
1577 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1578 						IB_QP_QKEY),
1579 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1580 						IB_QP_QKEY),
1581 			}
1582 		}
1583 	},
1584 	[IB_QPS_ERR] = {
1585 		[IB_QPS_RESET] = { .valid = 1 },
1586 		[IB_QPS_ERR] =   { .valid = 1 }
1587 	}
1588 };
1589 
1590 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1591 			enum ib_qp_type type, enum ib_qp_attr_mask mask)
1592 {
1593 	enum ib_qp_attr_mask req_param, opt_param;
1594 
1595 	if (mask & IB_QP_CUR_STATE  &&
1596 	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1597 	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1598 		return false;
1599 
1600 	if (!qp_state_table[cur_state][next_state].valid)
1601 		return false;
1602 
1603 	req_param = qp_state_table[cur_state][next_state].req_param[type];
1604 	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1605 
1606 	if ((mask & req_param) != req_param)
1607 		return false;
1608 
1609 	if (mask & ~(req_param | opt_param | IB_QP_STATE))
1610 		return false;
1611 
1612 	return true;
1613 }
1614 EXPORT_SYMBOL(ib_modify_qp_is_ok);
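/*
 * Example (illustrative sketch only): a driver validating a RESET -> INIT
 * transition for an RC QP would check the mask before touching hardware:
 *
 *	enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				    IB_QP_PORT | IB_QP_ACCESS_FLAGS;
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC, mask))
 *		return -EINVAL;
 *
 * This matches the RESET -> INIT row of qp_state_table above: every RC
 * req_param bit is present and no unexpected bit is set.
 */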
1615 
1616 /**
1617  * ib_resolve_eth_dmac - Resolve destination mac address
1618  * @device:		Device to consider
1619  * @ah_attr:		address handle attribute which describes the
1620  *			source and destination parameters
1621  * ib_resolve_eth_dmac() resolves the destination mac address and L3 hop limit.
1622  * It returns 0 on success or an appropriate error code, and it initializes the
1623  * necessary ah_attr fields when the call is successful.
1624  */
1625 static int ib_resolve_eth_dmac(struct ib_device *device,
1626 			       struct rdma_ah_attr *ah_attr)
1627 {
1628 	int ret = 0;
1629 
1630 	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1631 		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1632 			__be32 addr = 0;
1633 
1634 			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1635 			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1636 		} else {
1637 			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1638 					(char *)ah_attr->roce.dmac);
1639 		}
1640 	} else {
1641 		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1642 	}
1643 	return ret;
1644 }
1645 
1646 static bool is_qp_type_connected(const struct ib_qp *qp)
1647 {
1648 	return (qp->qp_type == IB_QPT_UC ||
1649 		qp->qp_type == IB_QPT_RC ||
1650 		qp->qp_type == IB_QPT_XRC_INI ||
1651 		qp->qp_type == IB_QPT_XRC_TGT);
1652 }
1653 
1654 /**
1655  * IB core internal function to perform QP attributes modification.
1656  */
1657 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1658 			 int attr_mask, struct ib_udata *udata)
1659 {
1660 	u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1661 	const struct ib_gid_attr *old_sgid_attr_av;
1662 	const struct ib_gid_attr *old_sgid_attr_alt_av;
1663 	int ret;
1664 
1665 	attr->xmit_slave = NULL;
1666 	if (attr_mask & IB_QP_AV) {
1667 		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1668 					  &old_sgid_attr_av);
1669 		if (ret)
1670 			return ret;
1671 
1672 		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1673 		    is_qp_type_connected(qp)) {
1674 			struct net_device *slave;
1675 
1676 			/*
1677 			 * If the user provided the qp_attr then we have to
1678 			 * resolve it. Kernel users have to provide already
1679 			 * resolved rdma_ah_attr's.
1680 			 */
1681 			if (udata) {
1682 				ret = ib_resolve_eth_dmac(qp->device,
1683 							  &attr->ah_attr);
1684 				if (ret)
1685 					goto out_av;
1686 			}
1687 			slave = rdma_lag_get_ah_roce_slave(qp->device,
1688 							   &attr->ah_attr,
1689 							   GFP_KERNEL);
1690 			if (IS_ERR(slave))
1691 				goto out_av;
1692 			attr->xmit_slave = slave;
1693 		}
1694 	}
1695 	if (attr_mask & IB_QP_ALT_PATH) {
1696 		/*
1697 		 * FIXME: This does not track the migration state, so if the
1698 		 * user loads a new alternate path after the HW has migrated
1699 		 * from primary->alternate we will keep the wrong
1700 		 * references. This is OK for IB because the reference
1701 		 * counting does not serve any functional purpose.
1702 		 */
1703 		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1704 					  &old_sgid_attr_alt_av);
1705 		if (ret)
1706 			goto out_av;
1707 
1708 		/*
1709 		 * Today the core code can only handle alternate paths and APM
1710 		 * for IB. Ban them in roce mode.
1711 		 */
1712 		if (!(rdma_protocol_ib(qp->device,
1713 				       attr->alt_ah_attr.port_num) &&
1714 		      rdma_protocol_ib(qp->device, port))) {
1715 			ret = -EINVAL;
1716 			goto out;
1717 		}
1718 	}
1719 
1720 	if (rdma_ib_or_roce(qp->device, port)) {
1721 		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1722 			dev_warn(&qp->device->dev,
1723 				 "%s rq_psn overflow, masking to 24 bits\n",
1724 				 __func__);
1725 			attr->rq_psn &= 0xffffff;
1726 		}
1727 
1728 		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1729 			dev_warn(&qp->device->dev,
1730 				 " %s sq_psn overflow, masking to 24 bits\n",
1731 				 __func__);
1732 			attr->sq_psn &= 0xffffff;
1733 		}
1734 	}
1735 
1736 	/*
	 * Bind this qp to a counter automatically based on the rdma counter
	 * rules. This is only done on the RESET to INIT transition, when the
	 * port is specified.
1739 	 */
1740 	if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1741 	    ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1742 		rdma_counter_bind_qp_auto(qp, attr->port_num);
1743 
1744 	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1745 	if (ret)
1746 		goto out;
1747 
1748 	if (attr_mask & IB_QP_PORT)
1749 		qp->port = attr->port_num;
1750 	if (attr_mask & IB_QP_AV)
1751 		qp->av_sgid_attr =
1752 			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1753 	if (attr_mask & IB_QP_ALT_PATH)
1754 		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1755 			&attr->alt_ah_attr, qp->alt_path_sgid_attr);
1756 
1757 out:
1758 	if (attr_mask & IB_QP_ALT_PATH)
1759 		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1760 out_av:
1761 	if (attr_mask & IB_QP_AV) {
1762 		rdma_lag_put_ah_roce_slave(attr->xmit_slave);
1763 		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1764 	}
1765 	return ret;
1766 }
1767 
1768 /**
1769  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1770  * @ib_qp: The QP to modify.
1771  * @attr: On input, specifies the QP attributes to modify.  On output,
1772  *   the current values of selected QP attributes are returned.
1773  * @attr_mask: A bit-mask used to specify which attributes of the QP
1774  *   are being modified.
 * @udata: pointer to the user's input/output buffer information
 *
 * Returns 0 on success, or an appropriate error code on failure.
1778  */
1779 int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1780 			    int attr_mask, struct ib_udata *udata)
1781 {
1782 	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1783 }
1784 EXPORT_SYMBOL(ib_modify_qp_with_udata);
1785 
1786 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1787 {
1788 	int rc;
1789 	u32 netdev_speed;
1790 	struct net_device *netdev;
1791 	struct ethtool_link_ksettings lksettings;
1792 
1793 	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1794 		return -EINVAL;
1795 
1796 	netdev = ib_device_get_netdev(dev, port_num);
1797 	if (!netdev)
1798 		return -ENODEV;
1799 
1800 	rtnl_lock();
1801 	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1802 	rtnl_unlock();
1803 
1804 	dev_put(netdev);
1805 
1806 	if (!rc) {
1807 		netdev_speed = lksettings.base.speed;
1808 	} else {
1809 		netdev_speed = SPEED_1000;
1810 		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1811 			netdev_speed);
1812 	}
1813 
1814 	if (netdev_speed <= SPEED_1000) {
1815 		*width = IB_WIDTH_1X;
1816 		*speed = IB_SPEED_SDR;
1817 	} else if (netdev_speed <= SPEED_10000) {
1818 		*width = IB_WIDTH_1X;
1819 		*speed = IB_SPEED_FDR10;
1820 	} else if (netdev_speed <= SPEED_20000) {
1821 		*width = IB_WIDTH_4X;
1822 		*speed = IB_SPEED_DDR;
1823 	} else if (netdev_speed <= SPEED_25000) {
1824 		*width = IB_WIDTH_1X;
1825 		*speed = IB_SPEED_EDR;
1826 	} else if (netdev_speed <= SPEED_40000) {
1827 		*width = IB_WIDTH_4X;
1828 		*speed = IB_SPEED_FDR10;
1829 	} else {
1830 		*width = IB_WIDTH_4X;
1831 		*speed = IB_SPEED_EDR;
1832 	}
1833 
1834 	return 0;
1835 }
1836 EXPORT_SYMBOL(ib_get_eth_speed);
1837 
1838 int ib_modify_qp(struct ib_qp *qp,
1839 		 struct ib_qp_attr *qp_attr,
1840 		 int qp_attr_mask)
1841 {
1842 	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1843 }
1844 EXPORT_SYMBOL(ib_modify_qp);
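
/*
 * Usage sketch (illustrative only, not part of this file's logic): a kernel
 * ULP that owns a QP on port 1 might move it to the INIT state roughly as
 * follows.  The pkey_index, port and access-flag values are assumptions
 * chosen for the example.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *	if (ret)
 *		pr_err("failed to move QP to INIT: %d\n", ret);
 */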
1845 
1846 int ib_query_qp(struct ib_qp *qp,
1847 		struct ib_qp_attr *qp_attr,
1848 		int qp_attr_mask,
1849 		struct ib_qp_init_attr *qp_init_attr)
1850 {
1851 	qp_attr->ah_attr.grh.sgid_attr = NULL;
1852 	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
1853 
1854 	return qp->device->ops.query_qp ?
1855 		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
1856 					 qp_init_attr) : -EOPNOTSUPP;
1857 }
1858 EXPORT_SYMBOL(ib_query_qp);
1859 
1860 int ib_close_qp(struct ib_qp *qp)
1861 {
1862 	struct ib_qp *real_qp;
1863 	unsigned long flags;
1864 
1865 	real_qp = qp->real_qp;
1866 	if (real_qp == qp)
1867 		return -EINVAL;
1868 
1869 	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1870 	list_del(&qp->open_list);
1871 	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1872 
1873 	atomic_dec(&real_qp->usecnt);
1874 	if (qp->qp_sec)
1875 		ib_close_shared_qp_security(qp->qp_sec);
1876 	kfree(qp);
1877 
1878 	return 0;
1879 }
1880 EXPORT_SYMBOL(ib_close_qp);
1881 
1882 static int __ib_destroy_shared_qp(struct ib_qp *qp)
1883 {
1884 	struct ib_xrcd *xrcd;
1885 	struct ib_qp *real_qp;
1886 	int ret;
1887 
1888 	real_qp = qp->real_qp;
1889 	xrcd = real_qp->xrcd;
1890 
1891 	mutex_lock(&xrcd->tgt_qp_mutex);
1892 	ib_close_qp(qp);
1893 	if (atomic_read(&real_qp->usecnt) == 0)
1894 		list_del(&real_qp->xrcd_list);
1895 	else
1896 		real_qp = NULL;
1897 	mutex_unlock(&xrcd->tgt_qp_mutex);
1898 
1899 	if (real_qp) {
1900 		ret = ib_destroy_qp(real_qp);
1901 		if (!ret)
1902 			atomic_dec(&xrcd->usecnt);
1903 		else
1904 			__ib_insert_xrcd_qp(xrcd, real_qp);
1905 	}
1906 
1907 	return 0;
1908 }
1909 
1910 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
1911 {
1912 	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
1913 	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
1914 	struct ib_pd *pd;
1915 	struct ib_cq *scq, *rcq;
1916 	struct ib_srq *srq;
1917 	struct ib_rwq_ind_table *ind_tbl;
1918 	struct ib_qp_security *sec;
1919 	int ret;
1920 
1921 	WARN_ON_ONCE(qp->mrs_used > 0);
1922 
1923 	if (atomic_read(&qp->usecnt))
1924 		return -EBUSY;
1925 
1926 	if (qp->real_qp != qp)
1927 		return __ib_destroy_shared_qp(qp);
1928 
1929 	pd   = qp->pd;
1930 	scq  = qp->send_cq;
1931 	rcq  = qp->recv_cq;
1932 	srq  = qp->srq;
1933 	ind_tbl = qp->rwq_ind_tbl;
1934 	sec  = qp->qp_sec;
1935 	if (sec)
1936 		ib_destroy_qp_security_begin(sec);
1937 
1938 	if (!qp->uobject)
1939 		rdma_rw_cleanup_mrs(qp);
1940 
1941 	rdma_counter_unbind_qp(qp, true);
1942 	rdma_restrack_del(&qp->res);
1943 	ret = qp->device->ops.destroy_qp(qp, udata);
1944 	if (!ret) {
1945 		if (alt_path_sgid_attr)
1946 			rdma_put_gid_attr(alt_path_sgid_attr);
1947 		if (av_sgid_attr)
1948 			rdma_put_gid_attr(av_sgid_attr);
1949 		if (pd)
1950 			atomic_dec(&pd->usecnt);
1951 		if (scq)
1952 			atomic_dec(&scq->usecnt);
1953 		if (rcq)
1954 			atomic_dec(&rcq->usecnt);
1955 		if (srq)
1956 			atomic_dec(&srq->usecnt);
1957 		if (ind_tbl)
1958 			atomic_dec(&ind_tbl->usecnt);
1959 		if (sec)
1960 			ib_destroy_qp_security_end(sec);
1961 	} else {
1962 		if (sec)
1963 			ib_destroy_qp_security_abort(sec);
1964 	}
1965 
1966 	return ret;
1967 }
1968 EXPORT_SYMBOL(ib_destroy_qp_user);
1969 
1970 /* Completion queues */
1971 
1972 struct ib_cq *__ib_create_cq(struct ib_device *device,
1973 			     ib_comp_handler comp_handler,
1974 			     void (*event_handler)(struct ib_event *, void *),
1975 			     void *cq_context,
1976 			     const struct ib_cq_init_attr *cq_attr,
1977 			     const char *caller)
1978 {
1979 	struct ib_cq *cq;
1980 	int ret;
1981 
1982 	cq = rdma_zalloc_drv_obj(device, ib_cq);
1983 	if (!cq)
1984 		return ERR_PTR(-ENOMEM);
1985 
1986 	cq->device = device;
1987 	cq->uobject = NULL;
1988 	cq->comp_handler = comp_handler;
1989 	cq->event_handler = event_handler;
1990 	cq->cq_context = cq_context;
1991 	atomic_set(&cq->usecnt, 0);
1992 	cq->res.type = RDMA_RESTRACK_CQ;
1993 	rdma_restrack_set_task(&cq->res, caller);
1994 
1995 	ret = device->ops.create_cq(cq, cq_attr, NULL);
1996 	if (ret) {
1997 		kfree(cq);
1998 		return ERR_PTR(ret);
1999 	}
2000 
2001 	rdma_restrack_kadd(&cq->res);
2002 	return cq;
2003 }
2004 EXPORT_SYMBOL(__ib_create_cq);
2005 
2006 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2007 {
2008 	if (cq->shared)
2009 		return -EOPNOTSUPP;
2010 
2011 	return cq->device->ops.modify_cq ?
2012 		cq->device->ops.modify_cq(cq, cq_count,
2013 					  cq_period) : -EOPNOTSUPP;
2014 }
2015 EXPORT_SYMBOL(rdma_set_cq_moderation);
2016 
2017 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
2018 {
2019 	if (WARN_ON_ONCE(cq->shared))
2020 		return -EOPNOTSUPP;
2021 
2022 	if (atomic_read(&cq->usecnt))
2023 		return -EBUSY;
2024 
2025 	rdma_restrack_del(&cq->res);
2026 	cq->device->ops.destroy_cq(cq, udata);
2027 	kfree(cq);
2028 	return 0;
2029 }
2030 EXPORT_SYMBOL(ib_destroy_cq_user);
2031 
2032 int ib_resize_cq(struct ib_cq *cq, int cqe)
2033 {
2034 	if (cq->shared)
2035 		return -EOPNOTSUPP;
2036 
2037 	return cq->device->ops.resize_cq ?
2038 		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
2039 }
2040 EXPORT_SYMBOL(ib_resize_cq);
2041 
2042 /* Memory regions */
2043 
2044 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2045 			     u64 virt_addr, int access_flags)
2046 {
2047 	struct ib_mr *mr;
2048 
2049 	if (access_flags & IB_ACCESS_ON_DEMAND) {
2050 		if (!(pd->device->attrs.device_cap_flags &
2051 		      IB_DEVICE_ON_DEMAND_PAGING)) {
2052 			pr_debug("ODP support not available\n");
2053 			return ERR_PTR(-EINVAL);
2054 		}
2055 	}
2056 
2057 	mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
2058 					 access_flags, NULL);
2059 
2060 	if (IS_ERR(mr))
2061 		return mr;
2062 
2063 	mr->device = pd->device;
2064 	mr->pd = pd;
2065 	mr->dm = NULL;
2066 	atomic_inc(&pd->usecnt);
2067 	mr->res.type = RDMA_RESTRACK_MR;
2068 	rdma_restrack_kadd(&mr->res);
2069 
2070 	return mr;
2071 }
2072 EXPORT_SYMBOL(ib_reg_user_mr);
2073 
2074 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
2075 		 u32 flags, struct ib_sge *sg_list, u32 num_sge)
2076 {
2077 	if (!pd->device->ops.advise_mr)
2078 		return -EOPNOTSUPP;
2079 
2080 	return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
2081 					 NULL);
2082 }
2083 EXPORT_SYMBOL(ib_advise_mr);
2084 
2085 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
2086 {
2087 	struct ib_pd *pd = mr->pd;
2088 	struct ib_dm *dm = mr->dm;
2089 	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2090 	int ret;
2091 
2092 	trace_mr_dereg(mr);
2093 	rdma_restrack_del(&mr->res);
2094 	ret = mr->device->ops.dereg_mr(mr, udata);
2095 	if (!ret) {
2096 		atomic_dec(&pd->usecnt);
2097 		if (dm)
2098 			atomic_dec(&dm->usecnt);
2099 		kfree(sig_attrs);
2100 	}
2101 
2102 	return ret;
2103 }
2104 EXPORT_SYMBOL(ib_dereg_mr_user);
2105 
2106 /**
2107  * ib_alloc_mr_user() - Allocates a memory region
2108  * @pd:            protection domain associated with the region
2109  * @mr_type:       memory region type
2110  * @max_num_sg:    maximum sg entries available for registration.
2111  * @udata:	   user data or null for kernel objects
2112  *
2113  * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
2115  * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2116  * max_num_sg * used_page_size.
2117  *
2118  */
2119 struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
2120 			       u32 max_num_sg, struct ib_udata *udata)
2121 {
2122 	struct ib_mr *mr;
2123 
2124 	if (!pd->device->ops.alloc_mr) {
2125 		mr = ERR_PTR(-EOPNOTSUPP);
2126 		goto out;
2127 	}
2128 
2129 	if (mr_type == IB_MR_TYPE_INTEGRITY) {
2130 		WARN_ON_ONCE(1);
2131 		mr = ERR_PTR(-EINVAL);
2132 		goto out;
2133 	}
2134 
2135 	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata);
2136 	if (!IS_ERR(mr)) {
2137 		mr->device  = pd->device;
2138 		mr->pd      = pd;
2139 		mr->dm      = NULL;
2140 		mr->uobject = NULL;
2141 		atomic_inc(&pd->usecnt);
2142 		mr->need_inval = false;
2143 		mr->res.type = RDMA_RESTRACK_MR;
2144 		rdma_restrack_kadd(&mr->res);
2145 		mr->type = mr_type;
2146 		mr->sig_attrs = NULL;
2147 	}
2148 
2149 out:
2150 	trace_mr_alloc(pd, mr_type, max_num_sg, mr);
2151 	return mr;
2152 }
2153 EXPORT_SYMBOL(ib_alloc_mr_user);
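
/*
 * Usage sketch (illustrative only): in-kernel callers normally allocate a
 * fast-registration MR through the ib_alloc_mr()/ib_dereg_mr() convenience
 * wrappers from ib_verbs.h rather than calling this function directly; the
 * sg-entry count of 32 below is an arbitrary example value.
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	...
 *	ib_dereg_mr(mr);
 */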
2154 
2155 /**
2156  * ib_alloc_mr_integrity() - Allocates an integrity memory region
2157  * @pd:                      protection domain associated with the region
2158  * @max_num_data_sg:         maximum data sg entries available for registration
2159  * @max_num_meta_sg:         maximum metadata sg entries available for
2160  *                           registration
2161  *
2162  * Notes:
 * Memory registration page/sg lists must not exceed max_num_data_sg,
 * and the integrity page/sg lists must not exceed max_num_meta_sg.
2165  *
2166  */
2167 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2168 				    u32 max_num_data_sg,
2169 				    u32 max_num_meta_sg)
2170 {
2171 	struct ib_mr *mr;
2172 	struct ib_sig_attrs *sig_attrs;
2173 
2174 	if (!pd->device->ops.alloc_mr_integrity ||
2175 	    !pd->device->ops.map_mr_sg_pi) {
2176 		mr = ERR_PTR(-EOPNOTSUPP);
2177 		goto out;
2178 	}
2179 
2180 	if (!max_num_meta_sg) {
2181 		mr = ERR_PTR(-EINVAL);
2182 		goto out;
2183 	}
2184 
2185 	sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2186 	if (!sig_attrs) {
2187 		mr = ERR_PTR(-ENOMEM);
2188 		goto out;
2189 	}
2190 
2191 	mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2192 						max_num_meta_sg);
2193 	if (IS_ERR(mr)) {
2194 		kfree(sig_attrs);
2195 		goto out;
2196 	}
2197 
2198 	mr->device = pd->device;
2199 	mr->pd = pd;
2200 	mr->dm = NULL;
2201 	mr->uobject = NULL;
2202 	atomic_inc(&pd->usecnt);
2203 	mr->need_inval = false;
2204 	mr->res.type = RDMA_RESTRACK_MR;
2205 	rdma_restrack_kadd(&mr->res);
2206 	mr->type = IB_MR_TYPE_INTEGRITY;
2207 	mr->sig_attrs = sig_attrs;
2208 
2209 out:
2210 	trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
2211 	return mr;
2212 }
2213 EXPORT_SYMBOL(ib_alloc_mr_integrity);
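
/*
 * Usage sketch (illustrative only): an integrity MR is allocated with
 * separate limits for data and metadata sg entries; the limits of 16 below
 * are example assumptions.  The caller fills in mr->sig_attrs before the MR
 * is used for a signature-enabled registration.
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr_integrity(pd, 16, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */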
2214 
2215 /* Multicast groups */
2216 
2217 static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2218 {
2219 	struct ib_qp_init_attr init_attr = {};
2220 	struct ib_qp_attr attr = {};
2221 	int num_eth_ports = 0;
2222 	int port;
2223 
2224 	/* If QP state >= init, it is assigned to a port and we can check this
2225 	 * port only.
2226 	 */
2227 	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2228 		if (attr.qp_state >= IB_QPS_INIT) {
2229 			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2230 			    IB_LINK_LAYER_INFINIBAND)
2231 				return true;
2232 			goto lid_check;
2233 		}
2234 	}
2235 
2236 	/* Can't get a quick answer, iterate over all ports */
2237 	for (port = 0; port < qp->device->phys_port_cnt; port++)
2238 		if (rdma_port_get_link_layer(qp->device, port) !=
2239 		    IB_LINK_LAYER_INFINIBAND)
2240 			num_eth_ports++;
2241 
	/* If we have at least one Ethernet port, the RoCE annex declares that
2243 	 * multicast LID should be ignored. We can't tell at this step if the
2244 	 * QP belongs to an IB or Ethernet port.
2245 	 */
2246 	if (num_eth_ports)
2247 		return true;
2248 
2249 	/* If all the ports are IB, we can check according to IB spec. */
2250 lid_check:
2251 	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2252 		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
2253 }
2254 
2255 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2256 {
2257 	int ret;
2258 
2259 	if (!qp->device->ops.attach_mcast)
2260 		return -EOPNOTSUPP;
2261 
2262 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2263 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2264 		return -EINVAL;
2265 
2266 	ret = qp->device->ops.attach_mcast(qp, gid, lid);
2267 	if (!ret)
2268 		atomic_inc(&qp->usecnt);
2269 	return ret;
2270 }
2271 EXPORT_SYMBOL(ib_attach_mcast);
2272 
2273 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2274 {
2275 	int ret;
2276 
2277 	if (!qp->device->ops.detach_mcast)
2278 		return -EOPNOTSUPP;
2279 
2280 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2281 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2282 		return -EINVAL;
2283 
2284 	ret = qp->device->ops.detach_mcast(qp, gid, lid);
2285 	if (!ret)
2286 		atomic_dec(&qp->usecnt);
2287 	return ret;
2288 }
2289 EXPORT_SYMBOL(ib_detach_mcast);
2290 
2291 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
2292 {
2293 	struct ib_xrcd *xrcd;
2294 
2295 	if (!device->ops.alloc_xrcd)
2296 		return ERR_PTR(-EOPNOTSUPP);
2297 
2298 	xrcd = device->ops.alloc_xrcd(device, NULL);
2299 	if (!IS_ERR(xrcd)) {
2300 		xrcd->device = device;
2301 		xrcd->inode = NULL;
2302 		atomic_set(&xrcd->usecnt, 0);
2303 		mutex_init(&xrcd->tgt_qp_mutex);
2304 		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
2305 	}
2306 
2307 	return xrcd;
2308 }
2309 EXPORT_SYMBOL(__ib_alloc_xrcd);
2310 
2311 int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
2312 {
2313 	struct ib_qp *qp;
2314 	int ret;
2315 
2316 	if (atomic_read(&xrcd->usecnt))
2317 		return -EBUSY;
2318 
2319 	while (!list_empty(&xrcd->tgt_qp_list)) {
2320 		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
2321 		ret = ib_destroy_qp(qp);
2322 		if (ret)
2323 			return ret;
2324 	}
2325 	mutex_destroy(&xrcd->tgt_qp_mutex);
2326 
2327 	return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2328 }
2329 EXPORT_SYMBOL(ib_dealloc_xrcd);
2330 
2331 /**
2332  * ib_create_wq - Creates a WQ associated with the specified protection
2333  * domain.
2334  * @pd: The protection domain associated with the WQ.
2335  * @wq_attr: A list of initial attributes required to create the
2336  * WQ. If WQ creation succeeds, then the attributes are updated to
2337  * the actual capabilities of the created WQ.
2338  *
 * wq_attr->max_wr and wq_attr->max_sge determine the requested size of the
 * WQ, and are set to the actual values allocated on return.
2342  * If ib_create_wq() succeeds, then max_wr and max_sge will always be
2343  * at least as large as the requested values.
2344  */
2345 struct ib_wq *ib_create_wq(struct ib_pd *pd,
2346 			   struct ib_wq_init_attr *wq_attr)
2347 {
2348 	struct ib_wq *wq;
2349 
2350 	if (!pd->device->ops.create_wq)
2351 		return ERR_PTR(-EOPNOTSUPP);
2352 
2353 	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2354 	if (!IS_ERR(wq)) {
2355 		wq->event_handler = wq_attr->event_handler;
2356 		wq->wq_context = wq_attr->wq_context;
2357 		wq->wq_type = wq_attr->wq_type;
2358 		wq->cq = wq_attr->cq;
2359 		wq->device = pd->device;
2360 		wq->pd = pd;
2361 		wq->uobject = NULL;
2362 		atomic_inc(&pd->usecnt);
2363 		atomic_inc(&wq_attr->cq->usecnt);
2364 		atomic_set(&wq->usecnt, 0);
2365 	}
2366 	return wq;
2367 }
2368 EXPORT_SYMBOL(ib_create_wq);
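
/*
 * Usage sketch (illustrative only): creating a receive WQ bound to an
 * existing CQ.  The queue depth and sge count are example assumptions.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr	 = 128,
 *		.max_sge = 2,
 *		.cq	 = cq,
 *	};
 *	struct ib_wq *wq;
 *
 *	wq = ib_create_wq(pd, &wq_attr);
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */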
2369 
2370 /**
2371  * ib_destroy_wq - Destroys the specified user WQ.
2372  * @wq: The WQ to destroy.
2373  * @udata: Valid user data
2374  */
2375 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
2376 {
2377 	struct ib_cq *cq = wq->cq;
2378 	struct ib_pd *pd = wq->pd;
2379 
2380 	if (atomic_read(&wq->usecnt))
2381 		return -EBUSY;
2382 
2383 	wq->device->ops.destroy_wq(wq, udata);
2384 	atomic_dec(&pd->usecnt);
2385 	atomic_dec(&cq->usecnt);
2386 
2387 	return 0;
2388 }
2389 EXPORT_SYMBOL(ib_destroy_wq);
2390 
2391 /**
2392  * ib_modify_wq - Modifies the specified WQ.
2393  * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.  On output,
 *   the current values of selected WQ attributes are returned.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 */
2399 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
2400 		 u32 wq_attr_mask)
2401 {
2402 	int err;
2403 
2404 	if (!wq->device->ops.modify_wq)
2405 		return -EOPNOTSUPP;
2406 
2407 	err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
2408 	return err;
2409 }
2410 EXPORT_SYMBOL(ib_modify_wq);
2411 
2412 /*
2413  * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
2414  * @device: The device on which to create the rwq indirection table.
2415  * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
2416  * create the Indirection Table.
2417  *
2418  * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less
2419  *	than the created ib_rwq_ind_table object and the caller is responsible
2420  *	for its memory allocation/free.
2421  */
2422 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
2423 						 struct ib_rwq_ind_table_init_attr *init_attr)
2424 {
2425 	struct ib_rwq_ind_table *rwq_ind_table;
2426 	int i;
2427 	u32 table_size;
2428 
2429 	if (!device->ops.create_rwq_ind_table)
2430 		return ERR_PTR(-EOPNOTSUPP);
2431 
2432 	table_size = (1 << init_attr->log_ind_tbl_size);
2433 	rwq_ind_table = device->ops.create_rwq_ind_table(device,
2434 							 init_attr, NULL);
2435 	if (IS_ERR(rwq_ind_table))
2436 		return rwq_ind_table;
2437 
2438 	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
2439 	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
2440 	rwq_ind_table->device = device;
2441 	rwq_ind_table->uobject = NULL;
2442 	atomic_set(&rwq_ind_table->usecnt, 0);
2443 
2444 	for (i = 0; i < table_size; i++)
2445 		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
2446 
2447 	return rwq_ind_table;
2448 }
2449 EXPORT_SYMBOL(ib_create_rwq_ind_table);
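
/*
 * Usage sketch (illustrative only): building an indirection table over a
 * caller-owned array of WQs.  The table size (log2 size 2, i.e. 4 entries)
 * is an example assumption, and the wqs[] array must outlive the table.
 *
 *	struct ib_rwq_ind_table_init_attr init_attr = {
 *		.log_ind_tbl_size = 2,
 *		.ind_tbl	  = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	ind_tbl = ib_create_rwq_ind_table(device, &init_attr);
 *	if (IS_ERR(ind_tbl))
 *		return PTR_ERR(ind_tbl);
 */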
2450 
2451 /*
2452  * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @rwq_ind_table: The Indirection Table to destroy.
 */
2455 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
2456 {
2457 	int err, i;
2458 	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
2459 	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
2460 
2461 	if (atomic_read(&rwq_ind_table->usecnt))
2462 		return -EBUSY;
2463 
2464 	err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
2465 	if (!err) {
2466 		for (i = 0; i < table_size; i++)
2467 			atomic_dec(&ind_tbl[i]->usecnt);
2468 	}
2469 
2470 	return err;
2471 }
2472 EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
2473 
2474 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2475 		       struct ib_mr_status *mr_status)
2476 {
2477 	if (!mr->device->ops.check_mr_status)
2478 		return -EOPNOTSUPP;
2479 
2480 	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2481 }
2482 EXPORT_SYMBOL(ib_check_mr_status);
2483 
2484 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2485 			 int state)
2486 {
2487 	if (!device->ops.set_vf_link_state)
2488 		return -EOPNOTSUPP;
2489 
2490 	return device->ops.set_vf_link_state(device, vf, port, state);
2491 }
2492 EXPORT_SYMBOL(ib_set_vf_link_state);
2493 
2494 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2495 		     struct ifla_vf_info *info)
2496 {
2497 	if (!device->ops.get_vf_config)
2498 		return -EOPNOTSUPP;
2499 
2500 	return device->ops.get_vf_config(device, vf, port, info);
2501 }
2502 EXPORT_SYMBOL(ib_get_vf_config);
2503 
2504 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2505 		    struct ifla_vf_stats *stats)
2506 {
2507 	if (!device->ops.get_vf_stats)
2508 		return -EOPNOTSUPP;
2509 
2510 	return device->ops.get_vf_stats(device, vf, port, stats);
2511 }
2512 EXPORT_SYMBOL(ib_get_vf_stats);
2513 
2514 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2515 		   int type)
2516 {
2517 	if (!device->ops.set_vf_guid)
2518 		return -EOPNOTSUPP;
2519 
2520 	return device->ops.set_vf_guid(device, vf, port, guid, type);
2521 }
2522 EXPORT_SYMBOL(ib_set_vf_guid);
2523 
2524 int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
2525 		   struct ifla_vf_guid *node_guid,
2526 		   struct ifla_vf_guid *port_guid)
2527 {
2528 	if (!device->ops.get_vf_guid)
2529 		return -EOPNOTSUPP;
2530 
2531 	return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2532 }
EXPORT_SYMBOL(ib_get_vf_guid);

2534 /**
2535  * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2536  *     information) and set an appropriate memory region for registration.
2537  * @mr:             memory region
2538  * @data_sg:        dma mapped scatterlist for data
2539  * @data_sg_nents:  number of entries in data_sg
2540  * @data_sg_offset: offset in bytes into data_sg
2541  * @meta_sg:        dma mapped scatterlist for metadata
2542  * @meta_sg_nents:  number of entries in meta_sg
2543  * @meta_sg_offset: offset in bytes into meta_sg
2544  * @page_size:      page vector desired page size
2545  *
2546  * Constraints:
2547  * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2548  *
2549  * Return: 0 on success.
2550  *
 * After this completes successfully, the memory region
 * is ready for registration.
2553  */
2554 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
2555 		    int data_sg_nents, unsigned int *data_sg_offset,
2556 		    struct scatterlist *meta_sg, int meta_sg_nents,
2557 		    unsigned int *meta_sg_offset, unsigned int page_size)
2558 {
2559 	if (unlikely(!mr->device->ops.map_mr_sg_pi ||
2560 		     WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
2561 		return -EOPNOTSUPP;
2562 
2563 	mr->page_size = page_size;
2564 
2565 	return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
2566 					    data_sg_offset, meta_sg,
2567 					    meta_sg_nents, meta_sg_offset);
2568 }
2569 EXPORT_SYMBOL(ib_map_mr_sg_pi);
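
/*
 * Usage sketch (illustrative only): mapping separate, already DMA mapped
 * data and metadata scatterlists onto an integrity MR.  PAGE_SIZE as the
 * page size and the NULL offsets are example assumptions.
 *
 *	ret = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
 *			      meta_sg, meta_nents, NULL, PAGE_SIZE);
 *	if (ret)
 *		return ret;
 */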
2570 
2571 /**
2572  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it on the memory region.
2574  * @mr:            memory region
2575  * @sg:            dma mapped scatterlist
2576  * @sg_nents:      number of entries in sg
2577  * @sg_offset:     offset in bytes into sg
2578  * @page_size:     page vector desired page size
2579  *
2580  * Constraints:
2581  *
2582  * - The first sg element is allowed to have an offset.
2583  * - Each sg element must either be aligned to page_size or virtually
2584  *   contiguous to the previous element. In case an sg element has a
2585  *   non-contiguous offset, the mapping prefix will not include it.
2586  * - The last sg element is allowed to have length less than page_size.
 * - If the sg_nents total byte length exceeds the MR's max_num_sg * page_size,
 *   then only max_num_sg entries will be mapped.
2589  * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2590  *   constraints holds and the page_size argument is ignored.
2591  *
2592  * Returns the number of sg elements that were mapped to the memory region.
2593  *
 * After this completes successfully, the memory region
 * is ready for registration.
2596  */
2597 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2598 		 unsigned int *sg_offset, unsigned int page_size)
2599 {
2600 	if (unlikely(!mr->device->ops.map_mr_sg))
2601 		return -EOPNOTSUPP;
2602 
2603 	mr->page_size = page_size;
2604 
2605 	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2606 }
2607 EXPORT_SYMBOL(ib_map_mr_sg);
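
/*
 * Usage sketch (illustrative only): a typical fast-registration flow DMA
 * maps the scatterlist, maps it onto the MR, and posts an IB_WR_REG_MR work
 * request.  The ib_dma_map_sg() helper and struct ib_reg_wr used here come
 * from ib_verbs.h (assumptions outside this file), and error handling is
 * simplified.
 *
 *	struct ib_reg_wr reg_wr = {
 *		.wr.opcode	= IB_WR_REG_MR,
 *		.wr.send_flags	= IB_SEND_SIGNALED,
 *		.mr		= mr,
 *		.key		= mr->rkey,
 *		.access		= IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *	};
 *	int n;
 *
 *	n = ib_dma_map_sg(qp->device, sgl, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -EIO;
 *	n = ib_map_mr_sg(mr, sgl, n, NULL, PAGE_SIZE);
 *	if (n < 0)
 *		return n;
 *	ret = ib_post_send(qp, &reg_wr.wr, NULL);
 *
 * A return value from ib_map_mr_sg() smaller than the posted entry count
 * means only that prefix was mapped.
 */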
2608 
2609 /**
2610  * ib_sg_to_pages() - Convert the largest prefix of a sg list
2611  *     to a page vector
2612  * @mr:            memory region
2613  * @sgl:           dma mapped scatterlist
2614  * @sg_nents:      number of entries in sg
2615  * @sg_offset_p:   ==== =======================================================
2616  *                 IN   start offset in bytes into sg
2617  *                 OUT  offset in bytes for element n of the sg of the first
2618  *                      byte that has not been processed where n is the return
2619  *                      value of this function.
2620  *                 ==== =======================================================
2621  * @set_page:      driver page assignment function pointer
2622  *
2623  * Core service helper for drivers to convert the largest
2624  * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
2626  * of ib_map_mr_sg.
2627  *
2628  * Returns the number of sg elements that were assigned to
2629  * a page vector.
2630  */
2631 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2632 		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2633 {
2634 	struct scatterlist *sg;
2635 	u64 last_end_dma_addr = 0;
2636 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2637 	unsigned int last_page_off = 0;
2638 	u64 page_mask = ~((u64)mr->page_size - 1);
2639 	int i, ret;
2640 
2641 	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2642 		return -EINVAL;
2643 
2644 	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2645 	mr->length = 0;
2646 
2647 	for_each_sg(sgl, sg, sg_nents, i) {
2648 		u64 dma_addr = sg_dma_address(sg) + sg_offset;
2649 		u64 prev_addr = dma_addr;
2650 		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2651 		u64 end_dma_addr = dma_addr + dma_len;
2652 		u64 page_addr = dma_addr & page_mask;
2653 
2654 		/*
2655 		 * For the second and later elements, check whether either the
2656 		 * end of element i-1 or the start of element i is not aligned
2657 		 * on a page boundary.
2658 		 */
2659 		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2660 			/* Stop mapping if there is a gap. */
2661 			if (last_end_dma_addr != dma_addr)
2662 				break;
2663 
2664 			/*
2665 			 * Coalesce this element with the last. If it is small
2666 			 * enough just update mr->length. Otherwise start
2667 			 * mapping from the next page.
2668 			 */
2669 			goto next_page;
2670 		}
2671 
2672 		do {
2673 			ret = set_page(mr, page_addr);
2674 			if (unlikely(ret < 0)) {
2675 				sg_offset = prev_addr - sg_dma_address(sg);
2676 				mr->length += prev_addr - dma_addr;
2677 				if (sg_offset_p)
2678 					*sg_offset_p = sg_offset;
2679 				return i || sg_offset ? i : ret;
2680 			}
2681 			prev_addr = page_addr;
2682 next_page:
2683 			page_addr += mr->page_size;
2684 		} while (page_addr < end_dma_addr);
2685 
2686 		mr->length += dma_len;
2687 		last_end_dma_addr = end_dma_addr;
2688 		last_page_off = end_dma_addr & ~page_mask;
2689 
2690 		sg_offset = 0;
2691 	}
2692 
2693 	if (sg_offset_p)
2694 		*sg_offset_p = 0;
2695 	return i;
2696 }
2697 EXPORT_SYMBOL(ib_sg_to_pages);
2698 
2699 struct ib_drain_cqe {
2700 	struct ib_cqe cqe;
2701 	struct completion done;
2702 };
2703 
2704 static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2705 {
2706 	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2707 						cqe);
2708 
2709 	complete(&cqe->done);
2710 }
2711 
2712 /*
2713  * Post a WR and block until its completion is reaped for the SQ.
2714  */
2715 static void __ib_drain_sq(struct ib_qp *qp)
2716 {
2717 	struct ib_cq *cq = qp->send_cq;
2718 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2719 	struct ib_drain_cqe sdrain;
2720 	struct ib_rdma_wr swr = {
2721 		.wr = {
2722 			.next = NULL,
2723 			{ .wr_cqe	= &sdrain.cqe, },
2724 			.opcode	= IB_WR_RDMA_WRITE,
2725 		},
2726 	};
2727 	int ret;
2728 
2729 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2730 	if (ret) {
2731 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2732 		return;
2733 	}
2734 
2735 	sdrain.cqe.done = ib_drain_qp_done;
2736 	init_completion(&sdrain.done);
2737 
2738 	ret = ib_post_send(qp, &swr.wr, NULL);
2739 	if (ret) {
2740 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2741 		return;
2742 	}
2743 
2744 	if (cq->poll_ctx == IB_POLL_DIRECT)
2745 		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2746 			ib_process_cq_direct(cq, -1);
2747 	else
2748 		wait_for_completion(&sdrain.done);
2749 }
2750 
2751 /*
2752  * Post a WR and block until its completion is reaped for the RQ.
2753  */
2754 static void __ib_drain_rq(struct ib_qp *qp)
2755 {
2756 	struct ib_cq *cq = qp->recv_cq;
2757 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2758 	struct ib_drain_cqe rdrain;
2759 	struct ib_recv_wr rwr = {};
2760 	int ret;
2761 
2762 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2763 	if (ret) {
2764 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2765 		return;
2766 	}
2767 
2768 	rwr.wr_cqe = &rdrain.cqe;
2769 	rdrain.cqe.done = ib_drain_qp_done;
2770 	init_completion(&rdrain.done);
2771 
2772 	ret = ib_post_recv(qp, &rwr, NULL);
2773 	if (ret) {
2774 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2775 		return;
2776 	}
2777 
2778 	if (cq->poll_ctx == IB_POLL_DIRECT)
2779 		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2780 			ib_process_cq_direct(cq, -1);
2781 	else
2782 		wait_for_completion(&rdrain.done);
2783 }
2784 
2785 /**
2786  * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2787  *		   application.
2788  * @qp:            queue pair to drain
2789  *
2790  * If the device has a provider-specific drain function, then
2791  * call that.  Otherwise call the generic drain function
2792  * __ib_drain_sq().
2793  *
2794  * The caller must:
2795  *
2796  * ensure there is room in the CQ and SQ for the drain work request and
2797  * completion.
2798  *
2799  * allocate the CQ using ib_alloc_cq().
2800  *
2801  * ensure that there are no other contexts that are posting WRs concurrently.
2802  * Otherwise the drain is not guaranteed.
2803  */
2804 void ib_drain_sq(struct ib_qp *qp)
2805 {
2806 	if (qp->device->ops.drain_sq)
2807 		qp->device->ops.drain_sq(qp);
2808 	else
2809 		__ib_drain_sq(qp);
2810 	trace_cq_drain_complete(qp->send_cq);
2811 }
2812 EXPORT_SYMBOL(ib_drain_sq);
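
/*
 * Usage sketch (illustrative only): the CQ used with the drain helpers is
 * expected to come from ib_alloc_cq() (declared in ib_verbs.h) and to have
 * room for the extra drain completion, e.g. by sizing it one entry larger
 * than the queue depth.  The depth and poll context are example assumptions.
 *
 *	cq = ib_alloc_cq(dev, NULL, sq_depth + 1, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_drain_sq(qp);
 */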
2813 
2814 /**
2815  * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2816  *		   application.
2817  * @qp:            queue pair to drain
2818  *
2819  * If the device has a provider-specific drain function, then
2820  * call that.  Otherwise call the generic drain function
2821  * __ib_drain_rq().
2822  *
2823  * The caller must:
2824  *
2825  * ensure there is room in the CQ and RQ for the drain work request and
2826  * completion.
2827  *
2828  * allocate the CQ using ib_alloc_cq().
2829  *
2830  * ensure that there are no other contexts that are posting WRs concurrently.
2831  * Otherwise the drain is not guaranteed.
2832  */
2833 void ib_drain_rq(struct ib_qp *qp)
2834 {
2835 	if (qp->device->ops.drain_rq)
2836 		qp->device->ops.drain_rq(qp);
2837 	else
2838 		__ib_drain_rq(qp);
2839 	trace_cq_drain_complete(qp->recv_cq);
2840 }
2841 EXPORT_SYMBOL(ib_drain_rq);
2842 
2843 /**
2844  * ib_drain_qp() - Block until all CQEs have been consumed by the
2845  *		   application on both the RQ and SQ.
2846  * @qp:            queue pair to drain
2847  *
2848  * The caller must:
2849  *
2850  * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2851  * and completions.
2852  *
2853  * allocate the CQs using ib_alloc_cq().
2854  *
2855  * ensure that there are no other contexts that are posting WRs concurrently.
2856  * Otherwise the drain is not guaranteed.
2857  */
2858 void ib_drain_qp(struct ib_qp *qp)
2859 {
2860 	ib_drain_sq(qp);
2861 	if (!qp->srq)
2862 		ib_drain_rq(qp);
2863 }
2864 EXPORT_SYMBOL(ib_drain_qp);
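
/*
 * Usage sketch (illustrative only): a common teardown sequence drains the
 * QP before destroying it, so that every outstanding work request has been
 * flushed and its completion reaped first.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */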
2865 
2866 struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
2867 				     enum rdma_netdev_t type, const char *name,
2868 				     unsigned char name_assign_type,
2869 				     void (*setup)(struct net_device *))
2870 {
2871 	struct rdma_netdev_alloc_params params;
2872 	struct net_device *netdev;
2873 	int rc;
2874 
2875 	if (!device->ops.rdma_netdev_get_params)
2876 		return ERR_PTR(-EOPNOTSUPP);
2877 
2878 	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2879 						&params);
2880 	if (rc)
2881 		return ERR_PTR(rc);
2882 
2883 	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2884 				  setup, params.txqs, params.rxqs);
2885 	if (!netdev)
2886 		return ERR_PTR(-ENOMEM);
2887 
2888 	return netdev;
2889 }
2890 EXPORT_SYMBOL(rdma_alloc_netdev);
2891 
2892 int rdma_init_netdev(struct ib_device *device, u8 port_num,
2893 		     enum rdma_netdev_t type, const char *name,
2894 		     unsigned char name_assign_type,
2895 		     void (*setup)(struct net_device *),
2896 		     struct net_device *netdev)
2897 {
2898 	struct rdma_netdev_alloc_params params;
2899 	int rc;
2900 
2901 	if (!device->ops.rdma_netdev_get_params)
2902 		return -EOPNOTSUPP;
2903 
2904 	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2905 						&params);
2906 	if (rc)
2907 		return rc;
2908 
2909 	return params.initialize_rdma_netdev(device, port_num,
2910 					     netdev, params.param);
2911 }
2912 EXPORT_SYMBOL(rdma_init_netdev);
2913 
2914 void __rdma_block_iter_start(struct ib_block_iter *biter,
2915 			     struct scatterlist *sglist, unsigned int nents,
2916 			     unsigned long pgsz)
2917 {
2918 	memset(biter, 0, sizeof(struct ib_block_iter));
2919 	biter->__sg = sglist;
2920 	biter->__sg_nents = nents;
2921 
2922 	/* Driver provides best block size to use */
2923 	biter->__pg_bit = __fls(pgsz);
2924 }
2925 EXPORT_SYMBOL(__rdma_block_iter_start);
2926 
2927 bool __rdma_block_iter_next(struct ib_block_iter *biter)
2928 {
2929 	unsigned int block_offset;
2930 
2931 	if (!biter->__sg_nents || !biter->__sg)
2932 		return false;
2933 
2934 	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
2935 	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
2936 	biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
2937 
2938 	if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
2939 		biter->__sg_advance = 0;
2940 		biter->__sg = sg_next(biter->__sg);
2941 		biter->__sg_nents--;
2942 	}
2943 
2944 	return true;
2945 }
2946 EXPORT_SYMBOL(__rdma_block_iter_next);
2947