xref: /openbmc/linux/drivers/infiniband/core/verbs.c (revision e2c75e76)
1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38 
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
44 #include <linux/in.h>
45 #include <linux/in6.h>
46 #include <net/addrconf.h>
47 #include <linux/security.h>
48 
49 #include <rdma/ib_verbs.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/rw.h>
53 
54 #include "core_priv.h"
55 
56 static int ib_resolve_eth_dmac(struct ib_device *device,
57 			       struct rdma_ah_attr *ah_attr);
58 
59 static const char * const ib_events[] = {
60 	[IB_EVENT_CQ_ERR]		= "CQ error",
61 	[IB_EVENT_QP_FATAL]		= "QP fatal error",
62 	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
63 	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
64 	[IB_EVENT_COMM_EST]		= "communication established",
65 	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
66 	[IB_EVENT_PATH_MIG]		= "path migration successful",
67 	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
68 	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
69 	[IB_EVENT_PORT_ACTIVE]		= "port active",
70 	[IB_EVENT_PORT_ERR]		= "port error",
71 	[IB_EVENT_LID_CHANGE]		= "LID change",
72 	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
73 	[IB_EVENT_SM_CHANGE]		= "SM change",
74 	[IB_EVENT_SRQ_ERR]		= "SRQ error",
75 	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
76 	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
77 	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
78 	[IB_EVENT_GID_CHANGE]		= "GID changed",
79 };
80 
81 const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
82 {
83 	size_t index = event;
84 
85 	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
86 			ib_events[index] : "unrecognized event";
87 }
88 EXPORT_SYMBOL(ib_event_msg);
89 
90 static const char * const wc_statuses[] = {
91 	[IB_WC_SUCCESS]			= "success",
92 	[IB_WC_LOC_LEN_ERR]		= "local length error",
93 	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
94 	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
95 	[IB_WC_LOC_PROT_ERR]		= "local protection error",
96 	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
97 	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
98 	[IB_WC_BAD_RESP_ERR]		= "bad response error",
99 	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
100 	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
101 	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
102 	[IB_WC_REM_OP_ERR]		= "remote operation error",
103 	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
104 	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
105 	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
106 	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
107 	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
108 	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
109 	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
110 	[IB_WC_FATAL_ERR]		= "fatal error",
111 	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
112 	[IB_WC_GENERAL_ERR]		= "general error",
113 };
114 
115 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
116 {
117 	size_t index = status;
118 
119 	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
120 			wc_statuses[index] : "unrecognized status";
121 }
122 EXPORT_SYMBOL(ib_wc_status_msg);
123 
124 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
125 {
126 	switch (rate) {
127 	case IB_RATE_2_5_GBPS: return   1;
128 	case IB_RATE_5_GBPS:   return   2;
129 	case IB_RATE_10_GBPS:  return   4;
130 	case IB_RATE_20_GBPS:  return   8;
131 	case IB_RATE_30_GBPS:  return  12;
132 	case IB_RATE_40_GBPS:  return  16;
133 	case IB_RATE_60_GBPS:  return  24;
134 	case IB_RATE_80_GBPS:  return  32;
135 	case IB_RATE_120_GBPS: return  48;
136 	case IB_RATE_14_GBPS:  return   6;
137 	case IB_RATE_56_GBPS:  return  22;
138 	case IB_RATE_112_GBPS: return  45;
139 	case IB_RATE_168_GBPS: return  67;
140 	case IB_RATE_25_GBPS:  return  10;
141 	case IB_RATE_100_GBPS: return  40;
142 	case IB_RATE_200_GBPS: return  80;
143 	case IB_RATE_300_GBPS: return 120;
144 	default:	       return  -1;
145 	}
146 }
147 EXPORT_SYMBOL(ib_rate_to_mult);
148 
149 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
150 {
151 	switch (mult) {
152 	case 1:   return IB_RATE_2_5_GBPS;
153 	case 2:   return IB_RATE_5_GBPS;
154 	case 4:   return IB_RATE_10_GBPS;
155 	case 8:   return IB_RATE_20_GBPS;
156 	case 12:  return IB_RATE_30_GBPS;
157 	case 16:  return IB_RATE_40_GBPS;
158 	case 24:  return IB_RATE_60_GBPS;
159 	case 32:  return IB_RATE_80_GBPS;
160 	case 48:  return IB_RATE_120_GBPS;
161 	case 6:   return IB_RATE_14_GBPS;
162 	case 22:  return IB_RATE_56_GBPS;
163 	case 45:  return IB_RATE_112_GBPS;
164 	case 67:  return IB_RATE_168_GBPS;
165 	case 10:  return IB_RATE_25_GBPS;
166 	case 40:  return IB_RATE_100_GBPS;
167 	case 80:  return IB_RATE_200_GBPS;
168 	case 120: return IB_RATE_300_GBPS;
169 	default:  return IB_RATE_PORT_CURRENT;
170 	}
171 }
172 EXPORT_SYMBOL(mult_to_ib_rate);
173 
174 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
175 {
176 	switch (rate) {
177 	case IB_RATE_2_5_GBPS: return 2500;
178 	case IB_RATE_5_GBPS:   return 5000;
179 	case IB_RATE_10_GBPS:  return 10000;
180 	case IB_RATE_20_GBPS:  return 20000;
181 	case IB_RATE_30_GBPS:  return 30000;
182 	case IB_RATE_40_GBPS:  return 40000;
183 	case IB_RATE_60_GBPS:  return 60000;
184 	case IB_RATE_80_GBPS:  return 80000;
185 	case IB_RATE_120_GBPS: return 120000;
186 	case IB_RATE_14_GBPS:  return 14062;
187 	case IB_RATE_56_GBPS:  return 56250;
188 	case IB_RATE_112_GBPS: return 112500;
189 	case IB_RATE_168_GBPS: return 168750;
190 	case IB_RATE_25_GBPS:  return 25781;
191 	case IB_RATE_100_GBPS: return 103125;
192 	case IB_RATE_200_GBPS: return 206250;
193 	case IB_RATE_300_GBPS: return 309375;
194 	default:	       return -1;
195 	}
196 }
197 EXPORT_SYMBOL(ib_rate_to_mbps);
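
/*
 * Illustrative sketch (not part of the original file): the three rate
 * helpers above are mutually consistent, so a rate can be round-tripped
 * through its 2.5 Gb/s multiplier:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	// 16
 *
 *	WARN_ON(mult_to_ib_rate(mult) != IB_RATE_40_GBPS);
 *	WARN_ON(ib_rate_to_mbps(IB_RATE_40_GBPS) != 40000);
 */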
198 
199 __attribute_const__ enum rdma_transport_type
200 rdma_node_get_transport(enum rdma_node_type node_type)
201 {
202 
203 	if (node_type == RDMA_NODE_USNIC)
204 		return RDMA_TRANSPORT_USNIC;
205 	if (node_type == RDMA_NODE_USNIC_UDP)
206 		return RDMA_TRANSPORT_USNIC_UDP;
207 	if (node_type == RDMA_NODE_RNIC)
208 		return RDMA_TRANSPORT_IWARP;
209 
210 	return RDMA_TRANSPORT_IB;
211 }
212 EXPORT_SYMBOL(rdma_node_get_transport);
213 
214 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
215 {
216 	enum rdma_transport_type lt;
217 	if (device->get_link_layer)
218 		return device->get_link_layer(device, port_num);
219 
220 	lt = rdma_node_get_transport(device->node_type);
221 	if (lt == RDMA_TRANSPORT_IB)
222 		return IB_LINK_LAYER_INFINIBAND;
223 
224 	return IB_LINK_LAYER_ETHERNET;
225 }
226 EXPORT_SYMBOL(rdma_port_get_link_layer);
227 
228 /* Protection domains */
229 
230 /**
231  * ib_alloc_pd - Allocates an unused protection domain.
232  * @device: The device on which to allocate the protection domain.
233  *
234  * A protection domain object provides an association between QPs, shared
235  * receive queues, address handles, memory regions, and memory windows.
236  *
237  * Every PD has a local_dma_lkey which can be used as the lkey value for local
238  * memory operations.
239  */
240 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
241 		const char *caller)
242 {
243 	struct ib_pd *pd;
244 	int mr_access_flags = 0;
245 
246 	pd = device->alloc_pd(device, NULL, NULL);
247 	if (IS_ERR(pd))
248 		return pd;
249 
250 	pd->device = device;
251 	pd->uobject = NULL;
252 	pd->__internal_mr = NULL;
253 	atomic_set(&pd->usecnt, 0);
254 	pd->flags = flags;
255 
256 	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
257 		pd->local_dma_lkey = device->local_dma_lkey;
258 	else
259 		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
260 
261 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
262 		pr_warn("%s: enabling unsafe global rkey\n", caller);
263 		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
264 	}
265 
266 	pd->res.type = RDMA_RESTRACK_PD;
267 	pd->res.kern_name = caller;
268 	rdma_restrack_add(&pd->res);
269 
270 	if (mr_access_flags) {
271 		struct ib_mr *mr;
272 
273 		mr = pd->device->get_dma_mr(pd, mr_access_flags);
274 		if (IS_ERR(mr)) {
275 			ib_dealloc_pd(pd);
276 			return ERR_CAST(mr);
277 		}
278 
279 		mr->device	= pd->device;
280 		mr->pd		= pd;
281 		mr->uobject	= NULL;
282 		mr->need_inval	= false;
283 
284 		pd->__internal_mr = mr;
285 
286 		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
287 			pd->local_dma_lkey = pd->__internal_mr->lkey;
288 
289 		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
290 			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
291 	}
292 
293 	return pd;
294 }
295 EXPORT_SYMBOL(__ib_alloc_pd);
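
/*
 * Illustrative sketch (assumption: the ib_alloc_pd() macro wraps
 * __ib_alloc_pd() and supplies KBUILD_MODNAME as @caller).  A typical
 * kernel ULP allocates and releases a PD like this; "device" is
 * hypothetical:
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);	// no IB_PD_UNSAFE_GLOBAL_RKEY
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);	// only after all PD resources are destroyed
 */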
296 
297 /**
298  * ib_dealloc_pd - Deallocates a protection domain.
299  * @pd: The protection domain to deallocate.
300  *
301  * It is an error to call this function while any resources in the pd still
302  * exist.  The caller is responsible for synchronously destroying them and
303  * guaranteeing that no new allocations will happen.
304  */
305 void ib_dealloc_pd(struct ib_pd *pd)
306 {
307 	int ret;
308 
309 	if (pd->__internal_mr) {
310 		ret = pd->device->dereg_mr(pd->__internal_mr);
311 		WARN_ON(ret);
312 		pd->__internal_mr = NULL;
313 	}
314 
315 	/* uverbs manipulates usecnt with proper locking, while the kabi
316 	   requires the caller to guarantee we can't race here. */
317 	WARN_ON(atomic_read(&pd->usecnt));
318 
319 	rdma_restrack_del(&pd->res);
320 	/* Making dealloc_pd a void return is a WIP, no driver should return
321 	   an error here. */
322 	ret = pd->device->dealloc_pd(pd);
323 	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
324 }
325 EXPORT_SYMBOL(ib_dealloc_pd);
326 
327 /* Address handles */
328 
329 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
330 				     struct rdma_ah_attr *ah_attr,
331 				     struct ib_udata *udata)
332 {
333 	struct ib_ah *ah;
334 
335 	ah = pd->device->create_ah(pd, ah_attr, udata);
336 
337 	if (!IS_ERR(ah)) {
338 		ah->device  = pd->device;
339 		ah->pd      = pd;
340 		ah->uobject = NULL;
341 		ah->type    = ah_attr->type;
342 		atomic_inc(&pd->usecnt);
343 	}
344 
345 	return ah;
346 }
347 
348 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
349 {
350 	return _rdma_create_ah(pd, ah_attr, NULL);
351 }
352 EXPORT_SYMBOL(rdma_create_ah);
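
/*
 * Illustrative sketch (hypothetical dlid/port values): building a minimal
 * IB address vector by hand and turning it into an AH.  rdma_create_ah()
 * takes a reference on the PD that rdma_destroy_ah() drops again.
 *
 *	struct rdma_ah_attr ah_attr = {};
 *	struct ib_ah *ah;
 *
 *	ah_attr.type = rdma_ah_find_type(pd->device, port_num);
 *	rdma_ah_set_dlid(&ah_attr, dlid);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *
 *	ah = rdma_create_ah(pd, &ah_attr);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */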
353 
354 /**
355  * rdma_create_user_ah - Creates an address handle for the given address
356  * vector.  For an ah_attr of type RDMA_AH_ATTR_TYPE_ROCE it also resolves
357  * the destination MAC address.
358  * @pd: The protection domain associated with the address handle.
359  * @ah_attr: The attributes of the address vector.
360  * @udata: pointer to the user's input/output buffer information needed by
361  *         the provider driver.
362  *
363  * Returns the newly created address handle on success, or an ERR_PTR
364  * encoding the error on failure.  The address handle is used to reference
365  * a local or global destination in all UD QP post sends.
366  */
367 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
368 				  struct rdma_ah_attr *ah_attr,
369 				  struct ib_udata *udata)
370 {
371 	int err;
372 
373 	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
374 		err = ib_resolve_eth_dmac(pd->device, ah_attr);
375 		if (err)
376 			return ERR_PTR(err);
377 	}
378 
379 	return _rdma_create_ah(pd, ah_attr, udata);
380 }
381 EXPORT_SYMBOL(rdma_create_user_ah);
382 
383 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
384 {
385 	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
386 	struct iphdr ip4h_checked;
387 	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
388 
389 	/* If it's IPv6, the version must be 6, otherwise, the first
390 	 * 20 bytes (before the IPv4 header) are garbled.
391 	 */
392 	if (ip6h->version != 6)
393 		return (ip4h->version == 4) ? 4 : 0;
394 	/* version may be 6 or 4 because the first 20 bytes could be garbled */
395 
396 	/* RoCE v2 requires no options, thus header length
397 	 * must be 5 words
398 	 */
399 	if (ip4h->ihl != 5)
400 		return 6;
401 
402 	/* Verify checksum.
403 	 * We can't write on scattered buffers so we need to copy to
404 	 * temp buffer.
405 	 */
406 	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
407 	ip4h_checked.check = 0;
408 	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
409 	/* if IPv4 header checksum is OK, believe it */
410 	if (ip4h->check == ip4h_checked.check)
411 		return 4;
412 	return 6;
413 }
414 EXPORT_SYMBOL(ib_get_rdma_header_version);
415 
416 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
417 						     u8 port_num,
418 						     const struct ib_grh *grh)
419 {
420 	int grh_version;
421 
422 	if (rdma_protocol_ib(device, port_num))
423 		return RDMA_NETWORK_IB;
424 
425 	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
426 
427 	if (grh_version == 4)
428 		return RDMA_NETWORK_IPV4;
429 
430 	if (grh->next_hdr == IPPROTO_UDP)
431 		return RDMA_NETWORK_IPV6;
432 
433 	return RDMA_NETWORK_ROCE_V1;
434 }
435 
436 struct find_gid_index_context {
437 	u16 vlan_id;
438 	enum ib_gid_type gid_type;
439 };
440 
441 static bool find_gid_index(const union ib_gid *gid,
442 			   const struct ib_gid_attr *gid_attr,
443 			   void *context)
444 {
445 	struct find_gid_index_context *ctx = context;
446 
447 	if (ctx->gid_type != gid_attr->gid_type)
448 		return false;
449 
450 	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
451 	    (is_vlan_dev(gid_attr->ndev) &&
452 	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
453 		return false;
454 
455 	return true;
456 }
457 
458 static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
459 				   u16 vlan_id, const union ib_gid *sgid,
460 				   enum ib_gid_type gid_type,
461 				   u16 *gid_index)
462 {
463 	struct find_gid_index_context context = {.vlan_id = vlan_id,
464 						 .gid_type = gid_type};
465 
466 	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
467 				     &context, gid_index);
468 }
469 
470 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
471 			      enum rdma_network_type net_type,
472 			      union ib_gid *sgid, union ib_gid *dgid)
473 {
474 	struct sockaddr_in  src_in;
475 	struct sockaddr_in  dst_in;
476 	__be32 src_saddr, dst_saddr;
477 
478 	if (!sgid || !dgid)
479 		return -EINVAL;
480 
481 	if (net_type == RDMA_NETWORK_IPV4) {
482 		memcpy(&src_in.sin_addr.s_addr,
483 		       &hdr->roce4grh.saddr, 4);
484 		memcpy(&dst_in.sin_addr.s_addr,
485 		       &hdr->roce4grh.daddr, 4);
486 		src_saddr = src_in.sin_addr.s_addr;
487 		dst_saddr = dst_in.sin_addr.s_addr;
488 		ipv6_addr_set_v4mapped(src_saddr,
489 				       (struct in6_addr *)sgid);
490 		ipv6_addr_set_v4mapped(dst_saddr,
491 				       (struct in6_addr *)dgid);
492 		return 0;
493 	} else if (net_type == RDMA_NETWORK_IPV6 ||
494 		   net_type == RDMA_NETWORK_IB) {
495 		*dgid = hdr->ibgrh.dgid;
496 		*sgid = hdr->ibgrh.sgid;
497 		return 0;
498 	} else {
499 		return -EINVAL;
500 	}
501 }
502 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
503 
504 /* Resolve the destination mac address and hop limit for a unicast
505  * destination GID entry, considering the source GID entry as well.
506  * ah_attr must have valid port_num and sgid_index.
507  */
508 static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
509 				       struct rdma_ah_attr *ah_attr)
510 {
511 	struct ib_gid_attr sgid_attr;
512 	struct ib_global_route *grh;
513 	int hop_limit = 0xff;
514 	union ib_gid sgid;
515 	int ret;
516 
517 	grh = rdma_ah_retrieve_grh(ah_attr);
518 
519 	ret = ib_query_gid(device,
520 			   rdma_ah_get_port_num(ah_attr),
521 			   grh->sgid_index,
522 			   &sgid, &sgid_attr);
523 	if (ret || !sgid_attr.ndev) {
524 		if (!ret)
525 			ret = -ENXIO;
526 		return ret;
527 	}
528 
529 	/* If destination is link local and source GID is RoCEv1,
530 	 * IP stack is not used.
531 	 */
532 	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
533 	    sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
534 		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
535 				ah_attr->roce.dmac);
536 		goto done;
537 	}
538 
539 	ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
540 					   ah_attr->roce.dmac,
541 					   sgid_attr.ndev, &hop_limit);
542 done:
543 	dev_put(sgid_attr.ndev);
544 
545 	grh->hop_limit = hop_limit;
546 	return ret;
547 }
548 
549 /*
550  * This function initializes address handle attributes from an incoming
551  * packet.  The incoming packet carries the dgid of the receiver node on
552  * which this code is executing, while its sgid contains the GID of the
553  * sender.
554  *
555  * When resolving the destination mac address, the received dgid is used
556  * as the sgid and the received sgid as the dgid, because the sgid holds
557  * the GID of the destination to respond to.
558  */
559 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
560 			    const struct ib_wc *wc, const struct ib_grh *grh,
561 			    struct rdma_ah_attr *ah_attr)
562 {
563 	u32 flow_class;
564 	u16 gid_index;
565 	int ret;
566 	enum rdma_network_type net_type = RDMA_NETWORK_IB;
567 	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
568 	int hoplimit = 0xff;
569 	union ib_gid dgid;
570 	union ib_gid sgid;
571 
572 	might_sleep();
573 
574 	memset(ah_attr, 0, sizeof *ah_attr);
575 	ah_attr->type = rdma_ah_find_type(device, port_num);
576 	if (rdma_cap_eth_ah(device, port_num)) {
577 		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
578 			net_type = wc->network_hdr_type;
579 		else
580 			net_type = ib_get_net_type_by_grh(device, port_num, grh);
581 		gid_type = ib_network_to_gid_type(net_type);
582 	}
583 	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
584 					&sgid, &dgid);
585 	if (ret)
586 		return ret;
587 
588 	rdma_ah_set_sl(ah_attr, wc->sl);
589 	rdma_ah_set_port_num(ah_attr, port_num);
590 
591 	if (rdma_protocol_roce(device, port_num)) {
592 		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
593 				wc->vlan_id : 0xffff;
594 
595 		if (!(wc->wc_flags & IB_WC_GRH))
596 			return -EPROTOTYPE;
597 
598 		ret = get_sgid_index_from_eth(device, port_num,
599 					      vlan_id, &dgid,
600 					      gid_type, &gid_index);
601 		if (ret)
602 			return ret;
603 
604 		flow_class = be32_to_cpu(grh->version_tclass_flow);
605 		rdma_ah_set_grh(ah_attr, &sgid,
606 				flow_class & 0xFFFFF,
607 				(u8)gid_index, hoplimit,
608 				(flow_class >> 20) & 0xFF);
609 		return ib_resolve_unicast_gid_dmac(device, ah_attr);
610 	} else {
611 		rdma_ah_set_dlid(ah_attr, wc->slid);
612 		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
613 
614 		if (wc->wc_flags & IB_WC_GRH) {
615 			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
616 				ret = ib_find_cached_gid_by_port(device, &dgid,
617 								 IB_GID_TYPE_IB,
618 								 port_num, NULL,
619 								 &gid_index);
620 				if (ret)
621 					return ret;
622 			} else {
623 				gid_index = 0;
624 			}
625 
626 			flow_class = be32_to_cpu(grh->version_tclass_flow);
627 			rdma_ah_set_grh(ah_attr, &sgid,
628 					flow_class & 0xFFFFF,
629 					(u8)gid_index, hoplimit,
630 					(flow_class >> 20) & 0xFF);
631 		}
632 		return 0;
633 	}
634 }
635 EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
636 
637 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
638 				   const struct ib_grh *grh, u8 port_num)
639 {
640 	struct rdma_ah_attr ah_attr;
641 	int ret;
642 
643 	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
644 	if (ret)
645 		return ERR_PTR(ret);
646 
647 	return rdma_create_ah(pd, &ah_attr);
648 }
649 EXPORT_SYMBOL(ib_create_ah_from_wc);
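
/*
 * Illustrative sketch: a UD responder typically uses ib_create_ah_from_wc()
 * to reply to the sender of a received datagram.  "recv_wc" and "grh_buf"
 * are hypothetical; when a GRH is present it occupies the first 40 bytes
 * of the UD receive buffer.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, &recv_wc, grh_buf, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	// post the reply on the UD QP using "ah", then rdma_destroy_ah(ah)
 */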
650 
651 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
652 {
653 	if (ah->type != ah_attr->type)
654 		return -EINVAL;
655 
656 	return ah->device->modify_ah ?
657 		ah->device->modify_ah(ah, ah_attr) :
658 		-ENOSYS;
659 }
660 EXPORT_SYMBOL(rdma_modify_ah);
661 
662 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
663 {
664 	return ah->device->query_ah ?
665 		ah->device->query_ah(ah, ah_attr) :
666 		-ENOSYS;
667 }
668 EXPORT_SYMBOL(rdma_query_ah);
669 
670 int rdma_destroy_ah(struct ib_ah *ah)
671 {
672 	struct ib_pd *pd;
673 	int ret;
674 
675 	pd = ah->pd;
676 	ret = ah->device->destroy_ah(ah);
677 	if (!ret)
678 		atomic_dec(&pd->usecnt);
679 
680 	return ret;
681 }
682 EXPORT_SYMBOL(rdma_destroy_ah);
683 
684 /* Shared receive queues */
685 
686 struct ib_srq *ib_create_srq(struct ib_pd *pd,
687 			     struct ib_srq_init_attr *srq_init_attr)
688 {
689 	struct ib_srq *srq;
690 
691 	if (!pd->device->create_srq)
692 		return ERR_PTR(-ENOSYS);
693 
694 	srq = pd->device->create_srq(pd, srq_init_attr, NULL);
695 
696 	if (!IS_ERR(srq)) {
697 		srq->device    	   = pd->device;
698 		srq->pd        	   = pd;
699 		srq->uobject       = NULL;
700 		srq->event_handler = srq_init_attr->event_handler;
701 		srq->srq_context   = srq_init_attr->srq_context;
702 		srq->srq_type      = srq_init_attr->srq_type;
703 		if (ib_srq_has_cq(srq->srq_type)) {
704 			srq->ext.cq   = srq_init_attr->ext.cq;
705 			atomic_inc(&srq->ext.cq->usecnt);
706 		}
707 		if (srq->srq_type == IB_SRQT_XRC) {
708 			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
709 			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
710 		}
711 		atomic_inc(&pd->usecnt);
712 		atomic_set(&srq->usecnt, 0);
713 	}
714 
715 	return srq;
716 }
717 EXPORT_SYMBOL(ib_create_srq);
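
/*
 * Illustrative sketch (hypothetical sizes): creating a basic SRQ.  For
 * IB_SRQT_BASIC no CQ or XRCD is referenced; only the PD usecnt is bumped.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = {
 *			.max_wr		= 256,
 *			.max_sge	= 1,
 *		},
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */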
718 
719 int ib_modify_srq(struct ib_srq *srq,
720 		  struct ib_srq_attr *srq_attr,
721 		  enum ib_srq_attr_mask srq_attr_mask)
722 {
723 	return srq->device->modify_srq ?
724 		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
725 		-ENOSYS;
726 }
727 EXPORT_SYMBOL(ib_modify_srq);
728 
729 int ib_query_srq(struct ib_srq *srq,
730 		 struct ib_srq_attr *srq_attr)
731 {
732 	return srq->device->query_srq ?
733 		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
734 }
735 EXPORT_SYMBOL(ib_query_srq);
736 
737 int ib_destroy_srq(struct ib_srq *srq)
738 {
739 	struct ib_pd *pd;
740 	enum ib_srq_type srq_type;
741 	struct ib_xrcd *uninitialized_var(xrcd);
742 	struct ib_cq *uninitialized_var(cq);
743 	int ret;
744 
745 	if (atomic_read(&srq->usecnt))
746 		return -EBUSY;
747 
748 	pd = srq->pd;
749 	srq_type = srq->srq_type;
750 	if (ib_srq_has_cq(srq_type))
751 		cq = srq->ext.cq;
752 	if (srq_type == IB_SRQT_XRC)
753 		xrcd = srq->ext.xrc.xrcd;
754 
755 	ret = srq->device->destroy_srq(srq);
756 	if (!ret) {
757 		atomic_dec(&pd->usecnt);
758 		if (srq_type == IB_SRQT_XRC)
759 			atomic_dec(&xrcd->usecnt);
760 		if (ib_srq_has_cq(srq_type))
761 			atomic_dec(&cq->usecnt);
762 	}
763 
764 	return ret;
765 }
766 EXPORT_SYMBOL(ib_destroy_srq);
767 
768 /* Queue pairs */
769 
770 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
771 {
772 	struct ib_qp *qp = context;
773 	unsigned long flags;
774 
775 	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
776 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
777 		if (event->element.qp->event_handler)
778 			event->element.qp->event_handler(event, event->element.qp->qp_context);
779 	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
780 }
781 
782 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
783 {
784 	mutex_lock(&xrcd->tgt_qp_mutex);
785 	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
786 	mutex_unlock(&xrcd->tgt_qp_mutex);
787 }
788 
789 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
790 				  void (*event_handler)(struct ib_event *, void *),
791 				  void *qp_context)
792 {
793 	struct ib_qp *qp;
794 	unsigned long flags;
795 	int err;
796 
797 	qp = kzalloc(sizeof *qp, GFP_KERNEL);
798 	if (!qp)
799 		return ERR_PTR(-ENOMEM);
800 
801 	qp->real_qp = real_qp;
802 	err = ib_open_shared_qp_security(qp, real_qp->device);
803 	if (err) {
804 		kfree(qp);
805 		return ERR_PTR(err);
806 	}
807 
808 	qp->real_qp = real_qp;
809 	atomic_inc(&real_qp->usecnt);
810 	qp->device = real_qp->device;
811 	qp->event_handler = event_handler;
812 	qp->qp_context = qp_context;
813 	qp->qp_num = real_qp->qp_num;
814 	qp->qp_type = real_qp->qp_type;
815 
816 	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
817 	list_add(&qp->open_list, &real_qp->open_list);
818 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
819 
820 	return qp;
821 }
822 
823 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
824 			 struct ib_qp_open_attr *qp_open_attr)
825 {
826 	struct ib_qp *qp, *real_qp;
827 
828 	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
829 		return ERR_PTR(-EINVAL);
830 
831 	qp = ERR_PTR(-EINVAL);
832 	mutex_lock(&xrcd->tgt_qp_mutex);
833 	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
834 		if (real_qp->qp_num == qp_open_attr->qp_num) {
835 			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
836 					  qp_open_attr->qp_context);
837 			break;
838 		}
839 	}
840 	mutex_unlock(&xrcd->tgt_qp_mutex);
841 	return qp;
842 }
843 EXPORT_SYMBOL(ib_open_qp);
844 
845 static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
846 		struct ib_qp_init_attr *qp_init_attr)
847 {
848 	struct ib_qp *real_qp = qp;
849 
850 	qp->event_handler = __ib_shared_qp_event_handler;
851 	qp->qp_context = qp;
852 	qp->pd = NULL;
853 	qp->send_cq = qp->recv_cq = NULL;
854 	qp->srq = NULL;
855 	qp->xrcd = qp_init_attr->xrcd;
856 	atomic_inc(&qp_init_attr->xrcd->usecnt);
857 	INIT_LIST_HEAD(&qp->open_list);
858 
859 	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
860 			  qp_init_attr->qp_context);
861 	if (!IS_ERR(qp))
862 		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
863 	else
864 		real_qp->device->destroy_qp(real_qp);
865 	return qp;
866 }
867 
868 struct ib_qp *ib_create_qp(struct ib_pd *pd,
869 			   struct ib_qp_init_attr *qp_init_attr)
870 {
871 	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
872 	struct ib_qp *qp;
873 	int ret;
874 
875 	if (qp_init_attr->rwq_ind_tbl &&
876 	    (qp_init_attr->recv_cq ||
877 	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
878 	    qp_init_attr->cap.max_recv_sge))
879 		return ERR_PTR(-EINVAL);
880 
881 	/*
882 	 * If the caller is using the RDMA API, calculate the resources
883 	 * needed for the RDMA READ/WRITE operations.
884 	 *
885 	 * Note that these callers need to pass in a port number.
886 	 */
887 	if (qp_init_attr->cap.max_rdma_ctxs)
888 		rdma_rw_init_qp(device, qp_init_attr);
889 
890 	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
891 	if (IS_ERR(qp))
892 		return qp;
893 
894 	ret = ib_create_qp_security(qp, device);
895 	if (ret) {
896 		ib_destroy_qp(qp);
897 		return ERR_PTR(ret);
898 	}
899 
900 	qp->real_qp    = qp;
901 	qp->qp_type    = qp_init_attr->qp_type;
902 	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
903 
904 	atomic_set(&qp->usecnt, 0);
905 	qp->mrs_used = 0;
906 	spin_lock_init(&qp->mr_lock);
907 	INIT_LIST_HEAD(&qp->rdma_mrs);
908 	INIT_LIST_HEAD(&qp->sig_mrs);
909 	qp->port = 0;
910 
911 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
912 		return ib_create_xrc_qp(qp, qp_init_attr);
913 
914 	qp->event_handler = qp_init_attr->event_handler;
915 	qp->qp_context = qp_init_attr->qp_context;
916 	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
917 		qp->recv_cq = NULL;
918 		qp->srq = NULL;
919 	} else {
920 		qp->recv_cq = qp_init_attr->recv_cq;
921 		if (qp_init_attr->recv_cq)
922 			atomic_inc(&qp_init_attr->recv_cq->usecnt);
923 		qp->srq = qp_init_attr->srq;
924 		if (qp->srq)
925 			atomic_inc(&qp_init_attr->srq->usecnt);
926 	}
927 
928 	qp->send_cq = qp_init_attr->send_cq;
929 	qp->xrcd    = NULL;
930 
931 	atomic_inc(&pd->usecnt);
932 	if (qp_init_attr->send_cq)
933 		atomic_inc(&qp_init_attr->send_cq->usecnt);
934 	if (qp_init_attr->rwq_ind_tbl)
935 		atomic_inc(&qp->rwq_ind_tbl->usecnt);
936 
937 	if (qp_init_attr->cap.max_rdma_ctxs) {
938 		ret = rdma_rw_init_mrs(qp, qp_init_attr);
939 		if (ret) {
940 			pr_err("failed to init MR pool ret= %d\n", ret);
941 			ib_destroy_qp(qp);
942 			return ERR_PTR(ret);
943 		}
944 	}
945 
946 	/*
947 	 * Note: all hw drivers guarantee that max_send_sge is lower than
948 	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
949 	 * max_send_sge <= max_sge_rd.
950 	 */
951 	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
952 	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
953 				 device->attrs.max_sge_rd);
954 
955 	return qp;
956 }
957 EXPORT_SYMBOL(ib_create_qp);
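
/*
 * Illustrative sketch (hypothetical capacities): creating an RC QP that
 * shares one CQ between send and receive queues.  Setting
 * cap.max_rdma_ctxs would additionally trigger the rdma_rw_init_qp()
 * sizing seen above.
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq	= cq,
 *		.recv_cq	= cq,
 *		.cap		= {
 *			.max_send_wr	= 128,
 *			.max_recv_wr	= 128,
 *			.max_send_sge	= 2,
 *			.max_recv_sge	= 2,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */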
958 
959 static const struct {
960 	int			valid;
961 	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
962 	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
963 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
964 	[IB_QPS_RESET] = {
965 		[IB_QPS_RESET] = { .valid = 1 },
966 		[IB_QPS_INIT]  = {
967 			.valid = 1,
968 			.req_param = {
969 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
970 						IB_QP_PORT			|
971 						IB_QP_QKEY),
972 				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
973 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
974 						IB_QP_PORT			|
975 						IB_QP_ACCESS_FLAGS),
976 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
977 						IB_QP_PORT			|
978 						IB_QP_ACCESS_FLAGS),
979 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
980 						IB_QP_PORT			|
981 						IB_QP_ACCESS_FLAGS),
982 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
983 						IB_QP_PORT			|
984 						IB_QP_ACCESS_FLAGS),
985 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
986 						IB_QP_QKEY),
987 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
988 						IB_QP_QKEY),
989 			}
990 		},
991 	},
992 	[IB_QPS_INIT]  = {
993 		[IB_QPS_RESET] = { .valid = 1 },
994 		[IB_QPS_ERR] =   { .valid = 1 },
995 		[IB_QPS_INIT]  = {
996 			.valid = 1,
997 			.opt_param = {
998 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
999 						IB_QP_PORT			|
1000 						IB_QP_QKEY),
1001 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
1002 						IB_QP_PORT			|
1003 						IB_QP_ACCESS_FLAGS),
1004 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
1005 						IB_QP_PORT			|
1006 						IB_QP_ACCESS_FLAGS),
1007 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1008 						IB_QP_PORT			|
1009 						IB_QP_ACCESS_FLAGS),
1010 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1011 						IB_QP_PORT			|
1012 						IB_QP_ACCESS_FLAGS),
1013 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1014 						IB_QP_QKEY),
1015 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1016 						IB_QP_QKEY),
1017 			}
1018 		},
1019 		[IB_QPS_RTR]   = {
1020 			.valid = 1,
1021 			.req_param = {
1022 				[IB_QPT_UC]  = (IB_QP_AV			|
1023 						IB_QP_PATH_MTU			|
1024 						IB_QP_DEST_QPN			|
1025 						IB_QP_RQ_PSN),
1026 				[IB_QPT_RC]  = (IB_QP_AV			|
1027 						IB_QP_PATH_MTU			|
1028 						IB_QP_DEST_QPN			|
1029 						IB_QP_RQ_PSN			|
1030 						IB_QP_MAX_DEST_RD_ATOMIC	|
1031 						IB_QP_MIN_RNR_TIMER),
1032 				[IB_QPT_XRC_INI] = (IB_QP_AV			|
1033 						IB_QP_PATH_MTU			|
1034 						IB_QP_DEST_QPN			|
1035 						IB_QP_RQ_PSN),
1036 				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
1037 						IB_QP_PATH_MTU			|
1038 						IB_QP_DEST_QPN			|
1039 						IB_QP_RQ_PSN			|
1040 						IB_QP_MAX_DEST_RD_ATOMIC	|
1041 						IB_QP_MIN_RNR_TIMER),
1042 			},
1043 			.opt_param = {
1044 				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1045 						 IB_QP_QKEY),
1046 				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
1047 						 IB_QP_ACCESS_FLAGS		|
1048 						 IB_QP_PKEY_INDEX),
1049 				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
1050 						 IB_QP_ACCESS_FLAGS		|
1051 						 IB_QP_PKEY_INDEX),
1052 				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
1053 						 IB_QP_ACCESS_FLAGS		|
1054 						 IB_QP_PKEY_INDEX),
1055 				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
1056 						 IB_QP_ACCESS_FLAGS		|
1057 						 IB_QP_PKEY_INDEX),
1058 				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1059 						 IB_QP_QKEY),
1060 				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1061 						 IB_QP_QKEY),
1062 			 },
1063 		},
1064 	},
1065 	[IB_QPS_RTR]   = {
1066 		[IB_QPS_RESET] = { .valid = 1 },
1067 		[IB_QPS_ERR] =   { .valid = 1 },
1068 		[IB_QPS_RTS]   = {
1069 			.valid = 1,
1070 			.req_param = {
1071 				[IB_QPT_UD]  = IB_QP_SQ_PSN,
1072 				[IB_QPT_UC]  = IB_QP_SQ_PSN,
1073 				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
1074 						IB_QP_RETRY_CNT			|
1075 						IB_QP_RNR_RETRY			|
1076 						IB_QP_SQ_PSN			|
1077 						IB_QP_MAX_QP_RD_ATOMIC),
1078 				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
1079 						IB_QP_RETRY_CNT			|
1080 						IB_QP_RNR_RETRY			|
1081 						IB_QP_SQ_PSN			|
1082 						IB_QP_MAX_QP_RD_ATOMIC),
1083 				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
1084 						IB_QP_SQ_PSN),
1085 				[IB_QPT_SMI] = IB_QP_SQ_PSN,
1086 				[IB_QPT_GSI] = IB_QP_SQ_PSN,
1087 			},
1088 			.opt_param = {
1089 				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
1090 						 IB_QP_QKEY),
1091 				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
1092 						 IB_QP_ALT_PATH			|
1093 						 IB_QP_ACCESS_FLAGS		|
1094 						 IB_QP_PATH_MIG_STATE),
1095 				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
1096 						 IB_QP_ALT_PATH			|
1097 						 IB_QP_ACCESS_FLAGS		|
1098 						 IB_QP_MIN_RNR_TIMER		|
1099 						 IB_QP_PATH_MIG_STATE),
1100 				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1101 						 IB_QP_ALT_PATH			|
1102 						 IB_QP_ACCESS_FLAGS		|
1103 						 IB_QP_PATH_MIG_STATE),
1104 				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1105 						 IB_QP_ALT_PATH			|
1106 						 IB_QP_ACCESS_FLAGS		|
1107 						 IB_QP_MIN_RNR_TIMER		|
1108 						 IB_QP_PATH_MIG_STATE),
1109 				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
1110 						 IB_QP_QKEY),
1111 				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
1112 						 IB_QP_QKEY),
1113 				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1114 			 }
1115 		}
1116 	},
1117 	[IB_QPS_RTS]   = {
1118 		[IB_QPS_RESET] = { .valid = 1 },
1119 		[IB_QPS_ERR] =   { .valid = 1 },
1120 		[IB_QPS_RTS]   = {
1121 			.valid = 1,
1122 			.opt_param = {
1123 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1124 						IB_QP_QKEY),
1125 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1126 						IB_QP_ACCESS_FLAGS		|
1127 						IB_QP_ALT_PATH			|
1128 						IB_QP_PATH_MIG_STATE),
1129 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1130 						IB_QP_ACCESS_FLAGS		|
1131 						IB_QP_ALT_PATH			|
1132 						IB_QP_PATH_MIG_STATE		|
1133 						IB_QP_MIN_RNR_TIMER),
1134 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1135 						IB_QP_ACCESS_FLAGS		|
1136 						IB_QP_ALT_PATH			|
1137 						IB_QP_PATH_MIG_STATE),
1138 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1139 						IB_QP_ACCESS_FLAGS		|
1140 						IB_QP_ALT_PATH			|
1141 						IB_QP_PATH_MIG_STATE		|
1142 						IB_QP_MIN_RNR_TIMER),
1143 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1144 						IB_QP_QKEY),
1145 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1146 						IB_QP_QKEY),
1147 				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1148 			}
1149 		},
1150 		[IB_QPS_SQD]   = {
1151 			.valid = 1,
1152 			.opt_param = {
1153 				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1154 				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1155 				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1156 				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1157 				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1158 				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1159 				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1160 			}
1161 		},
1162 	},
1163 	[IB_QPS_SQD]   = {
1164 		[IB_QPS_RESET] = { .valid = 1 },
1165 		[IB_QPS_ERR] =   { .valid = 1 },
1166 		[IB_QPS_RTS]   = {
1167 			.valid = 1,
1168 			.opt_param = {
1169 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1170 						IB_QP_QKEY),
1171 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1172 						IB_QP_ALT_PATH			|
1173 						IB_QP_ACCESS_FLAGS		|
1174 						IB_QP_PATH_MIG_STATE),
1175 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1176 						IB_QP_ALT_PATH			|
1177 						IB_QP_ACCESS_FLAGS		|
1178 						IB_QP_MIN_RNR_TIMER		|
1179 						IB_QP_PATH_MIG_STATE),
1180 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1181 						IB_QP_ALT_PATH			|
1182 						IB_QP_ACCESS_FLAGS		|
1183 						IB_QP_PATH_MIG_STATE),
1184 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1185 						IB_QP_ALT_PATH			|
1186 						IB_QP_ACCESS_FLAGS		|
1187 						IB_QP_MIN_RNR_TIMER		|
1188 						IB_QP_PATH_MIG_STATE),
1189 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1190 						IB_QP_QKEY),
1191 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1192 						IB_QP_QKEY),
1193 			}
1194 		},
1195 		[IB_QPS_SQD]   = {
1196 			.valid = 1,
1197 			.opt_param = {
1198 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1199 						IB_QP_QKEY),
1200 				[IB_QPT_UC]  = (IB_QP_AV			|
1201 						IB_QP_ALT_PATH			|
1202 						IB_QP_ACCESS_FLAGS		|
1203 						IB_QP_PKEY_INDEX		|
1204 						IB_QP_PATH_MIG_STATE),
1205 				[IB_QPT_RC]  = (IB_QP_PORT			|
1206 						IB_QP_AV			|
1207 						IB_QP_TIMEOUT			|
1208 						IB_QP_RETRY_CNT			|
1209 						IB_QP_RNR_RETRY			|
1210 						IB_QP_MAX_QP_RD_ATOMIC		|
1211 						IB_QP_MAX_DEST_RD_ATOMIC	|
1212 						IB_QP_ALT_PATH			|
1213 						IB_QP_ACCESS_FLAGS		|
1214 						IB_QP_PKEY_INDEX		|
1215 						IB_QP_MIN_RNR_TIMER		|
1216 						IB_QP_PATH_MIG_STATE),
1217 				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
1218 						IB_QP_AV			|
1219 						IB_QP_TIMEOUT			|
1220 						IB_QP_RETRY_CNT			|
1221 						IB_QP_RNR_RETRY			|
1222 						IB_QP_MAX_QP_RD_ATOMIC		|
1223 						IB_QP_ALT_PATH			|
1224 						IB_QP_ACCESS_FLAGS		|
1225 						IB_QP_PKEY_INDEX		|
1226 						IB_QP_PATH_MIG_STATE),
1227 				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
1228 						IB_QP_AV			|
1229 						IB_QP_TIMEOUT			|
1230 						IB_QP_MAX_DEST_RD_ATOMIC	|
1231 						IB_QP_ALT_PATH			|
1232 						IB_QP_ACCESS_FLAGS		|
1233 						IB_QP_PKEY_INDEX		|
1234 						IB_QP_MIN_RNR_TIMER		|
1235 						IB_QP_PATH_MIG_STATE),
1236 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1237 						IB_QP_QKEY),
1238 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1239 						IB_QP_QKEY),
1240 			}
1241 		}
1242 	},
1243 	[IB_QPS_SQE]   = {
1244 		[IB_QPS_RESET] = { .valid = 1 },
1245 		[IB_QPS_ERR] =   { .valid = 1 },
1246 		[IB_QPS_RTS]   = {
1247 			.valid = 1,
1248 			.opt_param = {
1249 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1250 						IB_QP_QKEY),
1251 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1252 						IB_QP_ACCESS_FLAGS),
1253 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1254 						IB_QP_QKEY),
1255 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1256 						IB_QP_QKEY),
1257 			}
1258 		}
1259 	},
1260 	[IB_QPS_ERR] = {
1261 		[IB_QPS_RESET] = { .valid = 1 },
1262 		[IB_QPS_ERR] =   { .valid = 1 }
1263 	}
1264 };
1265 
1266 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1267 		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
1268 		       enum rdma_link_layer ll)
1269 {
1270 	enum ib_qp_attr_mask req_param, opt_param;
1271 
1272 	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
1273 	    next_state < 0 || next_state > IB_QPS_ERR)
1274 		return 0;
1275 
1276 	if (mask & IB_QP_CUR_STATE  &&
1277 	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1278 	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1279 		return 0;
1280 
1281 	if (!qp_state_table[cur_state][next_state].valid)
1282 		return 0;
1283 
1284 	req_param = qp_state_table[cur_state][next_state].req_param[type];
1285 	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1286 
1287 	if ((mask & req_param) != req_param)
1288 		return 0;
1289 
1290 	if (mask & ~(req_param | opt_param | IB_QP_STATE))
1291 		return 0;
1292 
1293 	return 1;
1294 }
1295 EXPORT_SYMBOL(ib_modify_qp_is_ok);
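
/*
 * Illustrative sketch: consulting the state table above for a RESET->INIT
 * transition of an RC QP.  The mask must contain every req_param bit for
 * the QP type and may only add bits from opt_param (plus IB_QP_STATE).
 *
 *	int mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *		   IB_QP_ACCESS_FLAGS;
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *				mask, IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;	// cannot happen for this particular mask
 */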
1296 
1297 static int ib_resolve_eth_dmac(struct ib_device *device,
1298 			       struct rdma_ah_attr *ah_attr)
1299 {
1300 	int           ret = 0;
1301 	struct ib_global_route *grh;
1302 
1303 	if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
1304 		return -EINVAL;
1305 
1306 	grh = rdma_ah_retrieve_grh(ah_attr);
1307 
1308 	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1309 		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1310 			__be32 addr = 0;
1311 
1312 			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1313 			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1314 		} else {
1315 			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1316 					(char *)ah_attr->roce.dmac);
1317 		}
1318 	} else {
1319 		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1320 	}
1321 	return ret;
1322 }
1323 
1324 /**
1325  * IB core internal function to perform QP attribute modification.
1326  */
1327 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1328 			 int attr_mask, struct ib_udata *udata)
1329 {
1330 	u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1331 	int ret;
1332 
1333 	if (rdma_ib_or_roce(qp->device, port)) {
1334 		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1335 			pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
1336 				__func__, qp->device->name);
1337 			attr->rq_psn &= 0xffffff;
1338 		}
1339 
1340 		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1341 			pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
1342 				__func__, qp->device->name);
1343 			attr->sq_psn &= 0xffffff;
1344 		}
1345 	}
1346 
1347 	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1348 	if (!ret && (attr_mask & IB_QP_PORT))
1349 		qp->port = attr->port_num;
1350 
1351 	return ret;
1352 }
1353 
1354 static bool is_qp_type_connected(const struct ib_qp *qp)
1355 {
1356 	return (qp->qp_type == IB_QPT_UC ||
1357 		qp->qp_type == IB_QPT_RC ||
1358 		qp->qp_type == IB_QPT_XRC_INI ||
1359 		qp->qp_type == IB_QPT_XRC_TGT);
1360 }
1361 
1362 /**
1363  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1364  * @ib_qp: The QP to modify.
1365  * @attr: On input, specifies the QP attributes to modify.  On output,
1366  *   the current values of selected QP attributes are returned.
1367  * @attr_mask: A bit-mask used to specify which attributes of the QP
1368  *   are being modified.
1369  * @udata: pointer to the user's input/output buffer information needed by
1370  *   the provider driver.
1371  * It returns 0 on success and an appropriate error code on error.
1372  */
1373 int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1374 			    int attr_mask, struct ib_udata *udata)
1375 {
1376 	struct ib_qp *qp = ib_qp->real_qp;
1377 	int ret;
1378 
1379 	if (attr_mask & IB_QP_AV &&
1380 	    attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1381 	    is_qp_type_connected(qp)) {
1382 		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
1383 		if (ret)
1384 			return ret;
1385 	}
1386 	return _ib_modify_qp(qp, attr, attr_mask, udata);
1387 }
1388 EXPORT_SYMBOL(ib_modify_qp_with_udata);
1389 
1390 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1391 {
1392 	int rc;
1393 	u32 netdev_speed;
1394 	struct net_device *netdev;
1395 	struct ethtool_link_ksettings lksettings;
1396 
1397 	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1398 		return -EINVAL;
1399 
1400 	if (!dev->get_netdev)
1401 		return -EOPNOTSUPP;
1402 
1403 	netdev = dev->get_netdev(dev, port_num);
1404 	if (!netdev)
1405 		return -ENODEV;
1406 
1407 	rtnl_lock();
1408 	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1409 	rtnl_unlock();
1410 
1411 	dev_put(netdev);
1412 
1413 	if (!rc) {
1414 		netdev_speed = lksettings.base.speed;
1415 	} else {
1416 		netdev_speed = SPEED_1000;
1417 		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1418 			netdev_speed);
1419 	}
1420 
1421 	if (netdev_speed <= SPEED_1000) {
1422 		*width = IB_WIDTH_1X;
1423 		*speed = IB_SPEED_SDR;
1424 	} else if (netdev_speed <= SPEED_10000) {
1425 		*width = IB_WIDTH_1X;
1426 		*speed = IB_SPEED_FDR10;
1427 	} else if (netdev_speed <= SPEED_20000) {
1428 		*width = IB_WIDTH_4X;
1429 		*speed = IB_SPEED_DDR;
1430 	} else if (netdev_speed <= SPEED_25000) {
1431 		*width = IB_WIDTH_1X;
1432 		*speed = IB_SPEED_EDR;
1433 	} else if (netdev_speed <= SPEED_40000) {
1434 		*width = IB_WIDTH_4X;
1435 		*speed = IB_SPEED_FDR10;
1436 	} else {
1437 		*width = IB_WIDTH_4X;
1438 		*speed = IB_SPEED_EDR;
1439 	}
1440 
1441 	return 0;
1442 }
1443 EXPORT_SYMBOL(ib_get_eth_speed);
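
/*
 * Illustrative sketch: a RoCE driver's query_port() handler can use
 * ib_get_eth_speed() to fill in the IB speed/width pair; per the mapping
 * above, e.g. a 25000 Mb/s netdev is reported as 1X EDR.  "props" is a
 * hypothetical struct ib_port_attr.
 *
 *	u8 speed, width;
 *
 *	if (!ib_get_eth_speed(ibdev, port_num, &speed, &width)) {
 *		props->active_speed = speed;
 *		props->active_width = width;
 *	}
 */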
1444 
1445 int ib_modify_qp(struct ib_qp *qp,
1446 		 struct ib_qp_attr *qp_attr,
1447 		 int qp_attr_mask)
1448 {
1449 	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1450 }
1451 EXPORT_SYMBOL(ib_modify_qp);
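
/*
 * Illustrative sketch (hypothetical remote parameters): moving an RC QP
 * from INIT to RTR, the second step of the classic connection bring-up.
 * The mask matches the req_param entries of the state table above.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state		= IB_QPS_RTR,
 *		.path_mtu		= IB_MTU_1024,
 *		.dest_qp_num		= remote_qpn,
 *		.rq_psn			= remote_psn,
 *		.max_dest_rd_atomic	= 1,
 *		.min_rnr_timer		= 12,
 *		.ah_attr		= remote_ah_attr,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 *			   IB_QP_DEST_QPN | IB_QP_RQ_PSN |
 *			   IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
 *	if (ret)
 *		return ret;
 */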
1452 
1453 int ib_query_qp(struct ib_qp *qp,
1454 		struct ib_qp_attr *qp_attr,
1455 		int qp_attr_mask,
1456 		struct ib_qp_init_attr *qp_init_attr)
1457 {
1458 	return qp->device->query_qp ?
1459 		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
1460 		-ENOSYS;
1461 }
1462 EXPORT_SYMBOL(ib_query_qp);
1463 
1464 int ib_close_qp(struct ib_qp *qp)
1465 {
1466 	struct ib_qp *real_qp;
1467 	unsigned long flags;
1468 
1469 	real_qp = qp->real_qp;
1470 	if (real_qp == qp)
1471 		return -EINVAL;
1472 
1473 	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
1474 	list_del(&qp->open_list);
1475 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
1476 
1477 	atomic_dec(&real_qp->usecnt);
1478 	if (qp->qp_sec)
1479 		ib_close_shared_qp_security(qp->qp_sec);
1480 	kfree(qp);
1481 
1482 	return 0;
1483 }
1484 EXPORT_SYMBOL(ib_close_qp);
1485 
1486 static int __ib_destroy_shared_qp(struct ib_qp *qp)
1487 {
1488 	struct ib_xrcd *xrcd;
1489 	struct ib_qp *real_qp;
1490 	int ret;
1491 
1492 	real_qp = qp->real_qp;
1493 	xrcd = real_qp->xrcd;
1494 
1495 	mutex_lock(&xrcd->tgt_qp_mutex);
1496 	ib_close_qp(qp);
1497 	if (atomic_read(&real_qp->usecnt) == 0)
1498 		list_del(&real_qp->xrcd_list);
1499 	else
1500 		real_qp = NULL;
1501 	mutex_unlock(&xrcd->tgt_qp_mutex);
1502 
1503 	if (real_qp) {
1504 		ret = ib_destroy_qp(real_qp);
1505 		if (!ret)
1506 			atomic_dec(&xrcd->usecnt);
1507 		else
1508 			__ib_insert_xrcd_qp(xrcd, real_qp);
1509 	}
1510 
1511 	return 0;
1512 }
1513 
1514 int ib_destroy_qp(struct ib_qp *qp)
1515 {
1516 	struct ib_pd *pd;
1517 	struct ib_cq *scq, *rcq;
1518 	struct ib_srq *srq;
1519 	struct ib_rwq_ind_table *ind_tbl;
1520 	struct ib_qp_security *sec;
1521 	int ret;
1522 
1523 	WARN_ON_ONCE(qp->mrs_used > 0);
1524 
1525 	if (atomic_read(&qp->usecnt))
1526 		return -EBUSY;
1527 
1528 	if (qp->real_qp != qp)
1529 		return __ib_destroy_shared_qp(qp);
1530 
1531 	pd   = qp->pd;
1532 	scq  = qp->send_cq;
1533 	rcq  = qp->recv_cq;
1534 	srq  = qp->srq;
1535 	ind_tbl = qp->rwq_ind_tbl;
1536 	sec  = qp->qp_sec;
1537 	if (sec)
1538 		ib_destroy_qp_security_begin(sec);
1539 
1540 	if (!qp->uobject)
1541 		rdma_rw_cleanup_mrs(qp);
1542 
1543 	rdma_restrack_del(&qp->res);
1544 	ret = qp->device->destroy_qp(qp);
1545 	if (!ret) {
1546 		if (pd)
1547 			atomic_dec(&pd->usecnt);
1548 		if (scq)
1549 			atomic_dec(&scq->usecnt);
1550 		if (rcq)
1551 			atomic_dec(&rcq->usecnt);
1552 		if (srq)
1553 			atomic_dec(&srq->usecnt);
1554 		if (ind_tbl)
1555 			atomic_dec(&ind_tbl->usecnt);
1556 		if (sec)
1557 			ib_destroy_qp_security_end(sec);
1558 	} else {
1559 		if (sec)
1560 			ib_destroy_qp_security_abort(sec);
1561 	}
1562 
1563 	return ret;
1564 }
1565 EXPORT_SYMBOL(ib_destroy_qp);
1566 
1567 /* Completion queues */
1568 
1569 struct ib_cq *ib_create_cq(struct ib_device *device,
1570 			   ib_comp_handler comp_handler,
1571 			   void (*event_handler)(struct ib_event *, void *),
1572 			   void *cq_context,
1573 			   const struct ib_cq_init_attr *cq_attr)
1574 {
1575 	struct ib_cq *cq;
1576 
1577 	cq = device->create_cq(device, cq_attr, NULL, NULL);
1578 
1579 	if (!IS_ERR(cq)) {
1580 		cq->device        = device;
1581 		cq->uobject       = NULL;
1582 		cq->comp_handler  = comp_handler;
1583 		cq->event_handler = event_handler;
1584 		cq->cq_context    = cq_context;
1585 		atomic_set(&cq->usecnt, 0);
1586 		cq->res.type = RDMA_RESTRACK_CQ;
1587 		rdma_restrack_add(&cq->res);
1588 	}
1589 
1590 	return cq;
1591 }
1592 EXPORT_SYMBOL(ib_create_cq);
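
/*
 * Illustrative sketch (hypothetical handler and sizes): creating a CQ and
 * arming it so that comp_handler fires on the next completion.
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe		= 256,
 *		.comp_vector	= 0,
 *	};
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */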
1593 
1594 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1595 {
1596 	return cq->device->modify_cq ?
1597 		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
1598 }
1599 EXPORT_SYMBOL(rdma_set_cq_moderation);
1600 
1601 int ib_destroy_cq(struct ib_cq *cq)
1602 {
1603 	if (atomic_read(&cq->usecnt))
1604 		return -EBUSY;
1605 
1606 	rdma_restrack_del(&cq->res);
1607 	return cq->device->destroy_cq(cq);
1608 }
1609 EXPORT_SYMBOL(ib_destroy_cq);
1610 
1611 int ib_resize_cq(struct ib_cq *cq, int cqe)
1612 {
1613 	return cq->device->resize_cq ?
1614 		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
1615 }
1616 EXPORT_SYMBOL(ib_resize_cq);
1617 
1618 /* Memory regions */
1619 
1620 int ib_dereg_mr(struct ib_mr *mr)
1621 {
1622 	struct ib_pd *pd = mr->pd;
1623 	int ret;
1624 
1625 	ret = mr->device->dereg_mr(mr);
1626 	if (!ret)
1627 		atomic_dec(&pd->usecnt);
1628 
1629 	return ret;
1630 }
1631 EXPORT_SYMBOL(ib_dereg_mr);
1632 
1633 /**
1634  * ib_alloc_mr() - Allocates a memory region
1635  * @pd:            protection domain associated with the region
1636  * @mr_type:       memory region type
1637  * @max_num_sg:    maximum sg entries available for registration.
1638  *
1639  * Notes:
1640  * Memory registration page/sg lists must not exceed max_num_sg.
1641  * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
1642  * max_num_sg * used_page_size.
1643  *
1644  */
1645 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1646 			  enum ib_mr_type mr_type,
1647 			  u32 max_num_sg)
1648 {
1649 	struct ib_mr *mr;
1650 
1651 	if (!pd->device->alloc_mr)
1652 		return ERR_PTR(-ENOSYS);
1653 
1654 	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
1655 	if (!IS_ERR(mr)) {
1656 		mr->device  = pd->device;
1657 		mr->pd      = pd;
1658 		mr->uobject = NULL;
1659 		atomic_inc(&pd->usecnt);
1660 		mr->need_inval = false;
1661 	}
1662 
1663 	return mr;
1664 }
1665 EXPORT_SYMBOL(ib_alloc_mr);
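
/*
 * Illustrative sketch: allocating a fast-registration MR and mapping a
 * scatterlist into it with ib_map_mr_sg().  "sg" and "nents" are
 * hypothetical, and nents must not exceed the max_num_sg passed here.
 *
 *	struct ib_mr *mr;
 *	int n;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 *	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return -EINVAL;	// else post an IB_WR_REG_MR work request
 */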
1666 
1667 /* "Fast" memory regions */
1668 
1669 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1670 			    int mr_access_flags,
1671 			    struct ib_fmr_attr *fmr_attr)
1672 {
1673 	struct ib_fmr *fmr;
1674 
1675 	if (!pd->device->alloc_fmr)
1676 		return ERR_PTR(-ENOSYS);
1677 
1678 	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1679 	if (!IS_ERR(fmr)) {
1680 		fmr->device = pd->device;
1681 		fmr->pd     = pd;
1682 		atomic_inc(&pd->usecnt);
1683 	}
1684 
1685 	return fmr;
1686 }
1687 EXPORT_SYMBOL(ib_alloc_fmr);
1688 
1689 int ib_unmap_fmr(struct list_head *fmr_list)
1690 {
1691 	struct ib_fmr *fmr;
1692 
1693 	if (list_empty(fmr_list))
1694 		return 0;
1695 
1696 	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1697 	return fmr->device->unmap_fmr(fmr_list);
1698 }
1699 EXPORT_SYMBOL(ib_unmap_fmr);
1700 
1701 int ib_dealloc_fmr(struct ib_fmr *fmr)
1702 {
1703 	struct ib_pd *pd;
1704 	int ret;
1705 
1706 	pd = fmr->pd;
1707 	ret = fmr->device->dealloc_fmr(fmr);
1708 	if (!ret)
1709 		atomic_dec(&pd->usecnt);
1710 
1711 	return ret;
1712 }
1713 EXPORT_SYMBOL(ib_dealloc_fmr);
1714 
1715 /* Multicast groups */
1716 
1717 static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
1718 {
1719 	struct ib_qp_init_attr init_attr = {};
1720 	struct ib_qp_attr attr = {};
1721 	int num_eth_ports = 0;
1722 	int port;
1723 
1724 	/* If QP state >= init, it is assigned to a port and we can check this
1725 	 * port only.
1726 	 */
1727 	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
1728 		if (attr.qp_state >= IB_QPS_INIT) {
1729 			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
1730 			    IB_LINK_LAYER_INFINIBAND)
1731 				return true;
1732 			goto lid_check;
1733 		}
1734 	}
1735 
1736 	/* Can't get a quick answer, iterate over all ports */
1737 	for (port = 0; port < qp->device->phys_port_cnt; port++)
1738 		if (rdma_port_get_link_layer(qp->device, port) !=
1739 		    IB_LINK_LAYER_INFINIBAND)
1740 			num_eth_ports++;
1741 
1742 	/* If we have at least one Ethernet port, the RoCE annex declares that
1743 	 * multicast LID should be ignored. We can't tell at this step if the
1744 	 * QP belongs to an IB or Ethernet port.
1745 	 */
1746 	if (num_eth_ports)
1747 		return true;
1748 
1749 	/* If all the ports are IB, we can check according to IB spec. */
1750 lid_check:
1751 	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1752 		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
1753 }
1754 
1755 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1756 {
1757 	int ret;
1758 
1759 	if (!qp->device->attach_mcast)
1760 		return -ENOSYS;
1761 
1762 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
1763 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
1764 		return -EINVAL;
1765 
1766 	ret = qp->device->attach_mcast(qp, gid, lid);
1767 	if (!ret)
1768 		atomic_inc(&qp->usecnt);
1769 	return ret;
1770 }
1771 EXPORT_SYMBOL(ib_attach_mcast);
1772 
1773 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1774 {
1775 	int ret;
1776 
1777 	if (!qp->device->detach_mcast)
1778 		return -ENOSYS;
1779 
1780 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
1781 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
1782 		return -EINVAL;
1783 
1784 	ret = qp->device->detach_mcast(qp, gid, lid);
1785 	if (!ret)
1786 		atomic_dec(&qp->usecnt);
1787 	return ret;
1788 }
1789 EXPORT_SYMBOL(ib_detach_mcast);
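
/*
 * Illustrative sketch: joining and leaving a multicast group on a UD QP.
 * The MGID/MLID pair would normally come from an SA multicast join; the
 * names here are hypothetical.
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */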
1790 
1791 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
1792 {
1793 	struct ib_xrcd *xrcd;
1794 
1795 	if (!device->alloc_xrcd)
1796 		return ERR_PTR(-ENOSYS);
1797 
1798 	xrcd = device->alloc_xrcd(device, NULL, NULL);
1799 	if (!IS_ERR(xrcd)) {
1800 		xrcd->device = device;
1801 		xrcd->inode = NULL;
1802 		atomic_set(&xrcd->usecnt, 0);
1803 		mutex_init(&xrcd->tgt_qp_mutex);
1804 		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
1805 	}
1806 
1807 	return xrcd;
1808 }
1809 EXPORT_SYMBOL(__ib_alloc_xrcd);
1810 
1811 int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1812 {
1813 	struct ib_qp *qp;
1814 	int ret;
1815 
1816 	if (atomic_read(&xrcd->usecnt))
1817 		return -EBUSY;
1818 
1819 	while (!list_empty(&xrcd->tgt_qp_list)) {
1820 		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1821 		ret = ib_destroy_qp(qp);
1822 		if (ret)
1823 			return ret;
1824 	}
1825 
1826 	return xrcd->device->dealloc_xrcd(xrcd);
1827 }
1828 EXPORT_SYMBOL(ib_dealloc_xrcd);
1829 
1830 /**
1831  * ib_create_wq - Creates a WQ associated with the specified protection
1832  * domain.
1833  * @pd: The protection domain associated with the WQ.
1834  * @wq_attr: A list of initial attributes required to create the
1835  * WQ. If WQ creation succeeds, then the attributes are updated to
1836  * the actual capabilities of the created WQ.
1837  *
1838  * wq_attr->max_wr and wq_attr->max_sge determine
1839  * the requested size of the WQ, and are set to the actual values allocated
1840  * on return.
1841  * If ib_create_wq() succeeds, then max_wr and max_sge will always be
1842  * at least as large as the requested values.
1843  */
1844 struct ib_wq *ib_create_wq(struct ib_pd *pd,
1845 			   struct ib_wq_init_attr *wq_attr)
1846 {
1847 	struct ib_wq *wq;
1848 
1849 	if (!pd->device->create_wq)
1850 		return ERR_PTR(-ENOSYS);
1851 
1852 	wq = pd->device->create_wq(pd, wq_attr, NULL);
1853 	if (!IS_ERR(wq)) {
1854 		wq->event_handler = wq_attr->event_handler;
1855 		wq->wq_context = wq_attr->wq_context;
1856 		wq->wq_type = wq_attr->wq_type;
1857 		wq->cq = wq_attr->cq;
1858 		wq->device = pd->device;
1859 		wq->pd = pd;
1860 		wq->uobject = NULL;
1861 		atomic_inc(&pd->usecnt);
1862 		atomic_inc(&wq_attr->cq->usecnt);
1863 		atomic_set(&wq->usecnt, 0);
1864 	}
1865 	return wq;
1866 }
1867 EXPORT_SYMBOL(ib_create_wq);
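
/*
 * Usage sketch (illustrative, assuming "pd" and "cq" were created by the
 * caller): wq_attr is an in/out parameter, so the caller can read back
 * the sizes the device actually granted.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr	 = 128,
 *		.max_sge = 1,
 *		.cq	 = cq,
 *	};
 *	struct ib_wq *wq = ib_create_wq(pd, &wq_attr);
 *
 *	if (!IS_ERR(wq))
 *		pr_debug("granted %u WRs\n", wq_attr.max_wr);
 */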
1868 
1869 /**
1870  * ib_destroy_wq - Destroys the specified WQ.
1871  * @wq: The WQ to destroy.
1872  */
1873 int ib_destroy_wq(struct ib_wq *wq)
1874 {
1875 	int err;
1876 	struct ib_cq *cq = wq->cq;
1877 	struct ib_pd *pd = wq->pd;
1878 
1879 	if (atomic_read(&wq->usecnt))
1880 		return -EBUSY;
1881 
1882 	err = wq->device->destroy_wq(wq);
1883 	if (!err) {
1884 		atomic_dec(&pd->usecnt);
1885 		atomic_dec(&cq->usecnt);
1886 	}
1887 	return err;
1888 }
1889 EXPORT_SYMBOL(ib_destroy_wq);
1890 
1891 /**
1892  * ib_modify_wq - Modifies the specified WQ.
1893  * @wq: The WQ to modify.
1894  * @wq_attr: On input, specifies the WQ attributes to modify; on output,
1895  *   returns the current values of the selected WQ attributes.
1896  * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
1897  *   are being modified.
1898  */
1899 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
1900 		 u32 wq_attr_mask)
1901 {
1902 	int err;
1903 
1904 	if (!wq->device->modify_wq)
1905 		return -ENOSYS;
1906 
1907 	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
1908 	return err;
1909 }
1910 EXPORT_SYMBOL(ib_modify_wq);
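
/*
 * Usage sketch (illustrative): a newly created WQ starts in the RESET
 * state and is typically moved to ready before receives are posted.
 *
 *	struct ib_wq_attr attr = { .wq_state = IB_WQS_RDY };
 *
 *	ret = ib_modify_wq(wq, &attr, IB_WQ_STATE);
 */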
1911 
1912 /**
1913  * ib_create_rwq_ind_table - Creates an RQ Indirection Table.
1914  * @device: The device on which to create the rwq indirection table.
1915  * @init_attr: A list of initial attributes required to create the
1916  *	Indirection Table.
1917  *
1918  * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must be at
1919  *	least that of the created ib_rwq_ind_table object; the caller is
1920  *	responsible for allocating and freeing that memory.
1921  */
1922 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
1923 						 struct ib_rwq_ind_table_init_attr *init_attr)
1924 {
1925 	struct ib_rwq_ind_table *rwq_ind_table;
1926 	int i;
1927 	u32 table_size;
1928 
1929 	if (!device->create_rwq_ind_table)
1930 		return ERR_PTR(-ENOSYS);
1931 
1932 	table_size = (1 << init_attr->log_ind_tbl_size);
1933 	rwq_ind_table = device->create_rwq_ind_table(device,
1934 				init_attr, NULL);
1935 	if (IS_ERR(rwq_ind_table))
1936 		return rwq_ind_table;
1937 
1938 	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
1939 	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
1940 	rwq_ind_table->device = device;
1941 	rwq_ind_table->uobject = NULL;
1942 	atomic_set(&rwq_ind_table->usecnt, 0);
1943 
1944 	for (i = 0; i < table_size; i++)
1945 		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
1946 
1947 	return rwq_ind_table;
1948 }
1949 EXPORT_SYMBOL(ib_create_rwq_ind_table);
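
/*
 * Usage sketch (illustrative): per the lifetime note above, the WQ array
 * in init_attr->ind_tbl is referenced, not copied, so the caller keeps it
 * allocated until the table is destroyed.
 *
 *	struct ib_wq *wqs[4];	(caller-owned, must outlive the table)
 *	struct ib_rwq_ind_table_init_attr init_attr = {
 *		.log_ind_tbl_size = 2,	(1 << 2 == ARRAY_SIZE(wqs))
 *		.ind_tbl	  = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl =
 *		ib_create_rwq_ind_table(device, &init_attr);
 */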
1950 
1951 /**
1952  * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
1953  * @rwq_ind_table: The Indirection Table to destroy.
1954  */
1955 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
1956 {
1957 	int err, i;
1958 	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
1959 	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
1960 
1961 	if (atomic_read(&rwq_ind_table->usecnt))
1962 		return -EBUSY;
1963 
1964 	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
1965 	if (!err) {
1966 		for (i = 0; i < table_size; i++)
1967 			atomic_dec(&ind_tbl[i]->usecnt);
1968 	}
1969 
1970 	return err;
1971 }
1972 EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
1973 
1974 struct ib_flow *ib_create_flow(struct ib_qp *qp,
1975 			       struct ib_flow_attr *flow_attr,
1976 			       int domain)
1977 {
1978 	struct ib_flow *flow_id;
1979 	if (!qp->device->create_flow)
1980 		return ERR_PTR(-ENOSYS);
1981 
1982 	flow_id = qp->device->create_flow(qp, flow_attr, domain);
1983 	if (!IS_ERR(flow_id)) {
1984 		atomic_inc(&qp->usecnt);
1985 		flow_id->qp = qp;
1986 	}
1987 	return flow_id;
1988 }
1989 EXPORT_SYMBOL(ib_create_flow);
1990 
1991 int ib_destroy_flow(struct ib_flow *flow_id)
1992 {
1993 	int err;
1994 	struct ib_qp *qp = flow_id->qp;
1995 
1996 	err = qp->device->destroy_flow(flow_id);
1997 	if (!err)
1998 		atomic_dec(&qp->usecnt);
1999 	return err;
2000 }
2001 EXPORT_SYMBOL(ib_destroy_flow);
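
/*
 * Usage sketch (illustrative, with hypothetical values): the simplest
 * rule is a sniffer-type flow with no extra specs; real callers usually
 * append specs such as struct ib_flow_spec_eth after the header and grow
 * .size and .num_of_specs accordingly.
 *
 *	struct ib_flow_attr flow_attr = {
 *		.type	      = IB_FLOW_ATTR_SNIFFER,
 *		.size	      = sizeof(flow_attr),
 *		.num_of_specs = 0,
 *		.port	      = 1,
 *	};
 *	struct ib_flow *flow = ib_create_flow(qp, &flow_attr,
 *					      IB_FLOW_DOMAIN_USER);
 */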
2002 
2003 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2004 		       struct ib_mr_status *mr_status)
2005 {
2006 	return mr->device->check_mr_status ?
2007 		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
2008 }
2009 EXPORT_SYMBOL(ib_check_mr_status);
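
/*
 * Usage sketch (illustrative): after a signature-enabled transfer, a ULP
 * can ask whether the MR recorded an integrity error, as iSER-style
 * initiators do.
 *
 *	struct ib_mr_status mr_status;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("signature error at offset %llu\n",
 *		       mr_status.sig_err.sig_err_offset);
 */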
2010 
2011 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2012 			 int state)
2013 {
2014 	if (!device->set_vf_link_state)
2015 		return -ENOSYS;
2016 
2017 	return device->set_vf_link_state(device, vf, port, state);
2018 }
2019 EXPORT_SYMBOL(ib_set_vf_link_state);
2020 
2021 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2022 		     struct ifla_vf_info *info)
2023 {
2024 	if (!device->get_vf_config)
2025 		return -ENOSYS;
2026 
2027 	return device->get_vf_config(device, vf, port, info);
2028 }
2029 EXPORT_SYMBOL(ib_get_vf_config);
2030 
2031 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2032 		    struct ifla_vf_stats *stats)
2033 {
2034 	if (!device->get_vf_stats)
2035 		return -ENOSYS;
2036 
2037 	return device->get_vf_stats(device, vf, port, stats);
2038 }
2039 EXPORT_SYMBOL(ib_get_vf_stats);
2040 
2041 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2042 		   int type)
2043 {
2044 	if (!device->set_vf_guid)
2045 		return -ENOSYS;
2046 
2047 	return device->set_vf_guid(device, vf, port, guid, type);
2048 }
2049 EXPORT_SYMBOL(ib_set_vf_guid);
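
/*
 * Usage sketch (illustrative): the ib_*_vf_* helpers mirror the netdev
 * SR-IOV ndo callbacks; e.g. querying the configuration of VF 0 behind
 * physical port 1.
 *
 *	struct ifla_vf_info info;
 *
 *	ret = ib_get_vf_config(device, 0, 1, &info);
 */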
2050 
2051 /**
2052  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2053  *     and set it as the memory region.
2054  * @mr:            memory region
2055  * @sg:            dma mapped scatterlist
2056  * @sg_nents:      number of entries in sg
2057  * @sg_offset:     offset in bytes into sg
2058  * @page_size:     page vector desired page size
2059  *
2060  * Constraints:
2061  * - The first sg element is allowed to have an offset.
2062  * - Each sg element must either be aligned to page_size or virtually
2063  *   contiguous to the previous element. In case an sg element has a
2064  *   non-contiguous offset, the mapping prefix will not include it.
2065  * - The last sg element is allowed to have length less than page_size.
2066  * - If the total byte length of sg_nents entries exceeds the MR's
2067  *   max_num_sg * page_size, then only max_num_sg entries will be mapped.
2068  * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2069  *   constraints holds and the page_size argument is ignored.
2070  *
2071  * Returns the number of sg elements that were mapped to the memory region.
2072  *
2073  * After this completes successfully, the memory region
2074  * is ready for registration.
2075  */
2076 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2077 		 unsigned int *sg_offset, unsigned int page_size)
2078 {
2079 	if (unlikely(!mr->device->map_mr_sg))
2080 		return -ENOSYS;
2081 
2082 	mr->page_size = page_size;
2083 
2084 	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
2085 }
2086 EXPORT_SYMBOL(ib_map_mr_sg);
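
/*
 * Usage sketch (illustrative): callers compare the return value against
 * the number of mapped entries and handle the case where only a prefix
 * was mapped; "sgl" and "nents" are assumed to come from an earlier
 * ib_dma_map_sg() call.
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n < 0)
 *		return n;
 *	if (n < nents)
 *		(... register the remainder in another MR, or fail ...)
 */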
2087 
2088 /**
2089  * ib_sg_to_pages() - Convert the largest prefix of an sg list
2090  *     to a page vector
2091  * @mr:            memory region
2092  * @sgl:           dma mapped scatterlist
2093  * @sg_nents:      number of entries in sg
2094  * @sg_offset_p:   IN:  start offset in bytes into sg
2095  *                 OUT: offset in bytes into sg element n of the first
2096  *                      byte that has not been processed, where n is the
2097  *                      return value of this function.
2098  * @set_page:      driver page assignment function pointer
2099  *
2100  * Core service helper for drivers to convert the largest
2101  * prefix of the given sg list to a page vector. The sg list
2102  * prefix converted is the prefix that meets the requirements
2103  * of ib_map_mr_sg().
2104  *
2105  * Returns the number of sg elements that were assigned to
2106  * a page vector.
2107  */
2108 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2109 		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2110 {
2111 	struct scatterlist *sg;
2112 	u64 last_end_dma_addr = 0;
2113 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2114 	unsigned int last_page_off = 0;
2115 	u64 page_mask = ~((u64)mr->page_size - 1);
2116 	int i, ret;
2117 
2118 	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2119 		return -EINVAL;
2120 
2121 	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2122 	mr->length = 0;
2123 
2124 	for_each_sg(sgl, sg, sg_nents, i) {
2125 		u64 dma_addr = sg_dma_address(sg) + sg_offset;
2126 		u64 prev_addr = dma_addr;
2127 		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2128 		u64 end_dma_addr = dma_addr + dma_len;
2129 		u64 page_addr = dma_addr & page_mask;
2130 
2131 		/*
2132 		 * For the second and later elements, check whether either the
2133 		 * end of element i-1 or the start of element i is not aligned
2134 		 * on a page boundary.
2135 		 */
2136 		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2137 			/* Stop mapping if there is a gap. */
2138 			if (last_end_dma_addr != dma_addr)
2139 				break;
2140 
2141 			/*
2142 			 * Coalesce this element with the last. If it is small
2143 			 * enough just update mr->length. Otherwise start
2144 			 * mapping from the next page.
2145 			 */
2146 			goto next_page;
2147 		}
2148 
2149 		do {
2150 			ret = set_page(mr, page_addr);
2151 			if (unlikely(ret < 0)) {
2152 				sg_offset = prev_addr - sg_dma_address(sg);
2153 				mr->length += prev_addr - dma_addr;
2154 				if (sg_offset_p)
2155 					*sg_offset_p = sg_offset;
2156 				return i || sg_offset ? i : ret;
2157 			}
2158 			prev_addr = page_addr;
2159 next_page:
2160 			page_addr += mr->page_size;
2161 		} while (page_addr < end_dma_addr);
2162 
2163 		mr->length += dma_len;
2164 		last_end_dma_addr = end_dma_addr;
2165 		last_page_off = end_dma_addr & ~page_mask;
2166 
2167 		sg_offset = 0;
2168 	}
2169 
2170 	if (sg_offset_p)
2171 		*sg_offset_p = 0;
2172 	return i;
2173 }
2174 EXPORT_SYMBOL(ib_sg_to_pages);
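
/*
 * Usage sketch (illustrative, with hypothetical driver types): a driver's
 * map_mr_sg hook typically wraps this helper with a set_page callback
 * that appends each page address to a driver-private page list, much like
 * the mlx4/mlx5 providers do.
 *
 *	static int my_mr_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (mr->npages >= mr->max_pages)
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_mr_set_page);
 */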
2175 
2176 struct ib_drain_cqe {
2177 	struct ib_cqe cqe;
2178 	struct completion done;
2179 };
2180 
2181 static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2182 {
2183 	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2184 						cqe);
2185 
2186 	complete(&cqe->done);
2187 }
2188 
2189 /*
2190  * Post a WR on the SQ and block until its completion is reaped.
2191  */
2192 static void __ib_drain_sq(struct ib_qp *qp)
2193 {
2194 	struct ib_cq *cq = qp->send_cq;
2195 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2196 	struct ib_drain_cqe sdrain;
2197 	struct ib_send_wr swr = {}, *bad_swr;
2198 	int ret;
2199 
2200 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2201 	if (ret) {
2202 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2203 		return;
2204 	}
2205 
2206 	swr.wr_cqe = &sdrain.cqe;
2207 	sdrain.cqe.done = ib_drain_qp_done;
2208 	init_completion(&sdrain.done);
2209 
2210 	ret = ib_post_send(qp, &swr, &bad_swr);
2211 	if (ret) {
2212 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2213 		return;
2214 	}
2215 
2216 	if (cq->poll_ctx == IB_POLL_DIRECT)
2217 		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2218 			ib_process_cq_direct(cq, -1);
2219 	else
2220 		wait_for_completion(&sdrain.done);
2221 }
2222 
2223 /*
2224  * Post a WR on the RQ and block until its completion is reaped.
2225  */
2226 static void __ib_drain_rq(struct ib_qp *qp)
2227 {
2228 	struct ib_cq *cq = qp->recv_cq;
2229 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2230 	struct ib_drain_cqe rdrain;
2231 	struct ib_recv_wr rwr = {}, *bad_rwr;
2232 	int ret;
2233 
2234 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2235 	if (ret) {
2236 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2237 		return;
2238 	}
2239 
2240 	rwr.wr_cqe = &rdrain.cqe;
2241 	rdrain.cqe.done = ib_drain_qp_done;
2242 	init_completion(&rdrain.done);
2243 
2244 	ret = ib_post_recv(qp, &rwr, &bad_rwr);
2245 	if (ret) {
2246 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2247 		return;
2248 	}
2249 
2250 	if (cq->poll_ctx == IB_POLL_DIRECT)
2251 		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2252 			ib_process_cq_direct(cq, -1);
2253 	else
2254 		wait_for_completion(&rdrain.done);
2255 }
2256 
2257 /**
2258  * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2259  *		   application.
2260  * @qp:            queue pair to drain
2261  *
2262  * If the device has a provider-specific drain function, then
2263  * call that.  Otherwise call the generic drain function
2264  * __ib_drain_sq().
2265  *
2266  * The caller must:
2267  *
2268  * ensure there is room in the CQ and SQ for the drain work request and
2269  * completion.
2270  *
2271  * allocate the CQ using ib_alloc_cq().
2272  *
2273  * ensure that there are no other contexts that are posting WRs concurrently.
2274  * Otherwise the drain is not guaranteed.
2275  */
2276 void ib_drain_sq(struct ib_qp *qp)
2277 {
2278 	if (qp->device->drain_sq)
2279 		qp->device->drain_sq(qp);
2280 	else
2281 		__ib_drain_sq(qp);
2282 }
2283 EXPORT_SYMBOL(ib_drain_sq);
2284 
2285 /**
2286  * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2287  *		   application.
2288  * @qp:            queue pair to drain
2289  *
2290  * If the device has a provider-specific drain function, then
2291  * call that.  Otherwise call the generic drain function
2292  * __ib_drain_rq().
2293  *
2294  * The caller must:
2295  *
2296  * ensure there is room in the CQ and RQ for the drain work request and
2297  * completion.
2298  *
2299  * allocate the CQ using ib_alloc_cq().
2300  *
2301  * ensure that there are no other contexts that are posting WRs concurrently.
2302  * Otherwise the drain is not guaranteed.
2303  */
2304 void ib_drain_rq(struct ib_qp *qp)
2305 {
2306 	if (qp->device->drain_rq)
2307 		qp->device->drain_rq(qp);
2308 	else
2309 		__ib_drain_rq(qp);
2310 }
2311 EXPORT_SYMBOL(ib_drain_rq);
2312 
2313 /**
2314  * ib_drain_qp() - Block until all CQEs have been consumed by the
2315  *		   application on both the RQ and SQ.
2316  * @qp:            queue pair to drain
2317  *
2318  * The caller must:
2319  *
2320  * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2321  * and completions.
2322  *
2323  * allocate the CQs using ib_alloc_cq().
2324  *
2325  * ensure that there are no other contexts that are posting WRs concurrently.
2326  * Otherwise the drain is not guaranteed.
2327  */
2328 void ib_drain_qp(struct ib_qp *qp)
2329 {
2330 	ib_drain_sq(qp);
2331 	if (!qp->srq)
2332 		ib_drain_rq(qp);
2333 }
2334 EXPORT_SYMBOL(ib_drain_qp);
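
/*
 * Usage sketch (illustrative): a ULP tearing down a connection drains
 * before freeing anything a late completion might still touch; both CQs
 * must have been created with ib_alloc_cq() as required above.
 *
 *	ib_drain_qp(qp);	(moves the QP to error and flushes the SQ,
 *				 plus the RQ when no SRQ is attached)
 *	ib_destroy_qp(qp);
 */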
2335