xref: /openbmc/linux/drivers/infiniband/core/verbs.c (revision 4f205687)
1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38 
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
44 #include <linux/in.h>
45 #include <linux/in6.h>
46 #include <net/addrconf.h>
47 
48 #include <rdma/ib_verbs.h>
49 #include <rdma/ib_cache.h>
50 #include <rdma/ib_addr.h>
51 #include <rdma/rw.h>
52 
53 #include "core_priv.h"
54 
55 static const char * const ib_events[] = {
56 	[IB_EVENT_CQ_ERR]		= "CQ error",
57 	[IB_EVENT_QP_FATAL]		= "QP fatal error",
58 	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
59 	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
60 	[IB_EVENT_COMM_EST]		= "communication established",
61 	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
62 	[IB_EVENT_PATH_MIG]		= "path migration successful",
63 	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
64 	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
65 	[IB_EVENT_PORT_ACTIVE]		= "port active",
66 	[IB_EVENT_PORT_ERR]		= "port error",
67 	[IB_EVENT_LID_CHANGE]		= "LID change",
68 	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
69 	[IB_EVENT_SM_CHANGE]		= "SM change",
70 	[IB_EVENT_SRQ_ERR]		= "SRQ error",
71 	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
72 	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
73 	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
74 	[IB_EVENT_GID_CHANGE]		= "GID changed",
75 };
76 
77 const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
78 {
79 	size_t index = event;
80 
81 	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
82 			ib_events[index] : "unrecognized event";
83 }
84 EXPORT_SYMBOL(ib_event_msg);
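
/*
 * Example (illustrative sketch, not part of this file's API): an IB client's
 * async event handler can use ib_event_msg() to log a readable event name.
 * The handler below is hypothetical; a real client would wire it up with
 * INIT_IB_EVENT_HANDLER() and ib_register_event_handler().
 */
static void example_async_event_handler(struct ib_event_handler *handler,
					struct ib_event *event)
{
	pr_info("%s: async event \"%s\" (%d)\n", event->device->name,
		ib_event_msg(event->event), event->event);
}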
85 
86 static const char * const wc_statuses[] = {
87 	[IB_WC_SUCCESS]			= "success",
88 	[IB_WC_LOC_LEN_ERR]		= "local length error",
89 	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
90 	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
91 	[IB_WC_LOC_PROT_ERR]		= "local protection error",
92 	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
93 	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
94 	[IB_WC_BAD_RESP_ERR]		= "bad response error",
95 	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
96 	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
97 	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
98 	[IB_WC_REM_OP_ERR]		= "remote operation error",
99 	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
100 	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
101 	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
102 	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
103 	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
104 	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
105 	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
106 	[IB_WC_FATAL_ERR]		= "fatal error",
107 	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
108 	[IB_WC_GENERAL_ERR]		= "general error",
109 };
110 
111 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
112 {
113 	size_t index = status;
114 
115 	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
116 			wc_statuses[index] : "unrecognized status";
117 }
118 EXPORT_SYMBOL(ib_wc_status_msg);
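
/*
 * Example (illustrative sketch): a CQ completion callback can report failed
 * work requests with ib_wc_status_msg().  example_send_done() is a
 * hypothetical struct ib_cqe done handler, not an API defined here.
 */
static void example_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		pr_err("send failed: %s (%d), vendor_err 0x%x\n",
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->vendor_err);
}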
119 
120 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
121 {
122 	switch (rate) {
123 	case IB_RATE_2_5_GBPS: return  1;
124 	case IB_RATE_5_GBPS:   return  2;
125 	case IB_RATE_10_GBPS:  return  4;
126 	case IB_RATE_20_GBPS:  return  8;
127 	case IB_RATE_30_GBPS:  return 12;
128 	case IB_RATE_40_GBPS:  return 16;
129 	case IB_RATE_60_GBPS:  return 24;
130 	case IB_RATE_80_GBPS:  return 32;
131 	case IB_RATE_120_GBPS: return 48;
132 	default:	       return -1;
133 	}
134 }
135 EXPORT_SYMBOL(ib_rate_to_mult);
136 
137 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
138 {
139 	switch (mult) {
140 	case 1:  return IB_RATE_2_5_GBPS;
141 	case 2:  return IB_RATE_5_GBPS;
142 	case 4:  return IB_RATE_10_GBPS;
143 	case 8:  return IB_RATE_20_GBPS;
144 	case 12: return IB_RATE_30_GBPS;
145 	case 16: return IB_RATE_40_GBPS;
146 	case 24: return IB_RATE_60_GBPS;
147 	case 32: return IB_RATE_80_GBPS;
148 	case 48: return IB_RATE_120_GBPS;
149 	default: return IB_RATE_PORT_CURRENT;
150 	}
151 }
152 EXPORT_SYMBOL(mult_to_ib_rate);
153 
154 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
155 {
156 	switch (rate) {
157 	case IB_RATE_2_5_GBPS: return 2500;
158 	case IB_RATE_5_GBPS:   return 5000;
159 	case IB_RATE_10_GBPS:  return 10000;
160 	case IB_RATE_20_GBPS:  return 20000;
161 	case IB_RATE_30_GBPS:  return 30000;
162 	case IB_RATE_40_GBPS:  return 40000;
163 	case IB_RATE_60_GBPS:  return 60000;
164 	case IB_RATE_80_GBPS:  return 80000;
165 	case IB_RATE_120_GBPS: return 120000;
166 	case IB_RATE_14_GBPS:  return 14062;
167 	case IB_RATE_56_GBPS:  return 56250;
168 	case IB_RATE_112_GBPS: return 112500;
169 	case IB_RATE_168_GBPS: return 168750;
170 	case IB_RATE_25_GBPS:  return 25781;
171 	case IB_RATE_100_GBPS: return 103125;
172 	case IB_RATE_200_GBPS: return 206250;
173 	case IB_RATE_300_GBPS: return 309375;
174 	default:	       return -1;
175 	}
176 }
177 EXPORT_SYMBOL(ib_rate_to_mbps);
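
/*
 * Example (illustrative sketch): converting between the ib_rate encoding
 * used in path records / AH attributes and plain numbers.  The chosen rate
 * is an arbitrary value for demonstration.
 */
static void example_rate_conversion(void)
{
	enum ib_rate rate = IB_RATE_40_GBPS;

	pr_info("IB_RATE_40_GBPS: mult %d, %d Mb/s\n",
		ib_rate_to_mult(rate), ib_rate_to_mbps(rate));
}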
178 
179 __attribute_const__ enum rdma_transport_type
180 rdma_node_get_transport(enum rdma_node_type node_type)
181 {
182 	switch (node_type) {
183 	case RDMA_NODE_IB_CA:
184 	case RDMA_NODE_IB_SWITCH:
185 	case RDMA_NODE_IB_ROUTER:
186 		return RDMA_TRANSPORT_IB;
187 	case RDMA_NODE_RNIC:
188 		return RDMA_TRANSPORT_IWARP;
189 	case RDMA_NODE_USNIC:
190 		return RDMA_TRANSPORT_USNIC;
191 	case RDMA_NODE_USNIC_UDP:
192 		return RDMA_TRANSPORT_USNIC_UDP;
193 	default:
194 		BUG();
195 		return 0;
196 	}
197 }
198 EXPORT_SYMBOL(rdma_node_get_transport);
199 
200 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
201 {
202 	if (device->get_link_layer)
203 		return device->get_link_layer(device, port_num);
204 
205 	switch (rdma_node_get_transport(device->node_type)) {
206 	case RDMA_TRANSPORT_IB:
207 		return IB_LINK_LAYER_INFINIBAND;
208 	case RDMA_TRANSPORT_IWARP:
209 	case RDMA_TRANSPORT_USNIC:
210 	case RDMA_TRANSPORT_USNIC_UDP:
211 		return IB_LINK_LAYER_ETHERNET;
212 	default:
213 		return IB_LINK_LAYER_UNSPECIFIED;
214 	}
215 }
216 EXPORT_SYMBOL(rdma_port_get_link_layer);
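
/*
 * Example (illustrative sketch): a ULP that needs per-link-layer behaviour
 * (e.g. RoCE-specific address handling) can key off the port's link layer.
 * The helper name is hypothetical.
 */
static bool example_port_is_ethernet(struct ib_device *device, u8 port_num)
{
	return rdma_port_get_link_layer(device, port_num) ==
	       IB_LINK_LAYER_ETHERNET;
}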
217 
218 /* Protection domains */
219 
220 /**
221  * ib_alloc_pd - Allocates an unused protection domain.
222  * @device: The device on which to allocate the protection domain.
223  *
224  * A protection domain object provides an association between QPs, shared
225  * receive queues, address handles, memory regions, and memory windows.
226  *
227  * Every PD has a local_dma_lkey which can be used as the lkey value for local
228  * memory operations.
229  */
230 struct ib_pd *ib_alloc_pd(struct ib_device *device)
231 {
232 	struct ib_pd *pd;
233 
234 	pd = device->alloc_pd(device, NULL, NULL);
235 	if (IS_ERR(pd))
236 		return pd;
237 
238 	pd->device = device;
239 	pd->uobject = NULL;
240 	pd->local_mr = NULL;
241 	atomic_set(&pd->usecnt, 0);
242 
243 	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
244 		pd->local_dma_lkey = device->local_dma_lkey;
245 	else {
246 		struct ib_mr *mr;
247 
248 		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
249 		if (IS_ERR(mr)) {
250 			ib_dealloc_pd(pd);
251 			return (struct ib_pd *)mr;
252 		}
253 
254 		pd->local_mr = mr;
255 		pd->local_dma_lkey = pd->local_mr->lkey;
256 	}
257 	return pd;
258 }
259 EXPORT_SYMBOL(ib_alloc_pd);
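
/*
 * Example (illustrative sketch): typical kernel ULP setup.  The PD's
 * local_dma_lkey can be placed directly in ib_sge.lkey for DMA-mapped
 * buffers.  example_setup_pd() is a hypothetical helper, not exported API.
 */
static struct ib_pd *example_setup_pd(struct ib_device *device, u32 *lkey)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device);
	if (IS_ERR(pd))
		return pd;

	*lkey = pd->local_dma_lkey;
	return pd;
}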
260 
261 /**
262  * ib_dealloc_pd - Deallocates a protection domain.
263  * @pd: The protection domain to deallocate.
264  *
265  * It is an error to call this function while any resources in the pd still
266  * exist.  The caller is responsible for synchronously destroying them and
267  * guaranteeing that no new allocations will happen.
268  */
269 void ib_dealloc_pd(struct ib_pd *pd)
270 {
271 	int ret;
272 
273 	if (pd->local_mr) {
274 		ret = ib_dereg_mr(pd->local_mr);
275 		WARN_ON(ret);
276 		pd->local_mr = NULL;
277 	}
278 
279 	/* uverbs manipulates usecnt with proper locking, while the kabi
280 	   requires the caller to guarantee we can't race here. */
281 	WARN_ON(atomic_read(&pd->usecnt));
282 
283 	/* Making dealloc_pd a void return is a WIP, no driver should return
284 	   an error here. */
285 	ret = pd->device->dealloc_pd(pd);
286 	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
287 }
288 EXPORT_SYMBOL(ib_dealloc_pd);
289 
290 /* Address handles */
291 
292 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
293 {
294 	struct ib_ah *ah;
295 
296 	ah = pd->device->create_ah(pd, ah_attr);
297 
298 	if (!IS_ERR(ah)) {
299 		ah->device  = pd->device;
300 		ah->pd      = pd;
301 		ah->uobject = NULL;
302 		atomic_inc(&pd->usecnt);
303 	}
304 
305 	return ah;
306 }
307 EXPORT_SYMBOL(ib_create_ah);
308 
309 static int ib_get_header_version(const union rdma_network_hdr *hdr)
310 {
311 	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
312 	struct iphdr ip4h_checked;
313 	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
314 
315 	/* If it's IPv6, the version must be 6, otherwise, the first
316 	 * 20 bytes (before the IPv4 header) are garbled.
317 	 */
318 	if (ip6h->version != 6)
319 		return (ip4h->version == 4) ? 4 : 0;
320 	/* version may be 6 or 4 because the first 20 bytes could be garbled */
321 
322 	/* RoCE v2 requires no options, thus header length
323 	 * must be 5 words
324 	 */
325 	if (ip4h->ihl != 5)
326 		return 6;
327 
328 	/* Verify checksum.
329 	 * We can't write on scattered buffers so we need to copy to
330 	 * temp buffer.
331 	 */
332 	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
333 	ip4h_checked.check = 0;
334 	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
335 	/* if IPv4 header checksum is OK, believe it */
336 	if (ip4h->check == ip4h_checked.check)
337 		return 4;
338 	return 6;
339 }
340 
341 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
342 						     u8 port_num,
343 						     const struct ib_grh *grh)
344 {
345 	int grh_version;
346 
347 	if (rdma_protocol_ib(device, port_num))
348 		return RDMA_NETWORK_IB;
349 
350 	grh_version = ib_get_header_version((union rdma_network_hdr *)grh);
351 
352 	if (grh_version == 4)
353 		return RDMA_NETWORK_IPV4;
354 
355 	if (grh->next_hdr == IPPROTO_UDP)
356 		return RDMA_NETWORK_IPV6;
357 
358 	return RDMA_NETWORK_ROCE_V1;
359 }
360 
361 struct find_gid_index_context {
362 	u16 vlan_id;
363 	enum ib_gid_type gid_type;
364 };
365 
366 static bool find_gid_index(const union ib_gid *gid,
367 			   const struct ib_gid_attr *gid_attr,
368 			   void *context)
369 {
370 	struct find_gid_index_context *ctx =
371 		(struct find_gid_index_context *)context;
372 
373 	if (ctx->gid_type != gid_attr->gid_type)
374 		return false;
375 
376 	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
377 	    (is_vlan_dev(gid_attr->ndev) &&
378 	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
379 		return false;
380 
381 	return true;
382 }
383 
384 static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
385 				   u16 vlan_id, const union ib_gid *sgid,
386 				   enum ib_gid_type gid_type,
387 				   u16 *gid_index)
388 {
389 	struct find_gid_index_context context = {.vlan_id = vlan_id,
390 						 .gid_type = gid_type};
391 
392 	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
393 				     &context, gid_index);
394 }
395 
396 static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
397 				  enum rdma_network_type net_type,
398 				  union ib_gid *sgid, union ib_gid *dgid)
399 {
400 	struct sockaddr_in  src_in;
401 	struct sockaddr_in  dst_in;
402 	__be32 src_saddr, dst_saddr;
403 
404 	if (!sgid || !dgid)
405 		return -EINVAL;
406 
407 	if (net_type == RDMA_NETWORK_IPV4) {
408 		memcpy(&src_in.sin_addr.s_addr,
409 		       &hdr->roce4grh.saddr, 4);
410 		memcpy(&dst_in.sin_addr.s_addr,
411 		       &hdr->roce4grh.daddr, 4);
412 		src_saddr = src_in.sin_addr.s_addr;
413 		dst_saddr = dst_in.sin_addr.s_addr;
414 		ipv6_addr_set_v4mapped(src_saddr,
415 				       (struct in6_addr *)sgid);
416 		ipv6_addr_set_v4mapped(dst_saddr,
417 				       (struct in6_addr *)dgid);
418 		return 0;
419 	} else if (net_type == RDMA_NETWORK_IPV6 ||
420 		   net_type == RDMA_NETWORK_IB) {
421 		*dgid = hdr->ibgrh.dgid;
422 		*sgid = hdr->ibgrh.sgid;
423 		return 0;
424 	} else {
425 		return -EINVAL;
426 	}
427 }
428 
429 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
430 		       const struct ib_wc *wc, const struct ib_grh *grh,
431 		       struct ib_ah_attr *ah_attr)
432 {
433 	u32 flow_class;
434 	u16 gid_index;
435 	int ret;
436 	enum rdma_network_type net_type = RDMA_NETWORK_IB;
437 	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
438 	int hoplimit = 0xff;
439 	union ib_gid dgid;
440 	union ib_gid sgid;
441 
442 	memset(ah_attr, 0, sizeof *ah_attr);
443 	if (rdma_cap_eth_ah(device, port_num)) {
444 		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
445 			net_type = wc->network_hdr_type;
446 		else
447 			net_type = ib_get_net_type_by_grh(device, port_num, grh);
448 		gid_type = ib_network_to_gid_type(net_type);
449 	}
450 	ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
451 				     &sgid, &dgid);
452 	if (ret)
453 		return ret;
454 
455 	if (rdma_protocol_roce(device, port_num)) {
456 		int if_index = 0;
457 		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
458 				wc->vlan_id : 0xffff;
459 		struct net_device *idev;
460 		struct net_device *resolved_dev;
461 
462 		if (!(wc->wc_flags & IB_WC_GRH))
463 			return -EPROTOTYPE;
464 
465 		if (!device->get_netdev)
466 			return -EOPNOTSUPP;
467 
468 		idev = device->get_netdev(device, port_num);
469 		if (!idev)
470 			return -ENODEV;
471 
472 		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
473 						   ah_attr->dmac,
474 						   wc->wc_flags & IB_WC_WITH_VLAN ?
475 						   NULL : &vlan_id,
476 						   &if_index, &hoplimit);
477 		if (ret) {
478 			dev_put(idev);
479 			return ret;
480 		}
481 
482 		resolved_dev = dev_get_by_index(&init_net, if_index);
483 		if (resolved_dev->flags & IFF_LOOPBACK) {
484 			dev_put(resolved_dev);
485 			resolved_dev = idev;
486 			dev_hold(resolved_dev);
487 		}
488 		rcu_read_lock();
489 		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
490 								   resolved_dev))
491 			ret = -EHOSTUNREACH;
492 		rcu_read_unlock();
493 		dev_put(idev);
494 		dev_put(resolved_dev);
495 		if (ret)
496 			return ret;
497 
498 		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
499 					      &dgid, gid_type, &gid_index);
500 		if (ret)
501 			return ret;
502 	}
503 
504 	ah_attr->dlid = wc->slid;
505 	ah_attr->sl = wc->sl;
506 	ah_attr->src_path_bits = wc->dlid_path_bits;
507 	ah_attr->port_num = port_num;
508 
509 	if (wc->wc_flags & IB_WC_GRH) {
510 		ah_attr->ah_flags = IB_AH_GRH;
511 		ah_attr->grh.dgid = sgid;
512 
513 		if (!rdma_cap_eth_ah(device, port_num)) {
514 			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
515 				ret = ib_find_cached_gid_by_port(device, &dgid,
516 								 IB_GID_TYPE_IB,
517 								 port_num, NULL,
518 								 &gid_index);
519 				if (ret)
520 					return ret;
521 			} else {
522 				gid_index = 0;
523 			}
524 		}
525 
526 		ah_attr->grh.sgid_index = (u8) gid_index;
527 		flow_class = be32_to_cpu(grh->version_tclass_flow);
528 		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
529 		ah_attr->grh.hop_limit = hoplimit;
530 		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
531 	}
532 	return 0;
533 }
534 EXPORT_SYMBOL(ib_init_ah_from_wc);
535 
536 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
537 				   const struct ib_grh *grh, u8 port_num)
538 {
539 	struct ib_ah_attr ah_attr;
540 	int ret;
541 
542 	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
543 	if (ret)
544 		return ERR_PTR(ret);
545 
546 	return ib_create_ah(pd, &ah_attr);
547 }
548 EXPORT_SYMBOL(ib_create_ah_from_wc);
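
/*
 * Example (illustrative sketch): a UD service replying to a datagram builds
 * the return address handle straight from the receive completion.  Posting
 * the reply and destroying the AH afterwards with ib_destroy_ah() are
 * omitted; example_reply_ah() is a hypothetical helper.
 */
static struct ib_ah *example_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
				      const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
	if (IS_ERR(ah))
		pr_err("reply AH creation failed: %ld\n", PTR_ERR(ah));

	return ah;
}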
549 
550 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
551 {
552 	return ah->device->modify_ah ?
553 		ah->device->modify_ah(ah, ah_attr) :
554 		-ENOSYS;
555 }
556 EXPORT_SYMBOL(ib_modify_ah);
557 
558 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
559 {
560 	return ah->device->query_ah ?
561 		ah->device->query_ah(ah, ah_attr) :
562 		-ENOSYS;
563 }
564 EXPORT_SYMBOL(ib_query_ah);
565 
566 int ib_destroy_ah(struct ib_ah *ah)
567 {
568 	struct ib_pd *pd;
569 	int ret;
570 
571 	pd = ah->pd;
572 	ret = ah->device->destroy_ah(ah);
573 	if (!ret)
574 		atomic_dec(&pd->usecnt);
575 
576 	return ret;
577 }
578 EXPORT_SYMBOL(ib_destroy_ah);
579 
580 /* Shared receive queues */
581 
582 struct ib_srq *ib_create_srq(struct ib_pd *pd,
583 			     struct ib_srq_init_attr *srq_init_attr)
584 {
585 	struct ib_srq *srq;
586 
587 	if (!pd->device->create_srq)
588 		return ERR_PTR(-ENOSYS);
589 
590 	srq = pd->device->create_srq(pd, srq_init_attr, NULL);
591 
592 	if (!IS_ERR(srq)) {
593 		srq->device    	   = pd->device;
594 		srq->pd        	   = pd;
595 		srq->uobject       = NULL;
596 		srq->event_handler = srq_init_attr->event_handler;
597 		srq->srq_context   = srq_init_attr->srq_context;
598 		srq->srq_type      = srq_init_attr->srq_type;
599 		if (srq->srq_type == IB_SRQT_XRC) {
600 			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
601 			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
602 			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
603 			atomic_inc(&srq->ext.xrc.cq->usecnt);
604 		}
605 		atomic_inc(&pd->usecnt);
606 		atomic_set(&srq->usecnt, 0);
607 	}
608 
609 	return srq;
610 }
611 EXPORT_SYMBOL(ib_create_srq);
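
/*
 * Example (illustrative sketch): creating a basic (non-XRC) SRQ.  The queue
 * depth and SGE count are arbitrary example values.
 */
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr srq_attr = {
		.attr = {
			.max_wr = 256,
			.max_sge = 1,
		},
		.srq_type = IB_SRQT_BASIC,
	};

	return ib_create_srq(pd, &srq_attr);
}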
612 
613 int ib_modify_srq(struct ib_srq *srq,
614 		  struct ib_srq_attr *srq_attr,
615 		  enum ib_srq_attr_mask srq_attr_mask)
616 {
617 	return srq->device->modify_srq ?
618 		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
619 		-ENOSYS;
620 }
621 EXPORT_SYMBOL(ib_modify_srq);
622 
623 int ib_query_srq(struct ib_srq *srq,
624 		 struct ib_srq_attr *srq_attr)
625 {
626 	return srq->device->query_srq ?
627 		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
628 }
629 EXPORT_SYMBOL(ib_query_srq);
630 
631 int ib_destroy_srq(struct ib_srq *srq)
632 {
633 	struct ib_pd *pd;
634 	enum ib_srq_type srq_type;
635 	struct ib_xrcd *uninitialized_var(xrcd);
636 	struct ib_cq *uninitialized_var(cq);
637 	int ret;
638 
639 	if (atomic_read(&srq->usecnt))
640 		return -EBUSY;
641 
642 	pd = srq->pd;
643 	srq_type = srq->srq_type;
644 	if (srq_type == IB_SRQT_XRC) {
645 		xrcd = srq->ext.xrc.xrcd;
646 		cq = srq->ext.xrc.cq;
647 	}
648 
649 	ret = srq->device->destroy_srq(srq);
650 	if (!ret) {
651 		atomic_dec(&pd->usecnt);
652 		if (srq_type == IB_SRQT_XRC) {
653 			atomic_dec(&xrcd->usecnt);
654 			atomic_dec(&cq->usecnt);
655 		}
656 	}
657 
658 	return ret;
659 }
660 EXPORT_SYMBOL(ib_destroy_srq);
661 
662 /* Queue pairs */
663 
664 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
665 {
666 	struct ib_qp *qp = context;
667 	unsigned long flags;
668 
669 	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
670 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
671 		if (event->element.qp->event_handler)
672 			event->element.qp->event_handler(event, event->element.qp->qp_context);
673 	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
674 }
675 
676 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
677 {
678 	mutex_lock(&xrcd->tgt_qp_mutex);
679 	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
680 	mutex_unlock(&xrcd->tgt_qp_mutex);
681 }
682 
683 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
684 				  void (*event_handler)(struct ib_event *, void *),
685 				  void *qp_context)
686 {
687 	struct ib_qp *qp;
688 	unsigned long flags;
689 
690 	qp = kzalloc(sizeof *qp, GFP_KERNEL);
691 	if (!qp)
692 		return ERR_PTR(-ENOMEM);
693 
694 	qp->real_qp = real_qp;
695 	atomic_inc(&real_qp->usecnt);
696 	qp->device = real_qp->device;
697 	qp->event_handler = event_handler;
698 	qp->qp_context = qp_context;
699 	qp->qp_num = real_qp->qp_num;
700 	qp->qp_type = real_qp->qp_type;
701 
702 	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
703 	list_add(&qp->open_list, &real_qp->open_list);
704 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
705 
706 	return qp;
707 }
708 
709 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
710 			 struct ib_qp_open_attr *qp_open_attr)
711 {
712 	struct ib_qp *qp, *real_qp;
713 
714 	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
715 		return ERR_PTR(-EINVAL);
716 
717 	qp = ERR_PTR(-EINVAL);
718 	mutex_lock(&xrcd->tgt_qp_mutex);
719 	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
720 		if (real_qp->qp_num == qp_open_attr->qp_num) {
721 			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
722 					  qp_open_attr->qp_context);
723 			break;
724 		}
725 	}
726 	mutex_unlock(&xrcd->tgt_qp_mutex);
727 	return qp;
728 }
729 EXPORT_SYMBOL(ib_open_qp);
730 
731 static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
732 		struct ib_qp_init_attr *qp_init_attr)
733 {
734 	struct ib_qp *real_qp = qp;
735 
736 	qp->event_handler = __ib_shared_qp_event_handler;
737 	qp->qp_context = qp;
738 	qp->pd = NULL;
739 	qp->send_cq = qp->recv_cq = NULL;
740 	qp->srq = NULL;
741 	qp->xrcd = qp_init_attr->xrcd;
742 	atomic_inc(&qp_init_attr->xrcd->usecnt);
743 	INIT_LIST_HEAD(&qp->open_list);
744 
745 	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
746 			  qp_init_attr->qp_context);
747 	if (!IS_ERR(qp))
748 		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
749 	else
750 		real_qp->device->destroy_qp(real_qp);
751 	return qp;
752 }
753 
754 struct ib_qp *ib_create_qp(struct ib_pd *pd,
755 			   struct ib_qp_init_attr *qp_init_attr)
756 {
757 	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
758 	struct ib_qp *qp;
759 	int ret;
760 
761 	/*
762 	 * If the caller is using the RDMA API, calculate the resources
763 	 * needed for the RDMA READ/WRITE operations.
764 	 *
765 	 * Note that these callers need to pass in a port number.
766 	 */
767 	if (qp_init_attr->cap.max_rdma_ctxs)
768 		rdma_rw_init_qp(device, qp_init_attr);
769 
770 	qp = device->create_qp(pd, qp_init_attr, NULL);
771 	if (IS_ERR(qp))
772 		return qp;
773 
774 	qp->device     = device;
775 	qp->real_qp    = qp;
776 	qp->uobject    = NULL;
777 	qp->qp_type    = qp_init_attr->qp_type;
778 
779 	atomic_set(&qp->usecnt, 0);
780 	qp->mrs_used = 0;
781 	spin_lock_init(&qp->mr_lock);
782 	INIT_LIST_HEAD(&qp->rdma_mrs);
783 	INIT_LIST_HEAD(&qp->sig_mrs);
784 
785 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
786 		return ib_create_xrc_qp(qp, qp_init_attr);
787 
788 	qp->event_handler = qp_init_attr->event_handler;
789 	qp->qp_context = qp_init_attr->qp_context;
790 	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
791 		qp->recv_cq = NULL;
792 		qp->srq = NULL;
793 	} else {
794 		qp->recv_cq = qp_init_attr->recv_cq;
795 		atomic_inc(&qp_init_attr->recv_cq->usecnt);
796 		qp->srq = qp_init_attr->srq;
797 		if (qp->srq)
798 			atomic_inc(&qp_init_attr->srq->usecnt);
799 	}
800 
801 	qp->pd	    = pd;
802 	qp->send_cq = qp_init_attr->send_cq;
803 	qp->xrcd    = NULL;
804 
805 	atomic_inc(&pd->usecnt);
806 	atomic_inc(&qp_init_attr->send_cq->usecnt);
807 
808 	if (qp_init_attr->cap.max_rdma_ctxs) {
809 		ret = rdma_rw_init_mrs(qp, qp_init_attr);
810 		if (ret) {
811 			pr_err("failed to init MR pool ret= %d\n", ret);
812 			ib_destroy_qp(qp);
813 			qp = ERR_PTR(ret);
814 		}
815 	}
816 
817 	return qp;
818 }
819 EXPORT_SYMBOL(ib_create_qp);
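
/*
 * Example (illustrative sketch): creating an RC QP for a kernel ULP.  The
 * queue capacities are placeholder values; the CQ is assumed to have been
 * allocated with ib_alloc_cq() so that ib_drain_qp() can be used later.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr = 64,
			.max_recv_wr = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};

	return ib_create_qp(pd, &init_attr);
}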
820 
821 static const struct {
822 	int			valid;
823 	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
824 	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
825 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
826 	[IB_QPS_RESET] = {
827 		[IB_QPS_RESET] = { .valid = 1 },
828 		[IB_QPS_INIT]  = {
829 			.valid = 1,
830 			.req_param = {
831 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
832 						IB_QP_PORT			|
833 						IB_QP_QKEY),
834 				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
835 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
836 						IB_QP_PORT			|
837 						IB_QP_ACCESS_FLAGS),
838 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
839 						IB_QP_PORT			|
840 						IB_QP_ACCESS_FLAGS),
841 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
842 						IB_QP_PORT			|
843 						IB_QP_ACCESS_FLAGS),
844 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
845 						IB_QP_PORT			|
846 						IB_QP_ACCESS_FLAGS),
847 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
848 						IB_QP_QKEY),
849 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
850 						IB_QP_QKEY),
851 			}
852 		},
853 	},
854 	[IB_QPS_INIT]  = {
855 		[IB_QPS_RESET] = { .valid = 1 },
856 		[IB_QPS_ERR] =   { .valid = 1 },
857 		[IB_QPS_INIT]  = {
858 			.valid = 1,
859 			.opt_param = {
860 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
861 						IB_QP_PORT			|
862 						IB_QP_QKEY),
863 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
864 						IB_QP_PORT			|
865 						IB_QP_ACCESS_FLAGS),
866 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
867 						IB_QP_PORT			|
868 						IB_QP_ACCESS_FLAGS),
869 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
870 						IB_QP_PORT			|
871 						IB_QP_ACCESS_FLAGS),
872 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
873 						IB_QP_PORT			|
874 						IB_QP_ACCESS_FLAGS),
875 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
876 						IB_QP_QKEY),
877 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
878 						IB_QP_QKEY),
879 			}
880 		},
881 		[IB_QPS_RTR]   = {
882 			.valid = 1,
883 			.req_param = {
884 				[IB_QPT_UC]  = (IB_QP_AV			|
885 						IB_QP_PATH_MTU			|
886 						IB_QP_DEST_QPN			|
887 						IB_QP_RQ_PSN),
888 				[IB_QPT_RC]  = (IB_QP_AV			|
889 						IB_QP_PATH_MTU			|
890 						IB_QP_DEST_QPN			|
891 						IB_QP_RQ_PSN			|
892 						IB_QP_MAX_DEST_RD_ATOMIC	|
893 						IB_QP_MIN_RNR_TIMER),
894 				[IB_QPT_XRC_INI] = (IB_QP_AV			|
895 						IB_QP_PATH_MTU			|
896 						IB_QP_DEST_QPN			|
897 						IB_QP_RQ_PSN),
898 				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
899 						IB_QP_PATH_MTU			|
900 						IB_QP_DEST_QPN			|
901 						IB_QP_RQ_PSN			|
902 						IB_QP_MAX_DEST_RD_ATOMIC	|
903 						IB_QP_MIN_RNR_TIMER),
904 			},
905 			.opt_param = {
906 				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
907 						 IB_QP_QKEY),
908 				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
909 						 IB_QP_ACCESS_FLAGS		|
910 						 IB_QP_PKEY_INDEX),
911 				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
912 						 IB_QP_ACCESS_FLAGS		|
913 						 IB_QP_PKEY_INDEX),
914 				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
915 						 IB_QP_ACCESS_FLAGS		|
916 						 IB_QP_PKEY_INDEX),
917 				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
918 						 IB_QP_ACCESS_FLAGS		|
919 						 IB_QP_PKEY_INDEX),
920 				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
921 						 IB_QP_QKEY),
922 				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
923 						 IB_QP_QKEY),
924 			 },
925 		},
926 	},
927 	[IB_QPS_RTR]   = {
928 		[IB_QPS_RESET] = { .valid = 1 },
929 		[IB_QPS_ERR] =   { .valid = 1 },
930 		[IB_QPS_RTS]   = {
931 			.valid = 1,
932 			.req_param = {
933 				[IB_QPT_UD]  = IB_QP_SQ_PSN,
934 				[IB_QPT_UC]  = IB_QP_SQ_PSN,
935 				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
936 						IB_QP_RETRY_CNT			|
937 						IB_QP_RNR_RETRY			|
938 						IB_QP_SQ_PSN			|
939 						IB_QP_MAX_QP_RD_ATOMIC),
940 				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
941 						IB_QP_RETRY_CNT			|
942 						IB_QP_RNR_RETRY			|
943 						IB_QP_SQ_PSN			|
944 						IB_QP_MAX_QP_RD_ATOMIC),
945 				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
946 						IB_QP_SQ_PSN),
947 				[IB_QPT_SMI] = IB_QP_SQ_PSN,
948 				[IB_QPT_GSI] = IB_QP_SQ_PSN,
949 			},
950 			.opt_param = {
951 				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
952 						 IB_QP_QKEY),
953 				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
954 						 IB_QP_ALT_PATH			|
955 						 IB_QP_ACCESS_FLAGS		|
956 						 IB_QP_PATH_MIG_STATE),
957 				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
958 						 IB_QP_ALT_PATH			|
959 						 IB_QP_ACCESS_FLAGS		|
960 						 IB_QP_MIN_RNR_TIMER		|
961 						 IB_QP_PATH_MIG_STATE),
962 				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
963 						 IB_QP_ALT_PATH			|
964 						 IB_QP_ACCESS_FLAGS		|
965 						 IB_QP_PATH_MIG_STATE),
966 				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
967 						 IB_QP_ALT_PATH			|
968 						 IB_QP_ACCESS_FLAGS		|
969 						 IB_QP_MIN_RNR_TIMER		|
970 						 IB_QP_PATH_MIG_STATE),
971 				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
972 						 IB_QP_QKEY),
973 				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
974 						 IB_QP_QKEY),
975 			 }
976 		}
977 	},
978 	[IB_QPS_RTS]   = {
979 		[IB_QPS_RESET] = { .valid = 1 },
980 		[IB_QPS_ERR] =   { .valid = 1 },
981 		[IB_QPS_RTS]   = {
982 			.valid = 1,
983 			.opt_param = {
984 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
985 						IB_QP_QKEY),
986 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
987 						IB_QP_ACCESS_FLAGS		|
988 						IB_QP_ALT_PATH			|
989 						IB_QP_PATH_MIG_STATE),
990 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
991 						IB_QP_ACCESS_FLAGS		|
992 						IB_QP_ALT_PATH			|
993 						IB_QP_PATH_MIG_STATE		|
994 						IB_QP_MIN_RNR_TIMER),
995 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
996 						IB_QP_ACCESS_FLAGS		|
997 						IB_QP_ALT_PATH			|
998 						IB_QP_PATH_MIG_STATE),
999 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1000 						IB_QP_ACCESS_FLAGS		|
1001 						IB_QP_ALT_PATH			|
1002 						IB_QP_PATH_MIG_STATE		|
1003 						IB_QP_MIN_RNR_TIMER),
1004 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1005 						IB_QP_QKEY),
1006 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1007 						IB_QP_QKEY),
1008 			}
1009 		},
1010 		[IB_QPS_SQD]   = {
1011 			.valid = 1,
1012 			.opt_param = {
1013 				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1014 				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1015 				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1016 				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1017 				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1018 				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1019 				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1020 			}
1021 		},
1022 	},
1023 	[IB_QPS_SQD]   = {
1024 		[IB_QPS_RESET] = { .valid = 1 },
1025 		[IB_QPS_ERR] =   { .valid = 1 },
1026 		[IB_QPS_RTS]   = {
1027 			.valid = 1,
1028 			.opt_param = {
1029 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1030 						IB_QP_QKEY),
1031 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1032 						IB_QP_ALT_PATH			|
1033 						IB_QP_ACCESS_FLAGS		|
1034 						IB_QP_PATH_MIG_STATE),
1035 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1036 						IB_QP_ALT_PATH			|
1037 						IB_QP_ACCESS_FLAGS		|
1038 						IB_QP_MIN_RNR_TIMER		|
1039 						IB_QP_PATH_MIG_STATE),
1040 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1041 						IB_QP_ALT_PATH			|
1042 						IB_QP_ACCESS_FLAGS		|
1043 						IB_QP_PATH_MIG_STATE),
1044 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1045 						IB_QP_ALT_PATH			|
1046 						IB_QP_ACCESS_FLAGS		|
1047 						IB_QP_MIN_RNR_TIMER		|
1048 						IB_QP_PATH_MIG_STATE),
1049 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1050 						IB_QP_QKEY),
1051 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1052 						IB_QP_QKEY),
1053 			}
1054 		},
1055 		[IB_QPS_SQD]   = {
1056 			.valid = 1,
1057 			.opt_param = {
1058 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1059 						IB_QP_QKEY),
1060 				[IB_QPT_UC]  = (IB_QP_AV			|
1061 						IB_QP_ALT_PATH			|
1062 						IB_QP_ACCESS_FLAGS		|
1063 						IB_QP_PKEY_INDEX		|
1064 						IB_QP_PATH_MIG_STATE),
1065 				[IB_QPT_RC]  = (IB_QP_PORT			|
1066 						IB_QP_AV			|
1067 						IB_QP_TIMEOUT			|
1068 						IB_QP_RETRY_CNT			|
1069 						IB_QP_RNR_RETRY			|
1070 						IB_QP_MAX_QP_RD_ATOMIC		|
1071 						IB_QP_MAX_DEST_RD_ATOMIC	|
1072 						IB_QP_ALT_PATH			|
1073 						IB_QP_ACCESS_FLAGS		|
1074 						IB_QP_PKEY_INDEX		|
1075 						IB_QP_MIN_RNR_TIMER		|
1076 						IB_QP_PATH_MIG_STATE),
1077 				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
1078 						IB_QP_AV			|
1079 						IB_QP_TIMEOUT			|
1080 						IB_QP_RETRY_CNT			|
1081 						IB_QP_RNR_RETRY			|
1082 						IB_QP_MAX_QP_RD_ATOMIC		|
1083 						IB_QP_ALT_PATH			|
1084 						IB_QP_ACCESS_FLAGS		|
1085 						IB_QP_PKEY_INDEX		|
1086 						IB_QP_PATH_MIG_STATE),
1087 				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
1088 						IB_QP_AV			|
1089 						IB_QP_TIMEOUT			|
1090 						IB_QP_MAX_DEST_RD_ATOMIC	|
1091 						IB_QP_ALT_PATH			|
1092 						IB_QP_ACCESS_FLAGS		|
1093 						IB_QP_PKEY_INDEX		|
1094 						IB_QP_MIN_RNR_TIMER		|
1095 						IB_QP_PATH_MIG_STATE),
1096 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1097 						IB_QP_QKEY),
1098 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1099 						IB_QP_QKEY),
1100 			}
1101 		}
1102 	},
1103 	[IB_QPS_SQE]   = {
1104 		[IB_QPS_RESET] = { .valid = 1 },
1105 		[IB_QPS_ERR] =   { .valid = 1 },
1106 		[IB_QPS_RTS]   = {
1107 			.valid = 1,
1108 			.opt_param = {
1109 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1110 						IB_QP_QKEY),
1111 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1112 						IB_QP_ACCESS_FLAGS),
1113 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1114 						IB_QP_QKEY),
1115 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1116 						IB_QP_QKEY),
1117 			}
1118 		}
1119 	},
1120 	[IB_QPS_ERR] = {
1121 		[IB_QPS_RESET] = { .valid = 1 },
1122 		[IB_QPS_ERR] =   { .valid = 1 }
1123 	}
1124 };
1125 
1126 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1127 		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
1128 		       enum rdma_link_layer ll)
1129 {
1130 	enum ib_qp_attr_mask req_param, opt_param;
1131 
1132 	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
1133 	    next_state < 0 || next_state > IB_QPS_ERR)
1134 		return 0;
1135 
1136 	if (mask & IB_QP_CUR_STATE  &&
1137 	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1138 	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1139 		return 0;
1140 
1141 	if (!qp_state_table[cur_state][next_state].valid)
1142 		return 0;
1143 
1144 	req_param = qp_state_table[cur_state][next_state].req_param[type];
1145 	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1146 
1147 	if ((mask & req_param) != req_param)
1148 		return 0;
1149 
1150 	if (mask & ~(req_param | opt_param | IB_QP_STATE))
1151 		return 0;
1152 
1153 	return 1;
1154 }
1155 EXPORT_SYMBOL(ib_modify_qp_is_ok);
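
/*
 * Example (illustrative sketch): a hardware driver's ->modify_qp() hook
 * normally validates the requested transition with ib_modify_qp_is_ok()
 * before programming the device.  The surrounding driver code is assumed.
 */
static int example_check_qp_transition(struct ib_qp *qp, int attr_mask,
				       enum ib_qp_state cur_state,
				       enum ib_qp_state new_state,
				       enum rdma_link_layer ll)
{
	if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
				attr_mask, ll))
		return -EINVAL;

	return 0;
}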
1156 
1157 int ib_resolve_eth_dmac(struct ib_qp *qp,
1158 			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
1159 {
1160 	int           ret = 0;
1161 
1162 	if (*qp_attr_mask & IB_QP_AV) {
1163 		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
1164 		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
1165 			return -EINVAL;
1166 
1167 		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
1168 			return 0;
1169 
1170 		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
1171 			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
1172 					qp_attr->ah_attr.dmac);
1173 		} else {
1174 			union ib_gid		sgid;
1175 			struct ib_gid_attr	sgid_attr;
1176 			int			ifindex;
1177 			int			hop_limit;
1178 
1179 			ret = ib_query_gid(qp->device,
1180 					   qp_attr->ah_attr.port_num,
1181 					   qp_attr->ah_attr.grh.sgid_index,
1182 					   &sgid, &sgid_attr);
1183 
1184 			if (ret || !sgid_attr.ndev) {
1185 				if (!ret)
1186 					ret = -ENXIO;
1187 				goto out;
1188 			}
1189 
1190 			ifindex = sgid_attr.ndev->ifindex;
1191 
1192 			ret = rdma_addr_find_l2_eth_by_grh(&sgid,
1193 							   &qp_attr->ah_attr.grh.dgid,
1194 							   qp_attr->ah_attr.dmac,
1195 							   NULL, &ifindex, &hop_limit);
1196 
1197 			dev_put(sgid_attr.ndev);
1198 
1199 			qp_attr->ah_attr.grh.hop_limit = hop_limit;
1200 		}
1201 	}
1202 out:
1203 	return ret;
1204 }
1205 EXPORT_SYMBOL(ib_resolve_eth_dmac);
1206 
1207 
1208 int ib_modify_qp(struct ib_qp *qp,
1209 		 struct ib_qp_attr *qp_attr,
1210 		 int qp_attr_mask)
1211 {
1212 	int ret;
1213 
1214 	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
1215 	if (ret)
1216 		return ret;
1217 
1218 	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1219 }
1220 EXPORT_SYMBOL(ib_modify_qp);
1221 
1222 int ib_query_qp(struct ib_qp *qp,
1223 		struct ib_qp_attr *qp_attr,
1224 		int qp_attr_mask,
1225 		struct ib_qp_init_attr *qp_init_attr)
1226 {
1227 	return qp->device->query_qp ?
1228 		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
1229 		-ENOSYS;
1230 }
1231 EXPORT_SYMBOL(ib_query_qp);
1232 
1233 int ib_close_qp(struct ib_qp *qp)
1234 {
1235 	struct ib_qp *real_qp;
1236 	unsigned long flags;
1237 
1238 	real_qp = qp->real_qp;
1239 	if (real_qp == qp)
1240 		return -EINVAL;
1241 
1242 	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
1243 	list_del(&qp->open_list);
1244 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
1245 
1246 	atomic_dec(&real_qp->usecnt);
1247 	kfree(qp);
1248 
1249 	return 0;
1250 }
1251 EXPORT_SYMBOL(ib_close_qp);
1252 
1253 static int __ib_destroy_shared_qp(struct ib_qp *qp)
1254 {
1255 	struct ib_xrcd *xrcd;
1256 	struct ib_qp *real_qp;
1257 	int ret;
1258 
1259 	real_qp = qp->real_qp;
1260 	xrcd = real_qp->xrcd;
1261 
1262 	mutex_lock(&xrcd->tgt_qp_mutex);
1263 	ib_close_qp(qp);
1264 	if (atomic_read(&real_qp->usecnt) == 0)
1265 		list_del(&real_qp->xrcd_list);
1266 	else
1267 		real_qp = NULL;
1268 	mutex_unlock(&xrcd->tgt_qp_mutex);
1269 
1270 	if (real_qp) {
1271 		ret = ib_destroy_qp(real_qp);
1272 		if (!ret)
1273 			atomic_dec(&xrcd->usecnt);
1274 		else
1275 			__ib_insert_xrcd_qp(xrcd, real_qp);
1276 	}
1277 
1278 	return 0;
1279 }
1280 
1281 int ib_destroy_qp(struct ib_qp *qp)
1282 {
1283 	struct ib_pd *pd;
1284 	struct ib_cq *scq, *rcq;
1285 	struct ib_srq *srq;
1286 	int ret;
1287 
1288 	WARN_ON_ONCE(qp->mrs_used > 0);
1289 
1290 	if (atomic_read(&qp->usecnt))
1291 		return -EBUSY;
1292 
1293 	if (qp->real_qp != qp)
1294 		return __ib_destroy_shared_qp(qp);
1295 
1296 	pd   = qp->pd;
1297 	scq  = qp->send_cq;
1298 	rcq  = qp->recv_cq;
1299 	srq  = qp->srq;
1300 
1301 	if (!qp->uobject)
1302 		rdma_rw_cleanup_mrs(qp);
1303 
1304 	ret = qp->device->destroy_qp(qp);
1305 	if (!ret) {
1306 		if (pd)
1307 			atomic_dec(&pd->usecnt);
1308 		if (scq)
1309 			atomic_dec(&scq->usecnt);
1310 		if (rcq)
1311 			atomic_dec(&rcq->usecnt);
1312 		if (srq)
1313 			atomic_dec(&srq->usecnt);
1314 	}
1315 
1316 	return ret;
1317 }
1318 EXPORT_SYMBOL(ib_destroy_qp);
1319 
1320 /* Completion queues */
1321 
1322 struct ib_cq *ib_create_cq(struct ib_device *device,
1323 			   ib_comp_handler comp_handler,
1324 			   void (*event_handler)(struct ib_event *, void *),
1325 			   void *cq_context,
1326 			   const struct ib_cq_init_attr *cq_attr)
1327 {
1328 	struct ib_cq *cq;
1329 
1330 	cq = device->create_cq(device, cq_attr, NULL, NULL);
1331 
1332 	if (!IS_ERR(cq)) {
1333 		cq->device        = device;
1334 		cq->uobject       = NULL;
1335 		cq->comp_handler  = comp_handler;
1336 		cq->event_handler = event_handler;
1337 		cq->cq_context    = cq_context;
1338 		atomic_set(&cq->usecnt, 0);
1339 	}
1340 
1341 	return cq;
1342 }
1343 EXPORT_SYMBOL(ib_create_cq);
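
/*
 * Example (illustrative sketch): raw ib_create_cq() usage.  Most kernel
 * consumers use the ib_alloc_cq() wrapper instead, which also sets up CQ
 * polling.  The completion handler and CQ depth below are placeholders.
 */
static struct ib_cq *example_create_cq(struct ib_device *device,
				       ib_comp_handler handler, void *ctx)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe = 128,
	};

	return ib_create_cq(device, handler, NULL, ctx, &cq_attr);
}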
1344 
1345 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1346 {
1347 	return cq->device->modify_cq ?
1348 		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
1349 }
1350 EXPORT_SYMBOL(ib_modify_cq);
1351 
1352 int ib_destroy_cq(struct ib_cq *cq)
1353 {
1354 	if (atomic_read(&cq->usecnt))
1355 		return -EBUSY;
1356 
1357 	return cq->device->destroy_cq(cq);
1358 }
1359 EXPORT_SYMBOL(ib_destroy_cq);
1360 
1361 int ib_resize_cq(struct ib_cq *cq, int cqe)
1362 {
1363 	return cq->device->resize_cq ?
1364 		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
1365 }
1366 EXPORT_SYMBOL(ib_resize_cq);
1367 
1368 /* Memory regions */
1369 
1370 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
1371 {
1372 	struct ib_mr *mr;
1373 	int err;
1374 
1375 	err = ib_check_mr_access(mr_access_flags);
1376 	if (err)
1377 		return ERR_PTR(err);
1378 
1379 	mr = pd->device->get_dma_mr(pd, mr_access_flags);
1380 
1381 	if (!IS_ERR(mr)) {
1382 		mr->device  = pd->device;
1383 		mr->pd      = pd;
1384 		mr->uobject = NULL;
1385 		atomic_inc(&pd->usecnt);
1386 		mr->need_inval = false;
1387 	}
1388 
1389 	return mr;
1390 }
1391 EXPORT_SYMBOL(ib_get_dma_mr);
1392 
1393 int ib_dereg_mr(struct ib_mr *mr)
1394 {
1395 	struct ib_pd *pd = mr->pd;
1396 	int ret;
1397 
1398 	ret = mr->device->dereg_mr(mr);
1399 	if (!ret)
1400 		atomic_dec(&pd->usecnt);
1401 
1402 	return ret;
1403 }
1404 EXPORT_SYMBOL(ib_dereg_mr);
1405 
1406 /**
1407  * ib_alloc_mr() - Allocates a memory region
1408  * @pd:            protection domain associated with the region
1409  * @mr_type:       memory region type
1410  * @max_num_sg:    maximum sg entries available for registration.
1411  *
1412  * Notes:
1413  * Memory registration page/sg lists must not exceed max_num_sg.
1414  * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
1415  * max_num_sg * used_page_size.
1416  *
1417  */
1418 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1419 			  enum ib_mr_type mr_type,
1420 			  u32 max_num_sg)
1421 {
1422 	struct ib_mr *mr;
1423 
1424 	if (!pd->device->alloc_mr)
1425 		return ERR_PTR(-ENOSYS);
1426 
1427 	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
1428 	if (!IS_ERR(mr)) {
1429 		mr->device  = pd->device;
1430 		mr->pd      = pd;
1431 		mr->uobject = NULL;
1432 		atomic_inc(&pd->usecnt);
1433 		mr->need_inval = false;
1434 	}
1435 
1436 	return mr;
1437 }
1438 EXPORT_SYMBOL(ib_alloc_mr);
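
/*
 * Example (illustrative sketch): allocating a fast-registration MR able to
 * cover up to 16 pages and releasing it again.  The page count is an
 * arbitrary example value.
 */
static int example_mr_lifecycle(struct ib_pd *pd)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* ... map with ib_map_mr_sg() and post an IB_WR_REG_MR here ... */

	return ib_dereg_mr(mr);
}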
1439 
1440 /* "Fast" memory regions */
1441 
1442 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1443 			    int mr_access_flags,
1444 			    struct ib_fmr_attr *fmr_attr)
1445 {
1446 	struct ib_fmr *fmr;
1447 
1448 	if (!pd->device->alloc_fmr)
1449 		return ERR_PTR(-ENOSYS);
1450 
1451 	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1452 	if (!IS_ERR(fmr)) {
1453 		fmr->device = pd->device;
1454 		fmr->pd     = pd;
1455 		atomic_inc(&pd->usecnt);
1456 	}
1457 
1458 	return fmr;
1459 }
1460 EXPORT_SYMBOL(ib_alloc_fmr);
1461 
1462 int ib_unmap_fmr(struct list_head *fmr_list)
1463 {
1464 	struct ib_fmr *fmr;
1465 
1466 	if (list_empty(fmr_list))
1467 		return 0;
1468 
1469 	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1470 	return fmr->device->unmap_fmr(fmr_list);
1471 }
1472 EXPORT_SYMBOL(ib_unmap_fmr);
1473 
1474 int ib_dealloc_fmr(struct ib_fmr *fmr)
1475 {
1476 	struct ib_pd *pd;
1477 	int ret;
1478 
1479 	pd = fmr->pd;
1480 	ret = fmr->device->dealloc_fmr(fmr);
1481 	if (!ret)
1482 		atomic_dec(&pd->usecnt);
1483 
1484 	return ret;
1485 }
1486 EXPORT_SYMBOL(ib_dealloc_fmr);
1487 
1488 /* Multicast groups */
1489 
1490 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1491 {
1492 	int ret;
1493 
1494 	if (!qp->device->attach_mcast)
1495 		return -ENOSYS;
1496 	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1497 		return -EINVAL;
1498 
1499 	ret = qp->device->attach_mcast(qp, gid, lid);
1500 	if (!ret)
1501 		atomic_inc(&qp->usecnt);
1502 	return ret;
1503 }
1504 EXPORT_SYMBOL(ib_attach_mcast);
1505 
1506 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1507 {
1508 	int ret;
1509 
1510 	if (!qp->device->detach_mcast)
1511 		return -ENOSYS;
1512 	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1513 		return -EINVAL;
1514 
1515 	ret = qp->device->detach_mcast(qp, gid, lid);
1516 	if (!ret)
1517 		atomic_dec(&qp->usecnt);
1518 	return ret;
1519 }
1520 EXPORT_SYMBOL(ib_detach_mcast);
1521 
1522 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
1523 {
1524 	struct ib_xrcd *xrcd;
1525 
1526 	if (!device->alloc_xrcd)
1527 		return ERR_PTR(-ENOSYS);
1528 
1529 	xrcd = device->alloc_xrcd(device, NULL, NULL);
1530 	if (!IS_ERR(xrcd)) {
1531 		xrcd->device = device;
1532 		xrcd->inode = NULL;
1533 		atomic_set(&xrcd->usecnt, 0);
1534 		mutex_init(&xrcd->tgt_qp_mutex);
1535 		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
1536 	}
1537 
1538 	return xrcd;
1539 }
1540 EXPORT_SYMBOL(ib_alloc_xrcd);
1541 
1542 int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1543 {
1544 	struct ib_qp *qp;
1545 	int ret;
1546 
1547 	if (atomic_read(&xrcd->usecnt))
1548 		return -EBUSY;
1549 
1550 	while (!list_empty(&xrcd->tgt_qp_list)) {
1551 		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1552 		ret = ib_destroy_qp(qp);
1553 		if (ret)
1554 			return ret;
1555 	}
1556 
1557 	return xrcd->device->dealloc_xrcd(xrcd);
1558 }
1559 EXPORT_SYMBOL(ib_dealloc_xrcd);
1560 
1561 struct ib_flow *ib_create_flow(struct ib_qp *qp,
1562 			       struct ib_flow_attr *flow_attr,
1563 			       int domain)
1564 {
1565 	struct ib_flow *flow_id;
1566 	if (!qp->device->create_flow)
1567 		return ERR_PTR(-ENOSYS);
1568 
1569 	flow_id = qp->device->create_flow(qp, flow_attr, domain);
1570 	if (!IS_ERR(flow_id))
1571 		atomic_inc(&qp->usecnt);
1572 	return flow_id;
1573 }
1574 EXPORT_SYMBOL(ib_create_flow);
1575 
1576 int ib_destroy_flow(struct ib_flow *flow_id)
1577 {
1578 	int err;
1579 	struct ib_qp *qp = flow_id->qp;
1580 
1581 	err = qp->device->destroy_flow(flow_id);
1582 	if (!err)
1583 		atomic_dec(&qp->usecnt);
1584 	return err;
1585 }
1586 EXPORT_SYMBOL(ib_destroy_flow);
1587 
1588 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
1589 		       struct ib_mr_status *mr_status)
1590 {
1591 	return mr->device->check_mr_status ?
1592 		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
1593 }
1594 EXPORT_SYMBOL(ib_check_mr_status);
1595 
1596 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
1597 			 int state)
1598 {
1599 	if (!device->set_vf_link_state)
1600 		return -ENOSYS;
1601 
1602 	return device->set_vf_link_state(device, vf, port, state);
1603 }
1604 EXPORT_SYMBOL(ib_set_vf_link_state);
1605 
1606 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
1607 		     struct ifla_vf_info *info)
1608 {
1609 	if (!device->get_vf_config)
1610 		return -ENOSYS;
1611 
1612 	return device->get_vf_config(device, vf, port, info);
1613 }
1614 EXPORT_SYMBOL(ib_get_vf_config);
1615 
1616 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
1617 		    struct ifla_vf_stats *stats)
1618 {
1619 	if (!device->get_vf_stats)
1620 		return -ENOSYS;
1621 
1622 	return device->get_vf_stats(device, vf, port, stats);
1623 }
1624 EXPORT_SYMBOL(ib_get_vf_stats);
1625 
1626 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
1627 		   int type)
1628 {
1629 	if (!device->set_vf_guid)
1630 		return -ENOSYS;
1631 
1632 	return device->set_vf_guid(device, vf, port, guid, type);
1633 }
1634 EXPORT_SYMBOL(ib_set_vf_guid);
1635 
1636 /**
1637  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
1638  *     and set it as the memory region.
1639  * @mr:            memory region
1640  * @sg:            dma mapped scatterlist
1641  * @sg_nents:      number of entries in sg
1642  * @sg_offset:     offset in bytes into sg
1643  * @page_size:     page vector desired page size
1644  *
1645  * Constraints:
1646  * - The first sg element is allowed to have an offset.
1647  * - Each sg element must be aligned to page_size (or physically
1648  *   contiguous to the previous element). In case an sg element has a
1649  *   non-contiguous offset, the mapping prefix will not include it.
1650  * - The last sg element is allowed to have length less than page_size.
1651  * - If sg_nents total byte length exceeds the MR's max_num_sg * page_size
1652  *   then only max_num_sg entries will be mapped.
1653  * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, none of these
1654  *   constraints holds and the page_size argument is ignored.
1655  *
1656  * Returns the number of sg elements that were mapped to the memory region.
1657  *
1658  * After this completes successfully, the memory region
1659  * is ready for registration.
1660  */
1661 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
1662 		 unsigned int *sg_offset, unsigned int page_size)
1663 {
1664 	if (unlikely(!mr->device->map_mr_sg))
1665 		return -ENOSYS;
1666 
1667 	mr->page_size = page_size;
1668 
1669 	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
1670 }
1671 EXPORT_SYMBOL(ib_map_mr_sg);
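
/*
 * Example (illustrative sketch): mapping a DMA-mapped scatterlist into an
 * MR and posting the IB_WR_REG_MR work request that registers it.  Error
 * handling and completion signalling are abbreviated; example_map_and_reg()
 * is a hypothetical helper.
 */
static int example_map_and_reg(struct ib_qp *qp, struct ib_mr *mr,
			       struct scatterlist *sg, int sg_nents,
			       int access)
{
	struct ib_reg_wr reg_wr = {};
	struct ib_send_wr *bad_wr;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = access;

	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}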
1672 
1673 /**
1674  * ib_sg_to_pages() - Convert the largest prefix of a sg list
1675  *     to a page vector
1676  * @mr:            memory region
1677  * @sgl:           dma mapped scatterlist
1678  * @sg_nents:      number of entries in sg
1679  * @sg_offset_p:   IN:  start offset in bytes into sg
1680  *                 OUT: offset in bytes for element n of the sg of the first
1681  *                      byte that has not been processed where n is the return
1682  *                      value of this function.
1683  * @set_page:      driver page assignment function pointer
1684  *
1685  * Core service helper for drivers to convert the largest
1686  * prefix of a given sg list to a page vector. The sg list
1687  * prefix converted is the prefix that meets the requirements
1688  * of ib_map_mr_sg.
1689  *
1690  * Returns the number of sg elements that were assigned to
1691  * a page vector.
1692  */
1693 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
1694 		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
1695 {
1696 	struct scatterlist *sg;
1697 	u64 last_end_dma_addr = 0;
1698 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1699 	unsigned int last_page_off = 0;
1700 	u64 page_mask = ~((u64)mr->page_size - 1);
1701 	int i, ret;
1702 
1703 	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
1704 		return -EINVAL;
1705 
1706 	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
1707 	mr->length = 0;
1708 
1709 	for_each_sg(sgl, sg, sg_nents, i) {
1710 		u64 dma_addr = sg_dma_address(sg) + sg_offset;
1711 		u64 prev_addr = dma_addr;
1712 		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
1713 		u64 end_dma_addr = dma_addr + dma_len;
1714 		u64 page_addr = dma_addr & page_mask;
1715 
1716 		/*
1717 		 * For the second and later elements, check whether either the
1718 		 * end of element i-1 or the start of element i is not aligned
1719 		 * on a page boundary.
1720 		 */
1721 		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
1722 			/* Stop mapping if there is a gap. */
1723 			if (last_end_dma_addr != dma_addr)
1724 				break;
1725 
1726 			/*
1727 			 * Coalesce this element with the last. If it is small
1728 			 * enough just update mr->length. Otherwise start
1729 			 * mapping from the next page.
1730 			 */
1731 			goto next_page;
1732 		}
1733 
1734 		do {
1735 			ret = set_page(mr, page_addr);
1736 			if (unlikely(ret < 0)) {
1737 				sg_offset = prev_addr - sg_dma_address(sg);
1738 				mr->length += prev_addr - dma_addr;
1739 				if (sg_offset_p)
1740 					*sg_offset_p = sg_offset;
1741 				return i || sg_offset ? i : ret;
1742 			}
1743 			prev_addr = page_addr;
1744 next_page:
1745 			page_addr += mr->page_size;
1746 		} while (page_addr < end_dma_addr);
1747 
1748 		mr->length += dma_len;
1749 		last_end_dma_addr = end_dma_addr;
1750 		last_page_off = end_dma_addr & ~page_mask;
1751 
1752 		sg_offset = 0;
1753 	}
1754 
1755 	if (sg_offset_p)
1756 		*sg_offset_p = 0;
1757 	return i;
1758 }
1759 EXPORT_SYMBOL(ib_sg_to_pages);
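
/*
 * Example (illustrative sketch): how a hardware driver typically wires its
 * ->map_mr_sg() hook to ib_sg_to_pages().  struct example_hw_mr and the two
 * callbacks are hypothetical driver code, not part of this file.
 */
struct example_hw_mr {
	struct ib_mr ibmr;
	u64 *pages;
	int npages;
	int max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_hw_mr *mr =
		container_of(ibmr, struct example_hw_mr, ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_hw_mr *mr =
		container_of(ibmr, struct example_hw_mr, ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}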
1760 
1761 struct ib_drain_cqe {
1762 	struct ib_cqe cqe;
1763 	struct completion done;
1764 };
1765 
1766 static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
1767 {
1768 	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
1769 						cqe);
1770 
1771 	complete(&cqe->done);
1772 }
1773 
1774 /*
1775  * Post a WR and block until its completion is reaped for the SQ.
1776  */
1777 static void __ib_drain_sq(struct ib_qp *qp)
1778 {
1779 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
1780 	struct ib_drain_cqe sdrain;
1781 	struct ib_send_wr swr = {}, *bad_swr;
1782 	int ret;
1783 
1784 	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
1785 		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
1786 			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
1787 		return;
1788 	}
1789 
1790 	swr.wr_cqe = &sdrain.cqe;
1791 	sdrain.cqe.done = ib_drain_qp_done;
1792 	init_completion(&sdrain.done);
1793 
1794 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
1795 	if (ret) {
1796 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
1797 		return;
1798 	}
1799 
1800 	ret = ib_post_send(qp, &swr, &bad_swr);
1801 	if (ret) {
1802 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
1803 		return;
1804 	}
1805 
1806 	wait_for_completion(&sdrain.done);
1807 }
1808 
1809 /*
1810  * Post a WR and block until its completion is reaped for the RQ.
1811  */
1812 static void __ib_drain_rq(struct ib_qp *qp)
1813 {
1814 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
1815 	struct ib_drain_cqe rdrain;
1816 	struct ib_recv_wr rwr = {}, *bad_rwr;
1817 	int ret;
1818 
1819 	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
1820 		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
1821 			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
1822 		return;
1823 	}
1824 
1825 	rwr.wr_cqe = &rdrain.cqe;
1826 	rdrain.cqe.done = ib_drain_qp_done;
1827 	init_completion(&rdrain.done);
1828 
1829 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
1830 	if (ret) {
1831 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
1832 		return;
1833 	}
1834 
1835 	ret = ib_post_recv(qp, &rwr, &bad_rwr);
1836 	if (ret) {
1837 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
1838 		return;
1839 	}
1840 
1841 	wait_for_completion(&rdrain.done);
1842 }
1843 
1844 /**
1845  * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
1846  *		   application.
1847  * @qp:            queue pair to drain
1848  *
1849  * If the device has a provider-specific drain function, then
1850  * call that.  Otherwise call the generic drain function
1851  * __ib_drain_sq().
1852  *
1853  * The caller must:
1854  *
1855  * ensure there is room in the CQ and SQ for the drain work request and
1856  * completion.
1857  *
1858  * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
1859  * IB_POLL_DIRECT.
1860  *
1861  * ensure that there are no other contexts that are posting WRs concurrently.
1862  * Otherwise the drain is not guaranteed.
1863  */
1864 void ib_drain_sq(struct ib_qp *qp)
1865 {
1866 	if (qp->device->drain_sq)
1867 		qp->device->drain_sq(qp);
1868 	else
1869 		__ib_drain_sq(qp);
1870 }
1871 EXPORT_SYMBOL(ib_drain_sq);
1872 
1873 /**
1874  * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
1875  *		   application.
1876  * @qp:            queue pair to drain
1877  *
1878  * If the device has a provider-specific drain function, then
1879  * call that.  Otherwise call the generic drain function
1880  * __ib_drain_rq().
1881  *
1882  * The caller must:
1883  *
1884  * ensure there is room in the CQ and RQ for the drain work request and
1885  * completion.
1886  *
1887  * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
1888  * IB_POLL_DIRECT.
1889  *
1890  * ensure that there are no other contexts that are posting WRs concurrently.
1891  * Otherwise the drain is not guaranteed.
1892  */
1893 void ib_drain_rq(struct ib_qp *qp)
1894 {
1895 	if (qp->device->drain_rq)
1896 		qp->device->drain_rq(qp);
1897 	else
1898 		__ib_drain_rq(qp);
1899 }
1900 EXPORT_SYMBOL(ib_drain_rq);
1901 
1902 /**
1903  * ib_drain_qp() - Block until all CQEs have been consumed by the
1904  *		   application on both the RQ and SQ.
1905  * @qp:            queue pair to drain
1906  *
1907  * The caller must:
1908  *
1909  * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
1910  * and completions.
1911  *
1912  * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
1913  * IB_POLL_DIRECT.
1914  *
1915  * ensure that there are no other contexts that are posting WRs concurrently.
1916  * Otherwise the drain is not guaranteed.
1917  */
1918 void ib_drain_qp(struct ib_qp *qp)
1919 {
1920 	ib_drain_sq(qp);
1921 	if (!qp->srq)
1922 		ib_drain_rq(qp);
1923 }
1924 EXPORT_SYMBOL(ib_drain_qp);
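
/*
 * Example (illustrative sketch): typical teardown ordering for a ULP whose
 * CQs were allocated with ib_alloc_cq().  Draining first guarantees that no
 * completion handler still references per-WR state when the QP goes away.
 */
static void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}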
1925