xref: /openbmc/linux/drivers/infiniband/core/verbs.c (revision a52c8e24)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
31da177e4SLinus Torvalds  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
41da177e4SLinus Torvalds  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
51da177e4SLinus Torvalds  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
61da177e4SLinus Torvalds  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
72a1d9b7fSRoland Dreier  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
833b9b3eeSRoland Dreier  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  * This software is available to you under a choice of one of two
111da177e4SLinus Torvalds  * licenses.  You may choose to be licensed under the terms of the GNU
121da177e4SLinus Torvalds  * General Public License (GPL) Version 2, available from the file
131da177e4SLinus Torvalds  * COPYING in the main directory of this source tree, or the
141da177e4SLinus Torvalds  * OpenIB.org BSD license below:
151da177e4SLinus Torvalds  *
161da177e4SLinus Torvalds  *     Redistribution and use in source and binary forms, with or
171da177e4SLinus Torvalds  *     without modification, are permitted provided that the following
181da177e4SLinus Torvalds  *     conditions are met:
191da177e4SLinus Torvalds  *
201da177e4SLinus Torvalds  *      - Redistributions of source code must retain the above
211da177e4SLinus Torvalds  *        copyright notice, this list of conditions and the following
221da177e4SLinus Torvalds  *        disclaimer.
231da177e4SLinus Torvalds  *
241da177e4SLinus Torvalds  *      - Redistributions in binary form must reproduce the above
251da177e4SLinus Torvalds  *        copyright notice, this list of conditions and the following
261da177e4SLinus Torvalds  *        disclaimer in the documentation and/or other materials
271da177e4SLinus Torvalds  *        provided with the distribution.
281da177e4SLinus Torvalds  *
291da177e4SLinus Torvalds  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
301da177e4SLinus Torvalds  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
311da177e4SLinus Torvalds  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
321da177e4SLinus Torvalds  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
331da177e4SLinus Torvalds  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
341da177e4SLinus Torvalds  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
351da177e4SLinus Torvalds  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
361da177e4SLinus Torvalds  * SOFTWARE.
371da177e4SLinus Torvalds  */
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds #include <linux/errno.h>
401da177e4SLinus Torvalds #include <linux/err.h>
41b108d976SPaul Gortmaker #include <linux/export.h>
428c65b4a6STim Schmielau #include <linux/string.h>
430e0ec7e0SSean Hefty #include <linux/slab.h>
44dbf727deSMatan Barak #include <linux/in.h>
45dbf727deSMatan Barak #include <linux/in6.h>
46dbf727deSMatan Barak #include <net/addrconf.h>
47d291f1a6SDaniel Jurgens #include <linux/security.h>
481da177e4SLinus Torvalds 
49a4d61e84SRoland Dreier #include <rdma/ib_verbs.h>
50a4d61e84SRoland Dreier #include <rdma/ib_cache.h>
51dd5f03beSMatan Barak #include <rdma/ib_addr.h>
52a060b562SChristoph Hellwig #include <rdma/rw.h>
531da177e4SLinus Torvalds 
54ed4c54e5SOr Gerlitz #include "core_priv.h"
551da177e4SLinus Torvalds 
56c0348eb0SParav Pandit static int ib_resolve_eth_dmac(struct ib_device *device,
57c0348eb0SParav Pandit 			       struct rdma_ah_attr *ah_attr);
58c0348eb0SParav Pandit 
592b1b5b60SSagi Grimberg static const char * const ib_events[] = {
602b1b5b60SSagi Grimberg 	[IB_EVENT_CQ_ERR]		= "CQ error",
612b1b5b60SSagi Grimberg 	[IB_EVENT_QP_FATAL]		= "QP fatal error",
622b1b5b60SSagi Grimberg 	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
632b1b5b60SSagi Grimberg 	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
642b1b5b60SSagi Grimberg 	[IB_EVENT_COMM_EST]		= "communication established",
652b1b5b60SSagi Grimberg 	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
662b1b5b60SSagi Grimberg 	[IB_EVENT_PATH_MIG]		= "path migration successful",
672b1b5b60SSagi Grimberg 	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
682b1b5b60SSagi Grimberg 	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
692b1b5b60SSagi Grimberg 	[IB_EVENT_PORT_ACTIVE]		= "port active",
702b1b5b60SSagi Grimberg 	[IB_EVENT_PORT_ERR]		= "port error",
712b1b5b60SSagi Grimberg 	[IB_EVENT_LID_CHANGE]		= "LID change",
722b1b5b60SSagi Grimberg 	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
732b1b5b60SSagi Grimberg 	[IB_EVENT_SM_CHANGE]		= "SM change",
742b1b5b60SSagi Grimberg 	[IB_EVENT_SRQ_ERR]		= "SRQ error",
752b1b5b60SSagi Grimberg 	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
762b1b5b60SSagi Grimberg 	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
772b1b5b60SSagi Grimberg 	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
782b1b5b60SSagi Grimberg 	[IB_EVENT_GID_CHANGE]		= "GID changed",
792b1b5b60SSagi Grimberg };
802b1b5b60SSagi Grimberg 
81db7489e0SBart Van Assche const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
822b1b5b60SSagi Grimberg {
832b1b5b60SSagi Grimberg 	size_t index = event;
842b1b5b60SSagi Grimberg 
852b1b5b60SSagi Grimberg 	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
862b1b5b60SSagi Grimberg 			ib_events[index] : "unrecognized event";
872b1b5b60SSagi Grimberg }
882b1b5b60SSagi Grimberg EXPORT_SYMBOL(ib_event_msg);
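
/*
 * Illustrative sketch (not part of this file): a consumer's QP event
 * handler might use ib_event_msg() to log readable event names. The
 * handler name and log format below are hypothetical.
 */
static void example_qp_event_handler(struct ib_event *event, void *context)
{
	pr_info("QP event: %s\n", ib_event_msg(event->event));
}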
892b1b5b60SSagi Grimberg 
902b1b5b60SSagi Grimberg static const char * const wc_statuses[] = {
912b1b5b60SSagi Grimberg 	[IB_WC_SUCCESS]			= "success",
922b1b5b60SSagi Grimberg 	[IB_WC_LOC_LEN_ERR]		= "local length error",
932b1b5b60SSagi Grimberg 	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
942b1b5b60SSagi Grimberg 	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
952b1b5b60SSagi Grimberg 	[IB_WC_LOC_PROT_ERR]		= "local protection error",
962b1b5b60SSagi Grimberg 	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
972b1b5b60SSagi Grimberg 	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
982b1b5b60SSagi Grimberg 	[IB_WC_BAD_RESP_ERR]		= "bad response error",
992b1b5b60SSagi Grimberg 	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
1002b1b5b60SSagi Grimberg 	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
1012b1b5b60SSagi Grimberg 	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
1022b1b5b60SSagi Grimberg 	[IB_WC_REM_OP_ERR]		= "remote operation error",
1032b1b5b60SSagi Grimberg 	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
1042b1b5b60SSagi Grimberg 	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
1052b1b5b60SSagi Grimberg 	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
1062b1b5b60SSagi Grimberg 	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
1072b1b5b60SSagi Grimberg 	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
1082b1b5b60SSagi Grimberg 	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
1092b1b5b60SSagi Grimberg 	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
1102b1b5b60SSagi Grimberg 	[IB_WC_FATAL_ERR]		= "fatal error",
1112b1b5b60SSagi Grimberg 	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
1122b1b5b60SSagi Grimberg 	[IB_WC_GENERAL_ERR]		= "general error",
1132b1b5b60SSagi Grimberg };
1142b1b5b60SSagi Grimberg 
115db7489e0SBart Van Assche const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
1162b1b5b60SSagi Grimberg {
1172b1b5b60SSagi Grimberg 	size_t index = status;
1182b1b5b60SSagi Grimberg 
1192b1b5b60SSagi Grimberg 	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
1202b1b5b60SSagi Grimberg 			wc_statuses[index] : "unrecognized status";
1212b1b5b60SSagi Grimberg }
1222b1b5b60SSagi Grimberg EXPORT_SYMBOL(ib_wc_status_msg);
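
/*
 * Illustrative sketch (not part of this file): completion processing
 * commonly reports failed work completions with ib_wc_status_msg().
 * The function name and log format are hypothetical.
 */
static void example_log_bad_wc(const struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		pr_err("WC error: %s (%d) for wr_id %llu\n",
		       ib_wc_status_msg(wc->status), wc->status, wc->wr_id);
}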
1232b1b5b60SSagi Grimberg 
1248385fd84SRoland Dreier __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
125bf6a9e31SJack Morgenstein {
126bf6a9e31SJack Morgenstein 	switch (rate) {
127bf6a9e31SJack Morgenstein 	case IB_RATE_2_5_GBPS: return   1;
128bf6a9e31SJack Morgenstein 	case IB_RATE_5_GBPS:   return   2;
129bf6a9e31SJack Morgenstein 	case IB_RATE_10_GBPS:  return   4;
130bf6a9e31SJack Morgenstein 	case IB_RATE_20_GBPS:  return   8;
131bf6a9e31SJack Morgenstein 	case IB_RATE_30_GBPS:  return  12;
132bf6a9e31SJack Morgenstein 	case IB_RATE_40_GBPS:  return  16;
133bf6a9e31SJack Morgenstein 	case IB_RATE_60_GBPS:  return  24;
134bf6a9e31SJack Morgenstein 	case IB_RATE_80_GBPS:  return  32;
135bf6a9e31SJack Morgenstein 	case IB_RATE_120_GBPS: return  48;
136e2dda368SHans Westgaard Ry 	case IB_RATE_14_GBPS:  return   6;
137e2dda368SHans Westgaard Ry 	case IB_RATE_56_GBPS:  return  22;
138e2dda368SHans Westgaard Ry 	case IB_RATE_112_GBPS: return  45;
139e2dda368SHans Westgaard Ry 	case IB_RATE_168_GBPS: return  67;
140e2dda368SHans Westgaard Ry 	case IB_RATE_25_GBPS:  return  10;
141e2dda368SHans Westgaard Ry 	case IB_RATE_100_GBPS: return  40;
142e2dda368SHans Westgaard Ry 	case IB_RATE_200_GBPS: return  80;
143e2dda368SHans Westgaard Ry 	case IB_RATE_300_GBPS: return 120;
144a5a5d199SMichael Guralnik 	case IB_RATE_28_GBPS:  return  11;
145a5a5d199SMichael Guralnik 	case IB_RATE_50_GBPS:  return  20;
146a5a5d199SMichael Guralnik 	case IB_RATE_400_GBPS: return 160;
147a5a5d199SMichael Guralnik 	case IB_RATE_600_GBPS: return 240;
148bf6a9e31SJack Morgenstein 	default:	       return  -1;
149bf6a9e31SJack Morgenstein 	}
150bf6a9e31SJack Morgenstein }
151bf6a9e31SJack Morgenstein EXPORT_SYMBOL(ib_rate_to_mult);
152bf6a9e31SJack Morgenstein 
1538385fd84SRoland Dreier __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
154bf6a9e31SJack Morgenstein {
155bf6a9e31SJack Morgenstein 	switch (mult) {
156bf6a9e31SJack Morgenstein 	case 1:   return IB_RATE_2_5_GBPS;
157bf6a9e31SJack Morgenstein 	case 2:   return IB_RATE_5_GBPS;
158bf6a9e31SJack Morgenstein 	case 4:   return IB_RATE_10_GBPS;
159bf6a9e31SJack Morgenstein 	case 8:   return IB_RATE_20_GBPS;
160bf6a9e31SJack Morgenstein 	case 12:  return IB_RATE_30_GBPS;
161bf6a9e31SJack Morgenstein 	case 16:  return IB_RATE_40_GBPS;
162bf6a9e31SJack Morgenstein 	case 24:  return IB_RATE_60_GBPS;
163bf6a9e31SJack Morgenstein 	case 32:  return IB_RATE_80_GBPS;
164bf6a9e31SJack Morgenstein 	case 48:  return IB_RATE_120_GBPS;
165e2dda368SHans Westgaard Ry 	case 6:   return IB_RATE_14_GBPS;
166e2dda368SHans Westgaard Ry 	case 22:  return IB_RATE_56_GBPS;
167e2dda368SHans Westgaard Ry 	case 45:  return IB_RATE_112_GBPS;
168e2dda368SHans Westgaard Ry 	case 67:  return IB_RATE_168_GBPS;
169e2dda368SHans Westgaard Ry 	case 10:  return IB_RATE_25_GBPS;
170e2dda368SHans Westgaard Ry 	case 40:  return IB_RATE_100_GBPS;
171e2dda368SHans Westgaard Ry 	case 80:  return IB_RATE_200_GBPS;
172e2dda368SHans Westgaard Ry 	case 120: return IB_RATE_300_GBPS;
173a5a5d199SMichael Guralnik 	case 11:  return IB_RATE_28_GBPS;
174a5a5d199SMichael Guralnik 	case 20:  return IB_RATE_50_GBPS;
175a5a5d199SMichael Guralnik 	case 160: return IB_RATE_400_GBPS;
176a5a5d199SMichael Guralnik 	case 240: return IB_RATE_600_GBPS;
177bf6a9e31SJack Morgenstein 	default:  return IB_RATE_PORT_CURRENT;
178bf6a9e31SJack Morgenstein 	}
179bf6a9e31SJack Morgenstein }
180bf6a9e31SJack Morgenstein EXPORT_SYMBOL(mult_to_ib_rate);
181bf6a9e31SJack Morgenstein 
1828385fd84SRoland Dreier __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
18371eeba16SMarcel Apfelbaum {
18471eeba16SMarcel Apfelbaum 	switch (rate) {
18571eeba16SMarcel Apfelbaum 	case IB_RATE_2_5_GBPS: return 2500;
18671eeba16SMarcel Apfelbaum 	case IB_RATE_5_GBPS:   return 5000;
18771eeba16SMarcel Apfelbaum 	case IB_RATE_10_GBPS:  return 10000;
18871eeba16SMarcel Apfelbaum 	case IB_RATE_20_GBPS:  return 20000;
18971eeba16SMarcel Apfelbaum 	case IB_RATE_30_GBPS:  return 30000;
19071eeba16SMarcel Apfelbaum 	case IB_RATE_40_GBPS:  return 40000;
19171eeba16SMarcel Apfelbaum 	case IB_RATE_60_GBPS:  return 60000;
19271eeba16SMarcel Apfelbaum 	case IB_RATE_80_GBPS:  return 80000;
19371eeba16SMarcel Apfelbaum 	case IB_RATE_120_GBPS: return 120000;
19471eeba16SMarcel Apfelbaum 	case IB_RATE_14_GBPS:  return 14062;
19571eeba16SMarcel Apfelbaum 	case IB_RATE_56_GBPS:  return 56250;
19671eeba16SMarcel Apfelbaum 	case IB_RATE_112_GBPS: return 112500;
19771eeba16SMarcel Apfelbaum 	case IB_RATE_168_GBPS: return 168750;
19871eeba16SMarcel Apfelbaum 	case IB_RATE_25_GBPS:  return 25781;
19971eeba16SMarcel Apfelbaum 	case IB_RATE_100_GBPS: return 103125;
20071eeba16SMarcel Apfelbaum 	case IB_RATE_200_GBPS: return 206250;
20171eeba16SMarcel Apfelbaum 	case IB_RATE_300_GBPS: return 309375;
202a5a5d199SMichael Guralnik 	case IB_RATE_28_GBPS:  return 28125;
203a5a5d199SMichael Guralnik 	case IB_RATE_50_GBPS:  return 53125;
204a5a5d199SMichael Guralnik 	case IB_RATE_400_GBPS: return 425000;
205a5a5d199SMichael Guralnik 	case IB_RATE_600_GBPS: return 637500;
20671eeba16SMarcel Apfelbaum 	default:	       return -1;
20771eeba16SMarcel Apfelbaum 	}
20871eeba16SMarcel Apfelbaum }
20971eeba16SMarcel Apfelbaum EXPORT_SYMBOL(ib_rate_to_mbps);
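
/*
 * Illustrative sketch (not part of this file): the three rate helpers
 * above agree with each other; for example IB_RATE_100_GBPS maps to a
 * 40x multiplier and 103125 Mbps. The function name is hypothetical.
 */
static void example_rate_helpers(void)
{
	int mult = ib_rate_to_mult(IB_RATE_100_GBPS);	/* 40 */

	WARN_ON(mult_to_ib_rate(mult) != IB_RATE_100_GBPS);
	WARN_ON(ib_rate_to_mbps(IB_RATE_100_GBPS) != 103125);
}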
21071eeba16SMarcel Apfelbaum 
2118385fd84SRoland Dreier __attribute_const__ enum rdma_transport_type
21207ebafbaSTom Tucker rdma_node_get_transport(enum rdma_node_type node_type)
21307ebafbaSTom Tucker {
215cdc596d8SLeon Romanovsky 	if (node_type == RDMA_NODE_USNIC)
2165db5765eSUpinder Malhi 		return RDMA_TRANSPORT_USNIC;
217cdc596d8SLeon Romanovsky 	if (node_type == RDMA_NODE_USNIC_UDP)
218248567f7SUpinder Malhi 		return RDMA_TRANSPORT_USNIC_UDP;
219cdc596d8SLeon Romanovsky 	if (node_type == RDMA_NODE_RNIC)
220cdc596d8SLeon Romanovsky 		return RDMA_TRANSPORT_IWARP;
221f95be3d2SGal Pressman 	if (node_type == RDMA_NODE_UNSPECIFIED)
222f95be3d2SGal Pressman 		return RDMA_TRANSPORT_UNSPECIFIED;
223cdc596d8SLeon Romanovsky 
224cdc596d8SLeon Romanovsky 	return RDMA_TRANSPORT_IB;
22507ebafbaSTom Tucker }
22607ebafbaSTom Tucker EXPORT_SYMBOL(rdma_node_get_transport);
22707ebafbaSTom Tucker 
228a3f5adafSEli Cohen enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
229a3f5adafSEli Cohen {
23082901e3eSLeon Romanovsky 	enum rdma_transport_type lt;
2313023a1e9SKamal Heib 	if (device->ops.get_link_layer)
2323023a1e9SKamal Heib 		return device->ops.get_link_layer(device, port_num);
233a3f5adafSEli Cohen 
23482901e3eSLeon Romanovsky 	lt = rdma_node_get_transport(device->node_type);
23582901e3eSLeon Romanovsky 	if (lt == RDMA_TRANSPORT_IB)
236a3f5adafSEli Cohen 		return IB_LINK_LAYER_INFINIBAND;
23782901e3eSLeon Romanovsky 
238a3f5adafSEli Cohen 	return IB_LINK_LAYER_ETHERNET;
239a3f5adafSEli Cohen }
240a3f5adafSEli Cohen EXPORT_SYMBOL(rdma_port_get_link_layer);
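
/*
 * Illustrative sketch (not part of this file): callers typically branch
 * on the link layer, e.g. to decide whether Ethernet/RoCE addressing
 * applies to a port. The helper name is hypothetical.
 */
static bool example_port_is_ethernet(struct ib_device *device, u8 port_num)
{
	return rdma_port_get_link_layer(device, port_num) ==
	       IB_LINK_LAYER_ETHERNET;
}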
241a3f5adafSEli Cohen 
2421da177e4SLinus Torvalds /* Protection domains */
2431da177e4SLinus Torvalds 
24496249d70SJason Gunthorpe /**
24596249d70SJason Gunthorpe  * ib_alloc_pd - Allocates an unused protection domain.
24696249d70SJason Gunthorpe  * @device: The device on which to allocate the protection domain.
24796249d70SJason Gunthorpe  *
24896249d70SJason Gunthorpe  * A protection domain object provides an association between QPs, shared
24996249d70SJason Gunthorpe  * receive queues, address handles, memory regions, and memory windows.
25096249d70SJason Gunthorpe  *
25196249d70SJason Gunthorpe  * Every PD has a local_dma_lkey which can be used as the lkey value for local
25296249d70SJason Gunthorpe  * memory operations.
25396249d70SJason Gunthorpe  */
254ed082d36SChristoph Hellwig struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
255ed082d36SChristoph Hellwig 		const char *caller)
2561da177e4SLinus Torvalds {
2571da177e4SLinus Torvalds 	struct ib_pd *pd;
258ed082d36SChristoph Hellwig 	int mr_access_flags = 0;
25921a428a0SLeon Romanovsky 	int ret;
2601da177e4SLinus Torvalds 
26121a428a0SLeon Romanovsky 	pd = rdma_zalloc_drv_obj(device, ib_pd);
26221a428a0SLeon Romanovsky 	if (!pd)
26321a428a0SLeon Romanovsky 		return ERR_PTR(-ENOMEM);
2641da177e4SLinus Torvalds 
2651da177e4SLinus Torvalds 	pd->device = device;
266b5e81bf5SRoland Dreier 	pd->uobject = NULL;
26750d46335SChristoph Hellwig 	pd->__internal_mr = NULL;
2681da177e4SLinus Torvalds 	atomic_set(&pd->usecnt, 0);
269ed082d36SChristoph Hellwig 	pd->flags = flags;
27096249d70SJason Gunthorpe 
27121a428a0SLeon Romanovsky 	pd->res.type = RDMA_RESTRACK_PD;
27221a428a0SLeon Romanovsky 	rdma_restrack_set_task(&pd->res, caller);
27321a428a0SLeon Romanovsky 
274ff23dfa1SShamir Rabinovitch 	ret = device->ops.alloc_pd(pd, NULL);
27521a428a0SLeon Romanovsky 	if (ret) {
27621a428a0SLeon Romanovsky 		kfree(pd);
27721a428a0SLeon Romanovsky 		return ERR_PTR(ret);
27821a428a0SLeon Romanovsky 	}
27921a428a0SLeon Romanovsky 	rdma_restrack_kadd(&pd->res);
28021a428a0SLeon Romanovsky 
28186bee4c9SOr Gerlitz 	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
28296249d70SJason Gunthorpe 		pd->local_dma_lkey = device->local_dma_lkey;
283ed082d36SChristoph Hellwig 	else
284ed082d36SChristoph Hellwig 		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
285ed082d36SChristoph Hellwig 
286ed082d36SChristoph Hellwig 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
287ed082d36SChristoph Hellwig 		pr_warn("%s: enabling unsafe global rkey\n", caller);
288ed082d36SChristoph Hellwig 		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
289ed082d36SChristoph Hellwig 	}
290ed082d36SChristoph Hellwig 
291ed082d36SChristoph Hellwig 	if (mr_access_flags) {
29296249d70SJason Gunthorpe 		struct ib_mr *mr;
29396249d70SJason Gunthorpe 
2943023a1e9SKamal Heib 		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
29596249d70SJason Gunthorpe 		if (IS_ERR(mr)) {
29696249d70SJason Gunthorpe 			ib_dealloc_pd(pd);
2975ef990f0SChristoph Hellwig 			return ERR_CAST(mr);
2981da177e4SLinus Torvalds 		}
2991da177e4SLinus Torvalds 
3005ef990f0SChristoph Hellwig 		mr->device	= pd->device;
3015ef990f0SChristoph Hellwig 		mr->pd		= pd;
3025ef990f0SChristoph Hellwig 		mr->uobject	= NULL;
3035ef990f0SChristoph Hellwig 		mr->need_inval	= false;
3045ef990f0SChristoph Hellwig 
30550d46335SChristoph Hellwig 		pd->__internal_mr = mr;
306ed082d36SChristoph Hellwig 
307ed082d36SChristoph Hellwig 		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
30850d46335SChristoph Hellwig 			pd->local_dma_lkey = pd->__internal_mr->lkey;
309ed082d36SChristoph Hellwig 
310ed082d36SChristoph Hellwig 		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
311ed082d36SChristoph Hellwig 			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
31296249d70SJason Gunthorpe 	}
313ed082d36SChristoph Hellwig 
3141da177e4SLinus Torvalds 	return pd;
3151da177e4SLinus Torvalds }
316ed082d36SChristoph Hellwig EXPORT_SYMBOL(__ib_alloc_pd);
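
/*
 * Illustrative sketch (not part of this file): kernel consumers normally
 * allocate a PD through the ib_alloc_pd() wrapper, which supplies the
 * module name as @caller. The function name below is hypothetical.
 */
static int example_use_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device, 0);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... post work requests using pd->local_dma_lkey ... */

	ib_dealloc_pd(pd);
	return 0;
}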
3171da177e4SLinus Torvalds 
3187dd78647SJason Gunthorpe /**
31991f57129SIsrael Rukshin  * ib_dealloc_pd_user - Deallocates a protection domain.
3207dd78647SJason Gunthorpe  * @pd: The protection domain to deallocate.
321c4367a26SShamir Rabinovitch  * @udata: Valid user data or NULL for kernel object
3227dd78647SJason Gunthorpe  *
3237dd78647SJason Gunthorpe  * It is an error to call this function while any resources in the pd still
3237dd78647SJason Gunthorpe  * exist.  The caller is responsible for synchronously destroying them and
3247dd78647SJason Gunthorpe  * guaranteeing that no new allocations will happen.
3267dd78647SJason Gunthorpe  */
327c4367a26SShamir Rabinovitch void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
3281da177e4SLinus Torvalds {
3297dd78647SJason Gunthorpe 	int ret;
3301da177e4SLinus Torvalds 
33150d46335SChristoph Hellwig 	if (pd->__internal_mr) {
332c4367a26SShamir Rabinovitch 		ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
3337dd78647SJason Gunthorpe 		WARN_ON(ret);
33450d46335SChristoph Hellwig 		pd->__internal_mr = NULL;
33596249d70SJason Gunthorpe 	}
33696249d70SJason Gunthorpe 
3377dd78647SJason Gunthorpe 	/* uverbs manipulates usecnt with proper locking, while the kabi
3387dd78647SJason Gunthorpe 	   requires the caller to guarantee we can't race here. */
3397dd78647SJason Gunthorpe 	WARN_ON(atomic_read(&pd->usecnt));
3401da177e4SLinus Torvalds 
3419d5f8c20SLeon Romanovsky 	rdma_restrack_del(&pd->res);
342c4367a26SShamir Rabinovitch 	pd->device->ops.dealloc_pd(pd, udata);
34321a428a0SLeon Romanovsky 	kfree(pd);
3441da177e4SLinus Torvalds }
345c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_dealloc_pd_user);
3461da177e4SLinus Torvalds 
3471da177e4SLinus Torvalds /* Address handles */
3481da177e4SLinus Torvalds 
349d97099feSJason Gunthorpe /**
350d97099feSJason Gunthorpe  * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
351d97099feSJason Gunthorpe  * @dest:       Pointer to destination ah_attr. Contents of the destination
352d97099feSJason Gunthorpe  *              pointer are assumed to be invalid and the attributes are overwritten.
353d97099feSJason Gunthorpe  * @src:        Pointer to source ah_attr.
354d97099feSJason Gunthorpe  */
355d97099feSJason Gunthorpe void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
356d97099feSJason Gunthorpe 		       const struct rdma_ah_attr *src)
357d97099feSJason Gunthorpe {
358d97099feSJason Gunthorpe 	*dest = *src;
359d97099feSJason Gunthorpe 	if (dest->grh.sgid_attr)
360d97099feSJason Gunthorpe 		rdma_hold_gid_attr(dest->grh.sgid_attr);
361d97099feSJason Gunthorpe }
362d97099feSJason Gunthorpe EXPORT_SYMBOL(rdma_copy_ah_attr);
363d97099feSJason Gunthorpe 
364d97099feSJason Gunthorpe /**
365d97099feSJason Gunthorpe  * rdma_replace_ah_attr - Replace valid ah_attr with a new one.
366d97099feSJason Gunthorpe  * @old:        Pointer to existing ah_attr which needs to be replaced.
367d97099feSJason Gunthorpe  *              old is assumed to be valid or zero'd
368d97099feSJason Gunthorpe  * @new:        Pointer to the new ah_attr.
369d97099feSJason Gunthorpe  *
370d97099feSJason Gunthorpe  * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
371d97099feSJason Gunthorpe  * the old ah_attr is valid; after that it copies the new attribute and holds
372d97099feSJason Gunthorpe  * the reference to the replaced ah_attr.
373d97099feSJason Gunthorpe  */
374d97099feSJason Gunthorpe void rdma_replace_ah_attr(struct rdma_ah_attr *old,
375d97099feSJason Gunthorpe 			  const struct rdma_ah_attr *new)
376d97099feSJason Gunthorpe {
377d97099feSJason Gunthorpe 	rdma_destroy_ah_attr(old);
378d97099feSJason Gunthorpe 	*old = *new;
379d97099feSJason Gunthorpe 	if (old->grh.sgid_attr)
380d97099feSJason Gunthorpe 		rdma_hold_gid_attr(old->grh.sgid_attr);
381d97099feSJason Gunthorpe }
382d97099feSJason Gunthorpe EXPORT_SYMBOL(rdma_replace_ah_attr);
383d97099feSJason Gunthorpe 
384d97099feSJason Gunthorpe /**
385d97099feSJason Gunthorpe  * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
386d97099feSJason Gunthorpe  * @dest:       Pointer to destination ah_attr to copy to.
387d97099feSJason Gunthorpe  *              dest is assumed to be valid or zero'd
388d97099feSJason Gunthorpe  * @src:        Pointer to the new ah_attr.
389d97099feSJason Gunthorpe  *
390d97099feSJason Gunthorpe  * rdma_move_ah_attr() first releases any reference in the destination ah_attr
391d97099feSJason Gunthorpe  * if it is valid. This also transfers ownership of internal references from
392d97099feSJason Gunthorpe  * src to dest, making src invalid in the process. No new reference of the src
393d97099feSJason Gunthorpe  * ah_attr is taken.
394d97099feSJason Gunthorpe  */
395d97099feSJason Gunthorpe void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
396d97099feSJason Gunthorpe {
397d97099feSJason Gunthorpe 	rdma_destroy_ah_attr(dest);
398d97099feSJason Gunthorpe 	*dest = *src;
399d97099feSJason Gunthorpe 	src->grh.sgid_attr = NULL;
400d97099feSJason Gunthorpe }
401d97099feSJason Gunthorpe EXPORT_SYMBOL(rdma_move_ah_attr);
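
/*
 * Illustrative sketch (not part of this file): copy vs. move semantics of
 * the helpers above. After a copy both attributes hold a reference; after
 * a move only the destination does. The function name is hypothetical.
 */
static void example_ah_attr_ownership(struct rdma_ah_attr *dst,
				      struct rdma_ah_attr *src)
{
	rdma_copy_ah_attr(dst, src);	/* dst and src each hold a ref */
	rdma_destroy_ah_attr(dst);	/* drop dst's ref; src stays valid */

	rdma_move_ah_attr(dst, src);	/* src is now invalid */
	rdma_destroy_ah_attr(dst);	/* drops the remaining ref */
}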
402d97099feSJason Gunthorpe 
4038d9ec9adSJason Gunthorpe /*
4048d9ec9adSJason Gunthorpe  * Validate that the rdma_ah_attr is valid for the device before passing it
4058d9ec9adSJason Gunthorpe  * off to the driver.
4068d9ec9adSJason Gunthorpe  */
4078d9ec9adSJason Gunthorpe static int rdma_check_ah_attr(struct ib_device *device,
4088d9ec9adSJason Gunthorpe 			      struct rdma_ah_attr *ah_attr)
4098d9ec9adSJason Gunthorpe {
4108d9ec9adSJason Gunthorpe 	if (!rdma_is_port_valid(device, ah_attr->port_num))
4118d9ec9adSJason Gunthorpe 		return -EINVAL;
4128d9ec9adSJason Gunthorpe 
413b02289b3SArtemy Kovalyov 	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
414b02289b3SArtemy Kovalyov 	     ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
4158d9ec9adSJason Gunthorpe 	    !(ah_attr->ah_flags & IB_AH_GRH))
4168d9ec9adSJason Gunthorpe 		return -EINVAL;
4178d9ec9adSJason Gunthorpe 
4188d9ec9adSJason Gunthorpe 	if (ah_attr->grh.sgid_attr) {
4198d9ec9adSJason Gunthorpe 		/*
4208d9ec9adSJason Gunthorpe 		 * Make sure the passed sgid_attr is consistent with the
4218d9ec9adSJason Gunthorpe 		 * parameters
4228d9ec9adSJason Gunthorpe 		 */
4238d9ec9adSJason Gunthorpe 		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
4248d9ec9adSJason Gunthorpe 		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
4258d9ec9adSJason Gunthorpe 			return -EINVAL;
4268d9ec9adSJason Gunthorpe 	}
4278d9ec9adSJason Gunthorpe 	return 0;
4288d9ec9adSJason Gunthorpe }
4298d9ec9adSJason Gunthorpe 
4308d9ec9adSJason Gunthorpe /*
4318d9ec9adSJason Gunthorpe  * If the ah requires a GRH then ensure that the sgid_attr pointer is filled in.
4328d9ec9adSJason Gunthorpe  * On success the caller is responsible to call rdma_unfill_sgid_attr().
4338d9ec9adSJason Gunthorpe  */
4348d9ec9adSJason Gunthorpe static int rdma_fill_sgid_attr(struct ib_device *device,
4358d9ec9adSJason Gunthorpe 			       struct rdma_ah_attr *ah_attr,
4368d9ec9adSJason Gunthorpe 			       const struct ib_gid_attr **old_sgid_attr)
4378d9ec9adSJason Gunthorpe {
4388d9ec9adSJason Gunthorpe 	const struct ib_gid_attr *sgid_attr;
4398d9ec9adSJason Gunthorpe 	struct ib_global_route *grh;
4408d9ec9adSJason Gunthorpe 	int ret;
4418d9ec9adSJason Gunthorpe 
4428d9ec9adSJason Gunthorpe 	*old_sgid_attr = ah_attr->grh.sgid_attr;
4438d9ec9adSJason Gunthorpe 
4448d9ec9adSJason Gunthorpe 	ret = rdma_check_ah_attr(device, ah_attr);
4458d9ec9adSJason Gunthorpe 	if (ret)
4468d9ec9adSJason Gunthorpe 		return ret;
4478d9ec9adSJason Gunthorpe 
4488d9ec9adSJason Gunthorpe 	if (!(ah_attr->ah_flags & IB_AH_GRH))
4498d9ec9adSJason Gunthorpe 		return 0;
4508d9ec9adSJason Gunthorpe 
4518d9ec9adSJason Gunthorpe 	grh = rdma_ah_retrieve_grh(ah_attr);
4528d9ec9adSJason Gunthorpe 	if (grh->sgid_attr)
4538d9ec9adSJason Gunthorpe 		return 0;
4548d9ec9adSJason Gunthorpe 
4558d9ec9adSJason Gunthorpe 	sgid_attr =
4568d9ec9adSJason Gunthorpe 		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
4578d9ec9adSJason Gunthorpe 	if (IS_ERR(sgid_attr))
4588d9ec9adSJason Gunthorpe 		return PTR_ERR(sgid_attr);
4598d9ec9adSJason Gunthorpe 
4608d9ec9adSJason Gunthorpe 	/* Move ownership of the kref into the ah_attr */
4618d9ec9adSJason Gunthorpe 	grh->sgid_attr = sgid_attr;
4628d9ec9adSJason Gunthorpe 	return 0;
4638d9ec9adSJason Gunthorpe }
4648d9ec9adSJason Gunthorpe 
4658d9ec9adSJason Gunthorpe static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
4668d9ec9adSJason Gunthorpe 				  const struct ib_gid_attr *old_sgid_attr)
4678d9ec9adSJason Gunthorpe {
4688d9ec9adSJason Gunthorpe 	/*
4698d9ec9adSJason Gunthorpe 	 * Fill didn't change anything, so the caller retains ownership of
4708d9ec9adSJason Gunthorpe 	 * whatever it passed
4718d9ec9adSJason Gunthorpe 	 */
4728d9ec9adSJason Gunthorpe 	if (ah_attr->grh.sgid_attr == old_sgid_attr)
4738d9ec9adSJason Gunthorpe 		return;
4748d9ec9adSJason Gunthorpe 
4758d9ec9adSJason Gunthorpe 	/*
4768d9ec9adSJason Gunthorpe 	 * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the
4778d9ec9adSJason Gunthorpe 	 * caller doesn't see any change in the rdma_ah_attr. If we get here,
4788d9ec9adSJason Gunthorpe 	 * old_sgid_attr is NULL.
4798d9ec9adSJason Gunthorpe 	 */
4808d9ec9adSJason Gunthorpe 	rdma_destroy_ah_attr(ah_attr);
4818d9ec9adSJason Gunthorpe }
4828d9ec9adSJason Gunthorpe 
4831a1f460fSJason Gunthorpe static const struct ib_gid_attr *
4841a1f460fSJason Gunthorpe rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
4851a1f460fSJason Gunthorpe 		      const struct ib_gid_attr *old_attr)
4861a1f460fSJason Gunthorpe {
4871a1f460fSJason Gunthorpe 	if (old_attr)
4881a1f460fSJason Gunthorpe 		rdma_put_gid_attr(old_attr);
4891a1f460fSJason Gunthorpe 	if (ah_attr->ah_flags & IB_AH_GRH) {
4901a1f460fSJason Gunthorpe 		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
4911a1f460fSJason Gunthorpe 		return ah_attr->grh.sgid_attr;
4921a1f460fSJason Gunthorpe 	}
4931a1f460fSJason Gunthorpe 	return NULL;
4941a1f460fSJason Gunthorpe }
4951a1f460fSJason Gunthorpe 
4965cda6587SParav Pandit static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
4975cda6587SParav Pandit 				     struct rdma_ah_attr *ah_attr,
498b090c4e3SGal Pressman 				     u32 flags,
4995cda6587SParav Pandit 				     struct ib_udata *udata)
5001da177e4SLinus Torvalds {
501d3456914SLeon Romanovsky 	struct ib_device *device = pd->device;
5021da177e4SLinus Torvalds 	struct ib_ah *ah;
503d3456914SLeon Romanovsky 	int ret;
5041da177e4SLinus Torvalds 
505b090c4e3SGal Pressman 	might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
506b090c4e3SGal Pressman 
507d3456914SLeon Romanovsky 	if (!device->ops.create_ah)
5080584c47bSKamal Heib 		return ERR_PTR(-EOPNOTSUPP);
5090584c47bSKamal Heib 
510d3456914SLeon Romanovsky 	ah = rdma_zalloc_drv_obj_gfp(
511d3456914SLeon Romanovsky 		device, ib_ah,
512d3456914SLeon Romanovsky 		(flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
513d3456914SLeon Romanovsky 	if (!ah)
514d3456914SLeon Romanovsky 		return ERR_PTR(-ENOMEM);
5151da177e4SLinus Torvalds 
516d3456914SLeon Romanovsky 	ah->device = device;
5171da177e4SLinus Torvalds 	ah->pd = pd;
51844c58487SDasaratharaman Chandramouli 	ah->type = ah_attr->type;
5191a1f460fSJason Gunthorpe 	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
5201a1f460fSJason Gunthorpe 
521d3456914SLeon Romanovsky 	ret = device->ops.create_ah(ah, ah_attr, flags, udata);
522d3456914SLeon Romanovsky 	if (ret) {
523d3456914SLeon Romanovsky 		kfree(ah);
524d3456914SLeon Romanovsky 		return ERR_PTR(ret);
5251da177e4SLinus Torvalds 	}
5261da177e4SLinus Torvalds 
527d3456914SLeon Romanovsky 	atomic_inc(&pd->usecnt);
5281da177e4SLinus Torvalds 	return ah;
5291da177e4SLinus Torvalds }
5305cda6587SParav Pandit 
5318d9ec9adSJason Gunthorpe /**
5328d9ec9adSJason Gunthorpe  * rdma_create_ah - Creates an address handle for the
5338d9ec9adSJason Gunthorpe  * given address vector.
5348d9ec9adSJason Gunthorpe  * @pd: The protection domain associated with the address handle.
5358d9ec9adSJason Gunthorpe  * @ah_attr: The attributes of the address vector.
536b090c4e3SGal Pressman  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
5378d9ec9adSJason Gunthorpe  *
5388d9ec9adSJason Gunthorpe  * It returns a valid address handle pointer on success and an ERR_PTR on failure.
5398d9ec9adSJason Gunthorpe  * The address handle is used to reference a local or global destination
5408d9ec9adSJason Gunthorpe  * in all UD QP post sends.
5418d9ec9adSJason Gunthorpe  */
542b090c4e3SGal Pressman struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
543b090c4e3SGal Pressman 			     u32 flags)
5445cda6587SParav Pandit {
5458d9ec9adSJason Gunthorpe 	const struct ib_gid_attr *old_sgid_attr;
5468d9ec9adSJason Gunthorpe 	struct ib_ah *ah;
5478d9ec9adSJason Gunthorpe 	int ret;
5488d9ec9adSJason Gunthorpe 
5498d9ec9adSJason Gunthorpe 	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
5508d9ec9adSJason Gunthorpe 	if (ret)
5518d9ec9adSJason Gunthorpe 		return ERR_PTR(ret);
5528d9ec9adSJason Gunthorpe 
553b090c4e3SGal Pressman 	ah = _rdma_create_ah(pd, ah_attr, flags, NULL);
5548d9ec9adSJason Gunthorpe 
5558d9ec9adSJason Gunthorpe 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
5568d9ec9adSJason Gunthorpe 	return ah;
5575cda6587SParav Pandit }
5580a18cfe4SDasaratharaman Chandramouli EXPORT_SYMBOL(rdma_create_ah);
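
/*
 * Illustrative sketch (not part of this file): building a minimal IB
 * (non-RoCE) address vector and creating an AH from sleepable context.
 * The function name and parameters are hypothetical; the returned AH is
 * eventually released with rdma_destroy_ah().
 */
static struct ib_ah *example_create_ud_ah(struct ib_pd *pd, u8 port_num,
					  u32 dlid, u8 sl)
{
	struct rdma_ah_attr ah_attr = {};

	ah_attr.type = rdma_ah_find_type(pd->device, port_num);
	rdma_ah_set_dlid(&ah_attr, dlid);
	rdma_ah_set_sl(&ah_attr, sl);
	rdma_ah_set_port_num(&ah_attr, port_num);

	return rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
}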
5591da177e4SLinus Torvalds 
5605cda6587SParav Pandit /**
5615cda6587SParav Pandit  * rdma_create_user_ah - Creates an address handle for the
5625cda6587SParav Pandit  * given address vector.
5635cda6587SParav Pandit  * It resolves destination mac address for ah attribute of RoCE type.
5645cda6587SParav Pandit  * It resolves the destination mac address for an ah attribute of RoCE type.
5655cda6587SParav Pandit  * @ah_attr: The attributes of the address vector.
5665cda6587SParav Pandit  * @udata: pointer to the user's input/output buffer information needed by
5675cda6587SParav Pandit  *         the provider driver.
5685cda6587SParav Pandit  *
5695cda6587SParav Pandit  * It returns a valid address handle pointer on success and an ERR_PTR on failure.
5705cda6587SParav Pandit  * The address handle is used to reference a local or global destination
5715cda6587SParav Pandit  * in all UD QP post sends.
5725cda6587SParav Pandit  */
5735cda6587SParav Pandit struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
5745cda6587SParav Pandit 				  struct rdma_ah_attr *ah_attr,
5755cda6587SParav Pandit 				  struct ib_udata *udata)
5765cda6587SParav Pandit {
5778d9ec9adSJason Gunthorpe 	const struct ib_gid_attr *old_sgid_attr;
5788d9ec9adSJason Gunthorpe 	struct ib_ah *ah;
5795cda6587SParav Pandit 	int err;
5805cda6587SParav Pandit 
5818d9ec9adSJason Gunthorpe 	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
5828d9ec9adSJason Gunthorpe 	if (err)
5838d9ec9adSJason Gunthorpe 		return ERR_PTR(err);
5848d9ec9adSJason Gunthorpe 
5855cda6587SParav Pandit 	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
5865cda6587SParav Pandit 		err = ib_resolve_eth_dmac(pd->device, ah_attr);
5878d9ec9adSJason Gunthorpe 		if (err) {
5888d9ec9adSJason Gunthorpe 			ah = ERR_PTR(err);
5898d9ec9adSJason Gunthorpe 			goto out;
5908d9ec9adSJason Gunthorpe 		}
5915cda6587SParav Pandit 	}
5925cda6587SParav Pandit 
593b090c4e3SGal Pressman 	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);
5948d9ec9adSJason Gunthorpe 
5958d9ec9adSJason Gunthorpe out:
5968d9ec9adSJason Gunthorpe 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
5978d9ec9adSJason Gunthorpe 	return ah;
5985cda6587SParav Pandit }
5995cda6587SParav Pandit EXPORT_SYMBOL(rdma_create_user_ah);
6005cda6587SParav Pandit 
601850d8fd7SMoni Shoua int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
602c865f246SSomnath Kotur {
603c865f246SSomnath Kotur 	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
604c865f246SSomnath Kotur 	struct iphdr ip4h_checked;
605c865f246SSomnath Kotur 	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
606c865f246SSomnath Kotur 
607c865f246SSomnath Kotur 	/* If it's IPv6, the version must be 6; otherwise the first
608c865f246SSomnath Kotur 	 * 20 bytes (before the IPv4 header) are garbled.
609c865f246SSomnath Kotur 	 */
610c865f246SSomnath Kotur 	if (ip6h->version != 6)
611c865f246SSomnath Kotur 		return (ip4h->version == 4) ? 4 : 0;
612c865f246SSomnath Kotur 	/* version may be 6 or 4 because the first 20 bytes could be garbled */
613c865f246SSomnath Kotur 
614c865f246SSomnath Kotur 	/* RoCE v2 requires no options, thus header length
615c865f246SSomnath Kotur 	 * must be 5 words
616c865f246SSomnath Kotur 	 */
617c865f246SSomnath Kotur 	if (ip4h->ihl != 5)
618c865f246SSomnath Kotur 		return 6;
619c865f246SSomnath Kotur 
620c865f246SSomnath Kotur 	/* Verify checksum.
621c865f246SSomnath Kotur 	 * We can't write on scattered buffers so we need to copy to
622c865f246SSomnath Kotur 	 * temp buffer.
623c865f246SSomnath Kotur 	 */
624c865f246SSomnath Kotur 	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
625c865f246SSomnath Kotur 	ip4h_checked.check = 0;
626c865f246SSomnath Kotur 	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
627c865f246SSomnath Kotur 	/* if IPv4 header checksum is OK, believe it */
628c865f246SSomnath Kotur 	if (ip4h->check == ip4h_checked.check)
629c865f246SSomnath Kotur 		return 4;
630c865f246SSomnath Kotur 	return 6;
631c865f246SSomnath Kotur }
632850d8fd7SMoni Shoua EXPORT_SYMBOL(ib_get_rdma_header_version);
633c865f246SSomnath Kotur 
634c865f246SSomnath Kotur static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
635c865f246SSomnath Kotur 						     u8 port_num,
636c865f246SSomnath Kotur 						     const struct ib_grh *grh)
637c865f246SSomnath Kotur {
638c865f246SSomnath Kotur 	int grh_version;
639c865f246SSomnath Kotur 
640c865f246SSomnath Kotur 	if (rdma_protocol_ib(device, port_num))
641c865f246SSomnath Kotur 		return RDMA_NETWORK_IB;
642c865f246SSomnath Kotur 
643850d8fd7SMoni Shoua 	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
644c865f246SSomnath Kotur 
645c865f246SSomnath Kotur 	if (grh_version == 4)
646c865f246SSomnath Kotur 		return RDMA_NETWORK_IPV4;
647c865f246SSomnath Kotur 
648c865f246SSomnath Kotur 	if (grh->next_hdr == IPPROTO_UDP)
649c865f246SSomnath Kotur 		return RDMA_NETWORK_IPV6;
650c865f246SSomnath Kotur 
651c865f246SSomnath Kotur 	return RDMA_NETWORK_ROCE_V1;
652c865f246SSomnath Kotur }
653c865f246SSomnath Kotur 
654dbf727deSMatan Barak struct find_gid_index_context {
655dbf727deSMatan Barak 	u16 vlan_id;
656c865f246SSomnath Kotur 	enum ib_gid_type gid_type;
657dbf727deSMatan Barak };
658dbf727deSMatan Barak 
659dbf727deSMatan Barak static bool find_gid_index(const union ib_gid *gid,
660dbf727deSMatan Barak 			   const struct ib_gid_attr *gid_attr,
661dbf727deSMatan Barak 			   void *context)
662dbf727deSMatan Barak {
663b0dd0d33SParav Pandit 	struct find_gid_index_context *ctx = context;
664dbf727deSMatan Barak 
665c865f246SSomnath Kotur 	if (ctx->gid_type != gid_attr->gid_type)
666c865f246SSomnath Kotur 		return false;
667c865f246SSomnath Kotur 
668dbf727deSMatan Barak 	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
669dbf727deSMatan Barak 	    (is_vlan_dev(gid_attr->ndev) &&
670dbf727deSMatan Barak 	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
671dbf727deSMatan Barak 		return false;
672dbf727deSMatan Barak 
673dbf727deSMatan Barak 	return true;
674dbf727deSMatan Barak }
675dbf727deSMatan Barak 
676b7403217SParav Pandit static const struct ib_gid_attr *
677b7403217SParav Pandit get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
678dbf727deSMatan Barak 		       u16 vlan_id, const union ib_gid *sgid,
679b7403217SParav Pandit 		       enum ib_gid_type gid_type)
680dbf727deSMatan Barak {
681c865f246SSomnath Kotur 	struct find_gid_index_context context = {.vlan_id = vlan_id,
682c865f246SSomnath Kotur 						 .gid_type = gid_type};
683dbf727deSMatan Barak 
684b7403217SParav Pandit 	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
685b7403217SParav Pandit 				       &context);
686dbf727deSMatan Barak }
687dbf727deSMatan Barak 
688850d8fd7SMoni Shoua int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
689c865f246SSomnath Kotur 			      enum rdma_network_type net_type,
690c865f246SSomnath Kotur 			      union ib_gid *sgid, union ib_gid *dgid)
691c865f246SSomnath Kotur {
692c865f246SSomnath Kotur 	struct sockaddr_in  src_in;
693c865f246SSomnath Kotur 	struct sockaddr_in  dst_in;
694c865f246SSomnath Kotur 	__be32 src_saddr, dst_saddr;
695c865f246SSomnath Kotur 
696c865f246SSomnath Kotur 	if (!sgid || !dgid)
697c865f246SSomnath Kotur 		return -EINVAL;
698c865f246SSomnath Kotur 
699c865f246SSomnath Kotur 	if (net_type == RDMA_NETWORK_IPV4) {
700c865f246SSomnath Kotur 		memcpy(&src_in.sin_addr.s_addr,
701c865f246SSomnath Kotur 		       &hdr->roce4grh.saddr, 4);
702c865f246SSomnath Kotur 		memcpy(&dst_in.sin_addr.s_addr,
703c865f246SSomnath Kotur 		       &hdr->roce4grh.daddr, 4);
704c865f246SSomnath Kotur 		src_saddr = src_in.sin_addr.s_addr;
705c865f246SSomnath Kotur 		dst_saddr = dst_in.sin_addr.s_addr;
706c865f246SSomnath Kotur 		ipv6_addr_set_v4mapped(src_saddr,
707c865f246SSomnath Kotur 				       (struct in6_addr *)sgid);
708c865f246SSomnath Kotur 		ipv6_addr_set_v4mapped(dst_saddr,
709c865f246SSomnath Kotur 				       (struct in6_addr *)dgid);
710c865f246SSomnath Kotur 		return 0;
711c865f246SSomnath Kotur 	} else if (net_type == RDMA_NETWORK_IPV6 ||
712c865f246SSomnath Kotur 		   net_type == RDMA_NETWORK_IB) {
713c865f246SSomnath Kotur 		*dgid = hdr->ibgrh.dgid;
714c865f246SSomnath Kotur 		*sgid = hdr->ibgrh.sgid;
715c865f246SSomnath Kotur 		return 0;
716c865f246SSomnath Kotur 	} else {
717c865f246SSomnath Kotur 		return -EINVAL;
718c865f246SSomnath Kotur 	}
719c865f246SSomnath Kotur }
720850d8fd7SMoni Shoua EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
721c865f246SSomnath Kotur 
7221060f865SParav Pandit /* Resolve destination mac address and hop limit for unicast destination
7231060f865SParav Pandit  * GID entry, considering the source GID entry as well.
7241060f865SParav Pandit  * The ah_attr must have a valid port_num and sgid_index.
7251060f865SParav Pandit  */
7261060f865SParav Pandit static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
7271060f865SParav Pandit 				       struct rdma_ah_attr *ah_attr)
7281060f865SParav Pandit {
729b7403217SParav Pandit 	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
730b7403217SParav Pandit 	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
7311060f865SParav Pandit 	int hop_limit = 0xff;
732b7403217SParav Pandit 	int ret = 0;
7331060f865SParav Pandit 
73456d0a7d9SParav Pandit 	/* If the destination is link-local and the source GID is RoCEv1,
73556d0a7d9SParav Pandit 	 * the IP stack is not used.
73656d0a7d9SParav Pandit 	 */
73756d0a7d9SParav Pandit 	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
738b7403217SParav Pandit 	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
73956d0a7d9SParav Pandit 		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
74056d0a7d9SParav Pandit 				ah_attr->roce.dmac);
741b7403217SParav Pandit 		return ret;
74256d0a7d9SParav Pandit 	}
74356d0a7d9SParav Pandit 
744b7403217SParav Pandit 	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
7451060f865SParav Pandit 					   ah_attr->roce.dmac,
7460e9d2c19SParav Pandit 					   sgid_attr, &hop_limit);
7471060f865SParav Pandit 
7481060f865SParav Pandit 	grh->hop_limit = hop_limit;
7491060f865SParav Pandit 	return ret;
7501060f865SParav Pandit }
7511060f865SParav Pandit 
75228b5b3a2SGustavo A. R. Silva /*
753f6bdb142SParav Pandit  * This function initializes address handle attributes from the incoming packet.
75428b5b3a2SGustavo A. R. Silva  * The incoming packet's dgid is that of the receiver node on which this
75528b5b3a2SGustavo A. R. Silva  * code is executing, and its sgid contains the GID of the sender.
75628b5b3a2SGustavo A. R. Silva  *
75728b5b3a2SGustavo A. R. Silva  * When resolving the destination mac address, the received dgid is used
75828b5b3a2SGustavo A. R. Silva  * as the sgid and the received sgid as the dgid, because the received
75928b5b3a2SGustavo A. R. Silva  * sgid contains the destination's GID to respond to.
76028b5b3a2SGustavo A. R. Silva  *
761b7403217SParav Pandit  * On success the caller is responsible to call rdma_destroy_ah_attr on the
762b7403217SParav Pandit  * attr.
76328b5b3a2SGustavo A. R. Silva  */
764f6bdb142SParav Pandit int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
76573cdaaeeSIra Weiny 			    const struct ib_wc *wc, const struct ib_grh *grh,
76690898850SDasaratharaman Chandramouli 			    struct rdma_ah_attr *ah_attr)
767513789edSHal Rosenstock {
768513789edSHal Rosenstock 	u32 flow_class;
769513789edSHal Rosenstock 	int ret;
770c865f246SSomnath Kotur 	enum rdma_network_type net_type = RDMA_NETWORK_IB;
771c865f246SSomnath Kotur 	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
772b7403217SParav Pandit 	const struct ib_gid_attr *sgid_attr;
773c3efe750SMatan Barak 	int hoplimit = 0xff;
774c865f246SSomnath Kotur 	union ib_gid dgid;
775c865f246SSomnath Kotur 	union ib_gid sgid;
776513789edSHal Rosenstock 
77779364227SRoland Dreier 	might_sleep();
77879364227SRoland Dreier 
7794e00d694SSean Hefty 	memset(ah_attr, 0, sizeof *ah_attr);
78044c58487SDasaratharaman Chandramouli 	ah_attr->type = rdma_ah_find_type(device, port_num);
781227128fcSMichael Wang 	if (rdma_cap_eth_ah(device, port_num)) {
782c865f246SSomnath Kotur 		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
783c865f246SSomnath Kotur 			net_type = wc->network_hdr_type;
784c865f246SSomnath Kotur 		else
785c865f246SSomnath Kotur 			net_type = ib_get_net_type_by_grh(device, port_num, grh);
786c865f246SSomnath Kotur 		gid_type = ib_network_to_gid_type(net_type);
787c865f246SSomnath Kotur 	}
788850d8fd7SMoni Shoua 	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
789c865f246SSomnath Kotur 					&sgid, &dgid);
790c865f246SSomnath Kotur 	if (ret)
791c865f246SSomnath Kotur 		return ret;
792c865f246SSomnath Kotur 
7931060f865SParav Pandit 	rdma_ah_set_sl(ah_attr, wc->sl);
7941060f865SParav Pandit 	rdma_ah_set_port_num(ah_attr, port_num);
7951060f865SParav Pandit 
796c865f246SSomnath Kotur 	if (rdma_protocol_roce(device, port_num)) {
797dbf727deSMatan Barak 		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
798dbf727deSMatan Barak 				wc->vlan_id : 0xffff;
799dbf727deSMatan Barak 
800dd5f03beSMatan Barak 		if (!(wc->wc_flags & IB_WC_GRH))
801dd5f03beSMatan Barak 			return -EPROTOTYPE;
802dd5f03beSMatan Barak 
803b7403217SParav Pandit 		sgid_attr = get_sgid_attr_from_eth(device, port_num,
8041060f865SParav Pandit 						   vlan_id, &dgid,
805b7403217SParav Pandit 						   gid_type);
806b7403217SParav Pandit 		if (IS_ERR(sgid_attr))
807b7403217SParav Pandit 			return PTR_ERR(sgid_attr);
80820029832SMatan Barak 
8091060f865SParav Pandit 		flow_class = be32_to_cpu(grh->version_tclass_flow);
810b7403217SParav Pandit 		rdma_move_grh_sgid_attr(ah_attr,
811b7403217SParav Pandit 					&sgid,
8121060f865SParav Pandit 					flow_class & 0xFFFFF,
813b7403217SParav Pandit 					hoplimit,
814b7403217SParav Pandit 					(flow_class >> 20) & 0xFF,
815b7403217SParav Pandit 					sgid_attr);
816b7403217SParav Pandit 
817b7403217SParav Pandit 		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
818b7403217SParav Pandit 		if (ret)
819b7403217SParav Pandit 			rdma_destroy_ah_attr(ah_attr);
820b7403217SParav Pandit 
821b7403217SParav Pandit 		return ret;
8221060f865SParav Pandit 	} else {
823d8966fcdSDasaratharaman Chandramouli 		rdma_ah_set_dlid(ah_attr, wc->slid);
824d8966fcdSDasaratharaman Chandramouli 		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
825513789edSHal Rosenstock 
826b7403217SParav Pandit 		if ((wc->wc_flags & IB_WC_GRH) == 0)
827b7403217SParav Pandit 			return 0;
828513789edSHal Rosenstock 
829b7403217SParav Pandit 		if (dgid.global.interface_id !=
830b7403217SParav Pandit 					cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
831b7403217SParav Pandit 			sgid_attr = rdma_find_gid_by_port(
832b7403217SParav Pandit 				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
833b7403217SParav Pandit 		} else
834b7403217SParav Pandit 			sgid_attr = rdma_get_gid_attr(device, port_num, 0);
835b7403217SParav Pandit 
836b7403217SParav Pandit 		if (IS_ERR(sgid_attr))
837b7403217SParav Pandit 			return PTR_ERR(sgid_attr);
838497677abSHal Rosenstock 		flow_class = be32_to_cpu(grh->version_tclass_flow);
839b7403217SParav Pandit 		rdma_move_grh_sgid_attr(ah_attr,
840b7403217SParav Pandit 					&sgid,
841d8966fcdSDasaratharaman Chandramouli 					flow_class & 0xFFFFF,
842b7403217SParav Pandit 					hoplimit,
843b7403217SParav Pandit 					(flow_class >> 20) & 0xFF,
844b7403217SParav Pandit 					sgid_attr);
845b7403217SParav Pandit 
8464e00d694SSean Hefty 		return 0;
8474e00d694SSean Hefty 	}
8481060f865SParav Pandit }
849f6bdb142SParav Pandit EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
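
/*
 * Illustrative sketch (not part of this file): a caller pairs
 * ib_init_ah_attr_from_wc() with rdma_destroy_ah_attr(), as required by
 * the comment above. The function name is hypothetical.
 */
static int example_init_reply_attr(struct ib_device *device, u8 port_num,
				   const struct ib_wc *wc,
				   const struct ib_grh *grh)
{
	struct rdma_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_attr_from_wc(device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ret;

	/* ... use ah_attr, e.g. with rdma_create_ah() ... */

	rdma_destroy_ah_attr(&ah_attr);
	return 0;
}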
8504e00d694SSean Hefty 
8518d9ec9adSJason Gunthorpe /**
8528d9ec9adSJason Gunthorpe  * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
8538d9ec9adSJason Gunthorpe  * of the reference
8548d9ec9adSJason Gunthorpe  *
8558d9ec9adSJason Gunthorpe  * @attr:	Pointer to AH attribute structure
8568d9ec9adSJason Gunthorpe  * @dgid:	Destination GID
8578d9ec9adSJason Gunthorpe  * @flow_label:	Flow label
8588d9ec9adSJason Gunthorpe  * @hop_limit:	Hop limit
8598d9ec9adSJason Gunthorpe  * @traffic_class: traffic class
8608d9ec9adSJason Gunthorpe  * @sgid_attr:	Pointer to SGID attribute
8618d9ec9adSJason Gunthorpe  *
8628d9ec9adSJason Gunthorpe  * This takes ownership of the sgid_attr reference. The caller must ensure
8638d9ec9adSJason Gunthorpe  * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
8648d9ec9adSJason Gunthorpe  * calling this function.
8658d9ec9adSJason Gunthorpe  */
8668d9ec9adSJason Gunthorpe void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
8678d9ec9adSJason Gunthorpe 			     u32 flow_label, u8 hop_limit, u8 traffic_class,
8688d9ec9adSJason Gunthorpe 			     const struct ib_gid_attr *sgid_attr)
8698d9ec9adSJason Gunthorpe {
8708d9ec9adSJason Gunthorpe 	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
8718d9ec9adSJason Gunthorpe 			traffic_class);
8728d9ec9adSJason Gunthorpe 	attr->grh.sgid_attr = sgid_attr;
8738d9ec9adSJason Gunthorpe }
8748d9ec9adSJason Gunthorpe EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
8758d9ec9adSJason Gunthorpe 
8768d9ec9adSJason Gunthorpe /**
8778d9ec9adSJason Gunthorpe  * rdma_destroy_ah_attr - Release reference to SGID attribute of
8788d9ec9adSJason Gunthorpe  * ah attribute.
8798d9ec9adSJason Gunthorpe  * @ah_attr: Pointer to ah attribute
8808d9ec9adSJason Gunthorpe  *
8818d9ec9adSJason Gunthorpe  * Release reference to the SGID attribute of the ah attribute if it is
8828d9ec9adSJason Gunthorpe  * non-NULL. It is safe to call this multiple times, and safe to call it on
8838d9ec9adSJason Gunthorpe  * a zero initialized ah_attr.
8848d9ec9adSJason Gunthorpe  */
8858d9ec9adSJason Gunthorpe void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
8868d9ec9adSJason Gunthorpe {
8878d9ec9adSJason Gunthorpe 	if (ah_attr->grh.sgid_attr) {
8888d9ec9adSJason Gunthorpe 		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
8898d9ec9adSJason Gunthorpe 		ah_attr->grh.sgid_attr = NULL;
8908d9ec9adSJason Gunthorpe 	}
8918d9ec9adSJason Gunthorpe }
8928d9ec9adSJason Gunthorpe EXPORT_SYMBOL(rdma_destroy_ah_attr);
8938d9ec9adSJason Gunthorpe 
89473cdaaeeSIra Weiny struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
89573cdaaeeSIra Weiny 				   const struct ib_grh *grh, u8 port_num)
8964e00d694SSean Hefty {
89790898850SDasaratharaman Chandramouli 	struct rdma_ah_attr ah_attr;
898b7403217SParav Pandit 	struct ib_ah *ah;
8994e00d694SSean Hefty 	int ret;
9004e00d694SSean Hefty 
901f6bdb142SParav Pandit 	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
9024e00d694SSean Hefty 	if (ret)
9034e00d694SSean Hefty 		return ERR_PTR(ret);
904513789edSHal Rosenstock 
905b090c4e3SGal Pressman 	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
906b7403217SParav Pandit 
907b7403217SParav Pandit 	rdma_destroy_ah_attr(&ah_attr);
908b7403217SParav Pandit 	return ah;
909513789edSHal Rosenstock }
910513789edSHal Rosenstock EXPORT_SYMBOL(ib_create_ah_from_wc);
911513789edSHal Rosenstock 
91267b985b6SDasaratharaman Chandramouli int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
9131da177e4SLinus Torvalds {
9148d9ec9adSJason Gunthorpe 	const struct ib_gid_attr *old_sgid_attr;
9158d9ec9adSJason Gunthorpe 	int ret;
9168d9ec9adSJason Gunthorpe 
91744c58487SDasaratharaman Chandramouli 	if (ah->type != ah_attr->type)
91844c58487SDasaratharaman Chandramouli 		return -EINVAL;
91944c58487SDasaratharaman Chandramouli 
9208d9ec9adSJason Gunthorpe 	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
9218d9ec9adSJason Gunthorpe 	if (ret)
9228d9ec9adSJason Gunthorpe 		return ret;
9238d9ec9adSJason Gunthorpe 
9243023a1e9SKamal Heib 	ret = ah->device->ops.modify_ah ?
9253023a1e9SKamal Heib 		ah->device->ops.modify_ah(ah, ah_attr) :
92687915bf8SLeon Romanovsky 		-EOPNOTSUPP;
9278d9ec9adSJason Gunthorpe 
9281a1f460fSJason Gunthorpe 	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
9298d9ec9adSJason Gunthorpe 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
9308d9ec9adSJason Gunthorpe 	return ret;
9311da177e4SLinus Torvalds }
93267b985b6SDasaratharaman Chandramouli EXPORT_SYMBOL(rdma_modify_ah);
9331da177e4SLinus Torvalds 
934bfbfd661SDasaratharaman Chandramouli int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
9351da177e4SLinus Torvalds {
9368d9ec9adSJason Gunthorpe 	ah_attr->grh.sgid_attr = NULL;
9378d9ec9adSJason Gunthorpe 
9383023a1e9SKamal Heib 	return ah->device->ops.query_ah ?
9393023a1e9SKamal Heib 		ah->device->ops.query_ah(ah, ah_attr) :
94087915bf8SLeon Romanovsky 		-EOPNOTSUPP;
9411da177e4SLinus Torvalds }
942bfbfd661SDasaratharaman Chandramouli EXPORT_SYMBOL(rdma_query_ah);
9431da177e4SLinus Torvalds 
944c4367a26SShamir Rabinovitch int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
9451da177e4SLinus Torvalds {
9461a1f460fSJason Gunthorpe 	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
9471da177e4SLinus Torvalds 	struct ib_pd *pd;
9481da177e4SLinus Torvalds 
9492553ba21SGal Pressman 	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
9502553ba21SGal Pressman 
9511da177e4SLinus Torvalds 	pd = ah->pd;
952d3456914SLeon Romanovsky 
953d3456914SLeon Romanovsky 	ah->device->ops.destroy_ah(ah, flags);
9541da177e4SLinus Torvalds 	atomic_dec(&pd->usecnt);
9551a1f460fSJason Gunthorpe 	if (sgid_attr)
9561a1f460fSJason Gunthorpe 		rdma_put_gid_attr(sgid_attr);
9571da177e4SLinus Torvalds 
958d3456914SLeon Romanovsky 	kfree(ah);
959d3456914SLeon Romanovsky 	return 0;
9601da177e4SLinus Torvalds }
961c4367a26SShamir Rabinovitch EXPORT_SYMBOL(rdma_destroy_ah_user);
9621da177e4SLinus Torvalds 
963d41fcc67SRoland Dreier /* Shared receive queues */
964d41fcc67SRoland Dreier 
965d41fcc67SRoland Dreier struct ib_srq *ib_create_srq(struct ib_pd *pd,
966d41fcc67SRoland Dreier 			     struct ib_srq_init_attr *srq_init_attr)
967d41fcc67SRoland Dreier {
968d41fcc67SRoland Dreier 	struct ib_srq *srq;
96968e326deSLeon Romanovsky 	int ret;
970d41fcc67SRoland Dreier 
9713023a1e9SKamal Heib 	if (!pd->device->ops.create_srq)
97287915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
973d41fcc67SRoland Dreier 
97468e326deSLeon Romanovsky 	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
97568e326deSLeon Romanovsky 	if (!srq)
97668e326deSLeon Romanovsky 		return ERR_PTR(-ENOMEM);
977d41fcc67SRoland Dreier 
978d41fcc67SRoland Dreier 	srq->device = pd->device;
979d41fcc67SRoland Dreier 	srq->pd = pd;
980d41fcc67SRoland Dreier 	srq->event_handler = srq_init_attr->event_handler;
981d41fcc67SRoland Dreier 	srq->srq_context = srq_init_attr->srq_context;
98296104edaSSean Hefty 	srq->srq_type = srq_init_attr->srq_type;
98368e326deSLeon Romanovsky 
9841a56ff6dSArtemy Kovalyov 	if (ib_srq_has_cq(srq->srq_type)) {
9851a56ff6dSArtemy Kovalyov 		srq->ext.cq = srq_init_attr->ext.cq;
9861a56ff6dSArtemy Kovalyov 		atomic_inc(&srq->ext.cq->usecnt);
9871a56ff6dSArtemy Kovalyov 	}
988418d5130SSean Hefty 	if (srq->srq_type == IB_SRQT_XRC) {
989418d5130SSean Hefty 		srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
990418d5130SSean Hefty 		atomic_inc(&srq->ext.xrc.xrcd->usecnt);
991418d5130SSean Hefty 	}
992d41fcc67SRoland Dreier 	atomic_inc(&pd->usecnt);
99368e326deSLeon Romanovsky 
99468e326deSLeon Romanovsky 	ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
99568e326deSLeon Romanovsky 	if (ret) {
99668e326deSLeon Romanovsky 		atomic_dec(&srq->pd->usecnt);
99768e326deSLeon Romanovsky 		if (srq->srq_type == IB_SRQT_XRC)
99868e326deSLeon Romanovsky 			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
99968e326deSLeon Romanovsky 		if (ib_srq_has_cq(srq->srq_type))
100068e326deSLeon Romanovsky 			atomic_dec(&srq->ext.cq->usecnt);
100168e326deSLeon Romanovsky 		kfree(srq);
100268e326deSLeon Romanovsky 		return ERR_PTR(ret);
1003d41fcc67SRoland Dreier 	}
1004d41fcc67SRoland Dreier 
1005d41fcc67SRoland Dreier 	return srq;
1006d41fcc67SRoland Dreier }
1007d41fcc67SRoland Dreier EXPORT_SYMBOL(ib_create_srq);
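
/*
 * Illustrative sketch (not part of this file): creating a basic SRQ,
 * which needs neither a CQ nor an XRCD. The function name and queue
 * sizes are hypothetical.
 */
static struct ib_srq *example_create_basic_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.srq_type	= IB_SRQT_BASIC,
		.attr		= { .max_wr = 128, .max_sge = 1 },
	};

	return ib_create_srq(pd, &init_attr);	/* ERR_PTR on failure */
}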
1008d41fcc67SRoland Dreier 
1009d41fcc67SRoland Dreier int ib_modify_srq(struct ib_srq *srq,
1010d41fcc67SRoland Dreier 		  struct ib_srq_attr *srq_attr,
1011d41fcc67SRoland Dreier 		  enum ib_srq_attr_mask srq_attr_mask)
1012d41fcc67SRoland Dreier {
10133023a1e9SKamal Heib 	return srq->device->ops.modify_srq ?
10143023a1e9SKamal Heib 		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
10153023a1e9SKamal Heib 					    NULL) : -EOPNOTSUPP;
1016d41fcc67SRoland Dreier }
1017d41fcc67SRoland Dreier EXPORT_SYMBOL(ib_modify_srq);
1018d41fcc67SRoland Dreier 
1019d41fcc67SRoland Dreier int ib_query_srq(struct ib_srq *srq,
1020d41fcc67SRoland Dreier 		 struct ib_srq_attr *srq_attr)
1021d41fcc67SRoland Dreier {
10223023a1e9SKamal Heib 	return srq->device->ops.query_srq ?
10233023a1e9SKamal Heib 		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1024d41fcc67SRoland Dreier }
1025d41fcc67SRoland Dreier EXPORT_SYMBOL(ib_query_srq);
1026d41fcc67SRoland Dreier 
1027c4367a26SShamir Rabinovitch int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1028d41fcc67SRoland Dreier {
1029d41fcc67SRoland Dreier 	if (atomic_read(&srq->usecnt))
1030d41fcc67SRoland Dreier 		return -EBUSY;
1031d41fcc67SRoland Dreier 
103268e326deSLeon Romanovsky 	srq->device->ops.destroy_srq(srq, udata);
1033d41fcc67SRoland Dreier 
103468e326deSLeon Romanovsky 	atomic_dec(&srq->pd->usecnt);
103568e326deSLeon Romanovsky 	if (srq->srq_type == IB_SRQT_XRC)
103668e326deSLeon Romanovsky 		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
103768e326deSLeon Romanovsky 	if (ib_srq_has_cq(srq->srq_type))
103868e326deSLeon Romanovsky 		atomic_dec(&srq->ext.cq->usecnt);
103968e326deSLeon Romanovsky 	kfree(srq);
1040d41fcc67SRoland Dreier 
104168e326deSLeon Romanovsky 	return 0;
1042d41fcc67SRoland Dreier }
1043c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_destroy_srq_user);
1044d41fcc67SRoland Dreier 
10451da177e4SLinus Torvalds /* Queue pairs */
10461da177e4SLinus Torvalds 
10470e0ec7e0SSean Hefty static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
10480e0ec7e0SSean Hefty {
10490e0ec7e0SSean Hefty 	struct ib_qp *qp = context;
105073c40c61SYishai Hadas 	unsigned long flags;
10510e0ec7e0SSean Hefty 
105273c40c61SYishai Hadas 	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
10530e0ec7e0SSean Hefty 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1054eec9e29fSShlomo Pongratz 		if (event->element.qp->event_handler)
10550e0ec7e0SSean Hefty 			event->element.qp->event_handler(event, event->element.qp->qp_context);
105673c40c61SYishai Hadas 	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
10570e0ec7e0SSean Hefty }
10580e0ec7e0SSean Hefty 
1059d3d72d90SSean Hefty static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
1060d3d72d90SSean Hefty {
1061d3d72d90SSean Hefty 	mutex_lock(&xrcd->tgt_qp_mutex);
1062d3d72d90SSean Hefty 	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
1063d3d72d90SSean Hefty 	mutex_unlock(&xrcd->tgt_qp_mutex);
1064d3d72d90SSean Hefty }
1065d3d72d90SSean Hefty 
10660e0ec7e0SSean Hefty static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
10670e0ec7e0SSean Hefty 				  void (*event_handler)(struct ib_event *, void *),
10680e0ec7e0SSean Hefty 				  void *qp_context)
1069d3d72d90SSean Hefty {
10700e0ec7e0SSean Hefty 	struct ib_qp *qp;
10710e0ec7e0SSean Hefty 	unsigned long flags;
1072d291f1a6SDaniel Jurgens 	int err;
10730e0ec7e0SSean Hefty 
10740e0ec7e0SSean Hefty 	qp = kzalloc(sizeof *qp, GFP_KERNEL);
10750e0ec7e0SSean Hefty 	if (!qp)
10760e0ec7e0SSean Hefty 		return ERR_PTR(-ENOMEM);
10770e0ec7e0SSean Hefty 
10780e0ec7e0SSean Hefty 	qp->real_qp = real_qp;
1079d291f1a6SDaniel Jurgens 	err = ib_open_shared_qp_security(qp, real_qp->device);
1080d291f1a6SDaniel Jurgens 	if (err) {
1081d291f1a6SDaniel Jurgens 		kfree(qp);
1082d291f1a6SDaniel Jurgens 		return ERR_PTR(err);
1083d291f1a6SDaniel Jurgens 	}
1084d291f1a6SDaniel Jurgens 
10860e0ec7e0SSean Hefty 	atomic_inc(&real_qp->usecnt);
10870e0ec7e0SSean Hefty 	qp->device = real_qp->device;
10880e0ec7e0SSean Hefty 	qp->event_handler = event_handler;
10890e0ec7e0SSean Hefty 	qp->qp_context = qp_context;
10900e0ec7e0SSean Hefty 	qp->qp_num = real_qp->qp_num;
10910e0ec7e0SSean Hefty 	qp->qp_type = real_qp->qp_type;
10920e0ec7e0SSean Hefty 
10930e0ec7e0SSean Hefty 	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
10940e0ec7e0SSean Hefty 	list_add(&qp->open_list, &real_qp->open_list);
10950e0ec7e0SSean Hefty 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
10960e0ec7e0SSean Hefty 
10970e0ec7e0SSean Hefty 	return qp;
1098d3d72d90SSean Hefty }
1099d3d72d90SSean Hefty 
11000e0ec7e0SSean Hefty struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
11010e0ec7e0SSean Hefty 			 struct ib_qp_open_attr *qp_open_attr)
11020e0ec7e0SSean Hefty {
11030e0ec7e0SSean Hefty 	struct ib_qp *qp, *real_qp;
11040e0ec7e0SSean Hefty 
11050e0ec7e0SSean Hefty 	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
11060e0ec7e0SSean Hefty 		return ERR_PTR(-EINVAL);
11070e0ec7e0SSean Hefty 
11080e0ec7e0SSean Hefty 	qp = ERR_PTR(-EINVAL);
11090e0ec7e0SSean Hefty 	mutex_lock(&xrcd->tgt_qp_mutex);
11100e0ec7e0SSean Hefty 	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
11110e0ec7e0SSean Hefty 		if (real_qp->qp_num == qp_open_attr->qp_num) {
11120e0ec7e0SSean Hefty 			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
11130e0ec7e0SSean Hefty 					  qp_open_attr->qp_context);
11140e0ec7e0SSean Hefty 			break;
11150e0ec7e0SSean Hefty 		}
11160e0ec7e0SSean Hefty 	}
11170e0ec7e0SSean Hefty 	mutex_unlock(&xrcd->tgt_qp_mutex);
11180e0ec7e0SSean Hefty 	return qp;
11190e0ec7e0SSean Hefty }
11200e0ec7e0SSean Hefty EXPORT_SYMBOL(ib_open_qp);
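
/*
 * Editor's note: an illustrative sketch only: opening an additional handle
 * to an existing XRC target QP by QP number, so this consumer gets its own
 * event handler.  "tgt_qpn" and "my_event_handler" are hypothetical.
 */
static struct ib_qp *example_open_xrc_tgt_qp(struct ib_xrcd *xrcd, u32 tgt_qpn,
					     void (*my_event_handler)(struct ib_event *, void *))
{
	struct ib_qp_open_attr open_attr = {
		.event_handler = my_event_handler,
		.qp_num	       = tgt_qpn,
		.qp_type       = IB_QPT_XRC_TGT,
	};

	/* The handle returned here is released with ib_close_qp(). */
	return ib_open_qp(xrcd, &open_attr);
}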
11210e0ec7e0SSean Hefty 
1122c4367a26SShamir Rabinovitch static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1123c4367a26SShamir Rabinovitch 					struct ib_qp_init_attr *qp_init_attr,
1124c4367a26SShamir Rabinovitch 					struct ib_udata *udata)
11251da177e4SLinus Torvalds {
112604c41bf3SChristoph Hellwig 	struct ib_qp *real_qp = qp;
11271da177e4SLinus Torvalds 
11280e0ec7e0SSean Hefty 	qp->event_handler = __ib_shared_qp_event_handler;
11290e0ec7e0SSean Hefty 	qp->qp_context = qp;
1130b42b63cfSSean Hefty 	qp->pd = NULL;
1131b42b63cfSSean Hefty 	qp->send_cq = qp->recv_cq = NULL;
1132b42b63cfSSean Hefty 	qp->srq = NULL;
1133b42b63cfSSean Hefty 	qp->xrcd = qp_init_attr->xrcd;
1134b42b63cfSSean Hefty 	atomic_inc(&qp_init_attr->xrcd->usecnt);
11350e0ec7e0SSean Hefty 	INIT_LIST_HEAD(&qp->open_list);
11360e0ec7e0SSean Hefty 
11370e0ec7e0SSean Hefty 	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
11380e0ec7e0SSean Hefty 			  qp_init_attr->qp_context);
1139535005caSYuval Avnery 	if (IS_ERR(qp))
1140535005caSYuval Avnery 		return qp;
1141535005caSYuval Avnery 
11420e0ec7e0SSean Hefty 	__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
114304c41bf3SChristoph Hellwig 	return qp;
114404c41bf3SChristoph Hellwig }
114504c41bf3SChristoph Hellwig 
1146c4367a26SShamir Rabinovitch struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
1147c4367a26SShamir Rabinovitch 				struct ib_qp_init_attr *qp_init_attr,
1148c4367a26SShamir Rabinovitch 				struct ib_udata *udata)
114904c41bf3SChristoph Hellwig {
115004c41bf3SChristoph Hellwig 	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
115104c41bf3SChristoph Hellwig 	struct ib_qp *qp;
1152a060b562SChristoph Hellwig 	int ret;
1153a060b562SChristoph Hellwig 
1154a9017e23SYishai Hadas 	if (qp_init_attr->rwq_ind_tbl &&
1155a9017e23SYishai Hadas 	    (qp_init_attr->recv_cq ||
1156a9017e23SYishai Hadas 	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
1157a9017e23SYishai Hadas 	    qp_init_attr->cap.max_recv_sge))
1158a9017e23SYishai Hadas 		return ERR_PTR(-EINVAL);
1159a9017e23SYishai Hadas 
1160a060b562SChristoph Hellwig 	/*
1161a060b562SChristoph Hellwig 	 * If the caller is using the RDMA API, calculate the resources
1162a060b562SChristoph Hellwig 	 * needed for the RDMA READ/WRITE operations.
1163a060b562SChristoph Hellwig 	 *
1164a060b562SChristoph Hellwig 	 * Note that these callers need to pass in a port number.
1165a060b562SChristoph Hellwig 	 */
1166a060b562SChristoph Hellwig 	if (qp_init_attr->cap.max_rdma_ctxs)
1167a060b562SChristoph Hellwig 		rdma_rw_init_qp(device, qp_init_attr);
116804c41bf3SChristoph Hellwig 
11692f08ee36SSteve Wise 	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
117004c41bf3SChristoph Hellwig 	if (IS_ERR(qp))
117104c41bf3SChristoph Hellwig 		return qp;
117204c41bf3SChristoph Hellwig 
1173d291f1a6SDaniel Jurgens 	ret = ib_create_qp_security(qp, device);
1174535005caSYuval Avnery 	if (ret)
1175535005caSYuval Avnery 		goto err;
1176d291f1a6SDaniel Jurgens 
117704c41bf3SChristoph Hellwig 	qp->qp_type    = qp_init_attr->qp_type;
1178a9017e23SYishai Hadas 	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
117904c41bf3SChristoph Hellwig 
118004c41bf3SChristoph Hellwig 	atomic_set(&qp->usecnt, 0);
1181fffb0383SChristoph Hellwig 	qp->mrs_used = 0;
1182fffb0383SChristoph Hellwig 	spin_lock_init(&qp->mr_lock);
1183a060b562SChristoph Hellwig 	INIT_LIST_HEAD(&qp->rdma_mrs);
11840e353e34SChristoph Hellwig 	INIT_LIST_HEAD(&qp->sig_mrs);
1185498ca3c8SNoa Osherovich 	qp->port = 0;
1186fffb0383SChristoph Hellwig 
1187535005caSYuval Avnery 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
1188c4367a26SShamir Rabinovitch 		struct ib_qp *xrc_qp =
1189c4367a26SShamir Rabinovitch 			create_xrc_qp_user(qp, qp_init_attr, udata);
1190535005caSYuval Avnery 
1191535005caSYuval Avnery 		if (IS_ERR(xrc_qp)) {
1192535005caSYuval Avnery 			ret = PTR_ERR(xrc_qp);
1193535005caSYuval Avnery 			goto err;
1194535005caSYuval Avnery 		}
1195535005caSYuval Avnery 		return xrc_qp;
1196535005caSYuval Avnery 	}
119704c41bf3SChristoph Hellwig 
11981da177e4SLinus Torvalds 	qp->event_handler = qp_init_attr->event_handler;
11991da177e4SLinus Torvalds 	qp->qp_context = qp_init_attr->qp_context;
1200b42b63cfSSean Hefty 	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
1201b42b63cfSSean Hefty 		qp->recv_cq = NULL;
1202b42b63cfSSean Hefty 		qp->srq = NULL;
1203b42b63cfSSean Hefty 	} else {
1204b42b63cfSSean Hefty 		qp->recv_cq = qp_init_attr->recv_cq;
1205a9017e23SYishai Hadas 		if (qp_init_attr->recv_cq)
1206b42b63cfSSean Hefty 			atomic_inc(&qp_init_attr->recv_cq->usecnt);
1207b42b63cfSSean Hefty 		qp->srq = qp_init_attr->srq;
1208b42b63cfSSean Hefty 		if (qp->srq)
1209b42b63cfSSean Hefty 			atomic_inc(&qp_init_attr->srq->usecnt);
1210b42b63cfSSean Hefty 	}
1211b42b63cfSSean Hefty 
12121da177e4SLinus Torvalds 	qp->send_cq = qp_init_attr->send_cq;
1213b42b63cfSSean Hefty 	qp->xrcd    = NULL;
1214b42b63cfSSean Hefty 
12151da177e4SLinus Torvalds 	atomic_inc(&pd->usecnt);
1216a9017e23SYishai Hadas 	if (qp_init_attr->send_cq)
12171da177e4SLinus Torvalds 		atomic_inc(&qp_init_attr->send_cq->usecnt);
1218a9017e23SYishai Hadas 	if (qp_init_attr->rwq_ind_tbl)
1219a9017e23SYishai Hadas 		atomic_inc(&qp->rwq_ind_tbl->usecnt);
1220a060b562SChristoph Hellwig 
1221a060b562SChristoph Hellwig 	if (qp_init_attr->cap.max_rdma_ctxs) {
1222a060b562SChristoph Hellwig 		ret = rdma_rw_init_mrs(qp, qp_init_attr);
1223535005caSYuval Avnery 		if (ret)
1224535005caSYuval Avnery 			goto err;
1225a060b562SChristoph Hellwig 	}
1226a060b562SChristoph Hellwig 
1227632bc3f6SBart Van Assche 	/*
1228632bc3f6SBart Van Assche 	 * Note: all hw drivers guarantee that max_send_sge is lower than
1229632bc3f6SBart Van Assche 	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1230632bc3f6SBart Van Assche 	 * max_send_sge <= max_sge_rd.
1231632bc3f6SBart Van Assche 	 */
1232632bc3f6SBart Van Assche 	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1233632bc3f6SBart Van Assche 	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1234632bc3f6SBart Van Assche 				 device->attrs.max_sge_rd);
1235632bc3f6SBart Van Assche 
12361da177e4SLinus Torvalds 	return qp;
1237535005caSYuval Avnery 
1238535005caSYuval Avnery err:
1239535005caSYuval Avnery 	ib_destroy_qp(qp);
1240535005caSYuval Avnery 	return ERR_PTR(ret);
1241535005caSYuval Avnery 
12421da177e4SLinus Torvalds }
1243c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_create_qp_user);
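
/*
 * Editor's note: an illustrative sketch only: a kernel ULP creating an RC
 * QP.  Setting cap.max_rdma_ctxs asks the core to reserve rdma_rw READ/WRITE
 * resources, which is why port_num must be valid here; all sizes and the
 * "example_" name are hypothetical.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap = {
			.max_send_wr   = 64,
			.max_recv_wr   = 64,
			.max_send_sge  = 1,
			.max_recv_sge  = 1,
			.max_rdma_ctxs = 16,	/* triggers rdma_rw_init_qp() */
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
		.port_num    = 1,
	};

	return ib_create_qp(pd, &init_attr);
}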
12441da177e4SLinus Torvalds 
12458a51866fSRoland Dreier static const struct {
12468a51866fSRoland Dreier 	int			valid;
1247b42b63cfSSean Hefty 	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
1248b42b63cfSSean Hefty 	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
12498a51866fSRoland Dreier } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
12508a51866fSRoland Dreier 	[IB_QPS_RESET] = {
12518a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
12528a51866fSRoland Dreier 		[IB_QPS_INIT]  = {
12538a51866fSRoland Dreier 			.valid = 1,
12548a51866fSRoland Dreier 			.req_param = {
12558a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
12568a51866fSRoland Dreier 						IB_QP_PORT			|
12578a51866fSRoland Dreier 						IB_QP_QKEY),
1258c938a616SOr Gerlitz 				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
12598a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
12608a51866fSRoland Dreier 						IB_QP_PORT			|
12618a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
12628a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
12638a51866fSRoland Dreier 						IB_QP_PORT			|
12648a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
1265b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1266b42b63cfSSean Hefty 						IB_QP_PORT			|
1267b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS),
1268b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1269b42b63cfSSean Hefty 						IB_QP_PORT			|
1270b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS),
12718a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
12728a51866fSRoland Dreier 						IB_QP_QKEY),
12738a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
12748a51866fSRoland Dreier 						IB_QP_QKEY),
12758a51866fSRoland Dreier 			}
12768a51866fSRoland Dreier 		},
12778a51866fSRoland Dreier 	},
12788a51866fSRoland Dreier 	[IB_QPS_INIT]  = {
12798a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
12808a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
12818a51866fSRoland Dreier 		[IB_QPS_INIT]  = {
12828a51866fSRoland Dreier 			.valid = 1,
12838a51866fSRoland Dreier 			.opt_param = {
12848a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
12858a51866fSRoland Dreier 						IB_QP_PORT			|
12868a51866fSRoland Dreier 						IB_QP_QKEY),
12878a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
12888a51866fSRoland Dreier 						IB_QP_PORT			|
12898a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
12908a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
12918a51866fSRoland Dreier 						IB_QP_PORT			|
12928a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
1293b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1294b42b63cfSSean Hefty 						IB_QP_PORT			|
1295b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS),
1296b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1297b42b63cfSSean Hefty 						IB_QP_PORT			|
1298b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS),
12998a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
13008a51866fSRoland Dreier 						IB_QP_QKEY),
13018a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
13028a51866fSRoland Dreier 						IB_QP_QKEY),
13038a51866fSRoland Dreier 			}
13048a51866fSRoland Dreier 		},
13058a51866fSRoland Dreier 		[IB_QPS_RTR]   = {
13068a51866fSRoland Dreier 			.valid = 1,
13078a51866fSRoland Dreier 			.req_param = {
13088a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_AV			|
13098a51866fSRoland Dreier 						IB_QP_PATH_MTU			|
13108a51866fSRoland Dreier 						IB_QP_DEST_QPN			|
13118a51866fSRoland Dreier 						IB_QP_RQ_PSN),
13128a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_AV			|
13138a51866fSRoland Dreier 						IB_QP_PATH_MTU			|
13148a51866fSRoland Dreier 						IB_QP_DEST_QPN			|
13158a51866fSRoland Dreier 						IB_QP_RQ_PSN			|
13168a51866fSRoland Dreier 						IB_QP_MAX_DEST_RD_ATOMIC	|
13178a51866fSRoland Dreier 						IB_QP_MIN_RNR_TIMER),
1318b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_AV			|
1319b42b63cfSSean Hefty 						IB_QP_PATH_MTU			|
1320b42b63cfSSean Hefty 						IB_QP_DEST_QPN			|
1321b42b63cfSSean Hefty 						IB_QP_RQ_PSN),
1322b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
1323b42b63cfSSean Hefty 						IB_QP_PATH_MTU			|
1324b42b63cfSSean Hefty 						IB_QP_DEST_QPN			|
1325b42b63cfSSean Hefty 						IB_QP_RQ_PSN			|
1326b42b63cfSSean Hefty 						IB_QP_MAX_DEST_RD_ATOMIC	|
1327b42b63cfSSean Hefty 						IB_QP_MIN_RNR_TIMER),
13288a51866fSRoland Dreier 			},
13298a51866fSRoland Dreier 			.opt_param = {
13308a51866fSRoland Dreier 				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
13318a51866fSRoland Dreier 						 IB_QP_QKEY),
13328a51866fSRoland Dreier 				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
13338a51866fSRoland Dreier 						 IB_QP_ACCESS_FLAGS		|
13348a51866fSRoland Dreier 						 IB_QP_PKEY_INDEX),
13358a51866fSRoland Dreier 				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
13368a51866fSRoland Dreier 						 IB_QP_ACCESS_FLAGS		|
13378a51866fSRoland Dreier 						 IB_QP_PKEY_INDEX),
1338b42b63cfSSean Hefty 				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
1339b42b63cfSSean Hefty 						 IB_QP_ACCESS_FLAGS		|
1340b42b63cfSSean Hefty 						 IB_QP_PKEY_INDEX),
1341b42b63cfSSean Hefty 				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
1342b42b63cfSSean Hefty 						 IB_QP_ACCESS_FLAGS		|
1343b42b63cfSSean Hefty 						 IB_QP_PKEY_INDEX),
13448a51866fSRoland Dreier 				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
13458a51866fSRoland Dreier 						 IB_QP_QKEY),
13468a51866fSRoland Dreier 				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
13478a51866fSRoland Dreier 						 IB_QP_QKEY),
1348dd5f03beSMatan Barak 			 },
1349dbf727deSMatan Barak 		},
13508a51866fSRoland Dreier 	},
13518a51866fSRoland Dreier 	[IB_QPS_RTR]   = {
13528a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
13538a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
13548a51866fSRoland Dreier 		[IB_QPS_RTS]   = {
13558a51866fSRoland Dreier 			.valid = 1,
13568a51866fSRoland Dreier 			.req_param = {
13578a51866fSRoland Dreier 				[IB_QPT_UD]  = IB_QP_SQ_PSN,
13588a51866fSRoland Dreier 				[IB_QPT_UC]  = IB_QP_SQ_PSN,
13598a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
13608a51866fSRoland Dreier 						IB_QP_RETRY_CNT			|
13618a51866fSRoland Dreier 						IB_QP_RNR_RETRY			|
13628a51866fSRoland Dreier 						IB_QP_SQ_PSN			|
13638a51866fSRoland Dreier 						IB_QP_MAX_QP_RD_ATOMIC),
1364b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
1365b42b63cfSSean Hefty 						IB_QP_RETRY_CNT			|
1366b42b63cfSSean Hefty 						IB_QP_RNR_RETRY			|
1367b42b63cfSSean Hefty 						IB_QP_SQ_PSN			|
1368b42b63cfSSean Hefty 						IB_QP_MAX_QP_RD_ATOMIC),
1369b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
1370b42b63cfSSean Hefty 						IB_QP_SQ_PSN),
13718a51866fSRoland Dreier 				[IB_QPT_SMI] = IB_QP_SQ_PSN,
13728a51866fSRoland Dreier 				[IB_QPT_GSI] = IB_QP_SQ_PSN,
13738a51866fSRoland Dreier 			},
13748a51866fSRoland Dreier 			.opt_param = {
13758a51866fSRoland Dreier 				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
13768a51866fSRoland Dreier 						 IB_QP_QKEY),
13778a51866fSRoland Dreier 				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
13788a51866fSRoland Dreier 						 IB_QP_ALT_PATH			|
13798a51866fSRoland Dreier 						 IB_QP_ACCESS_FLAGS		|
13808a51866fSRoland Dreier 						 IB_QP_PATH_MIG_STATE),
13818a51866fSRoland Dreier 				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
13828a51866fSRoland Dreier 						 IB_QP_ALT_PATH			|
13838a51866fSRoland Dreier 						 IB_QP_ACCESS_FLAGS		|
13848a51866fSRoland Dreier 						 IB_QP_MIN_RNR_TIMER		|
13858a51866fSRoland Dreier 						 IB_QP_PATH_MIG_STATE),
1386b42b63cfSSean Hefty 				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1387b42b63cfSSean Hefty 						 IB_QP_ALT_PATH			|
1388b42b63cfSSean Hefty 						 IB_QP_ACCESS_FLAGS		|
1389b42b63cfSSean Hefty 						 IB_QP_PATH_MIG_STATE),
1390b42b63cfSSean Hefty 				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1391b42b63cfSSean Hefty 						 IB_QP_ALT_PATH			|
1392b42b63cfSSean Hefty 						 IB_QP_ACCESS_FLAGS		|
1393b42b63cfSSean Hefty 						 IB_QP_MIN_RNR_TIMER		|
1394b42b63cfSSean Hefty 						 IB_QP_PATH_MIG_STATE),
13958a51866fSRoland Dreier 				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
13968a51866fSRoland Dreier 						 IB_QP_QKEY),
13978a51866fSRoland Dreier 				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
13988a51866fSRoland Dreier 						 IB_QP_QKEY),
1399528e5a1bSBodong Wang 				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
14008a51866fSRoland Dreier 			 }
14018a51866fSRoland Dreier 		}
14028a51866fSRoland Dreier 	},
14038a51866fSRoland Dreier 	[IB_QPS_RTS]   = {
14048a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
14058a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
14068a51866fSRoland Dreier 		[IB_QPS_RTS]   = {
14078a51866fSRoland Dreier 			.valid = 1,
14088a51866fSRoland Dreier 			.opt_param = {
14098a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
14108a51866fSRoland Dreier 						IB_QP_QKEY),
14114546d31dSDotan Barak 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
14124546d31dSDotan Barak 						IB_QP_ACCESS_FLAGS		|
14138a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14148a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
14154546d31dSDotan Barak 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
14164546d31dSDotan Barak 						IB_QP_ACCESS_FLAGS		|
14178a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14188a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE		|
14198a51866fSRoland Dreier 						IB_QP_MIN_RNR_TIMER),
1420b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1421b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1422b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1423b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
1424b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1425b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1426b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1427b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE		|
1428b42b63cfSSean Hefty 						IB_QP_MIN_RNR_TIMER),
14298a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
14308a51866fSRoland Dreier 						IB_QP_QKEY),
14318a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
14328a51866fSRoland Dreier 						IB_QP_QKEY),
1433528e5a1bSBodong Wang 				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
14348a51866fSRoland Dreier 			}
14358a51866fSRoland Dreier 		},
14368a51866fSRoland Dreier 		[IB_QPS_SQD]   = {
14378a51866fSRoland Dreier 			.valid = 1,
14388a51866fSRoland Dreier 			.opt_param = {
14398a51866fSRoland Dreier 				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
14408a51866fSRoland Dreier 				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
14418a51866fSRoland Dreier 				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1442b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1443b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
14448a51866fSRoland Dreier 				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
14458a51866fSRoland Dreier 				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
14468a51866fSRoland Dreier 			}
14478a51866fSRoland Dreier 		},
14488a51866fSRoland Dreier 	},
14498a51866fSRoland Dreier 	[IB_QPS_SQD]   = {
14508a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
14518a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
14528a51866fSRoland Dreier 		[IB_QPS_RTS]   = {
14538a51866fSRoland Dreier 			.valid = 1,
14548a51866fSRoland Dreier 			.opt_param = {
14558a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
14568a51866fSRoland Dreier 						IB_QP_QKEY),
14578a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
14588a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14598a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS		|
14608a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
14618a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
14628a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14638a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS		|
14648a51866fSRoland Dreier 						IB_QP_MIN_RNR_TIMER		|
14658a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
1466b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1467b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1468b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1469b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
1470b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1471b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1472b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1473b42b63cfSSean Hefty 						IB_QP_MIN_RNR_TIMER		|
1474b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
14758a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
14768a51866fSRoland Dreier 						IB_QP_QKEY),
14778a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
14788a51866fSRoland Dreier 						IB_QP_QKEY),
14798a51866fSRoland Dreier 			}
14808a51866fSRoland Dreier 		},
14818a51866fSRoland Dreier 		[IB_QPS_SQD]   = {
14828a51866fSRoland Dreier 			.valid = 1,
14838a51866fSRoland Dreier 			.opt_param = {
14848a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
14858a51866fSRoland Dreier 						IB_QP_QKEY),
14868a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_AV			|
14878a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14888a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS		|
14898a51866fSRoland Dreier 						IB_QP_PKEY_INDEX		|
14908a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
14918a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_PORT			|
14928a51866fSRoland Dreier 						IB_QP_AV			|
14938a51866fSRoland Dreier 						IB_QP_TIMEOUT			|
14948a51866fSRoland Dreier 						IB_QP_RETRY_CNT			|
14958a51866fSRoland Dreier 						IB_QP_RNR_RETRY			|
14968a51866fSRoland Dreier 						IB_QP_MAX_QP_RD_ATOMIC		|
14978a51866fSRoland Dreier 						IB_QP_MAX_DEST_RD_ATOMIC	|
14988a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14998a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS		|
15008a51866fSRoland Dreier 						IB_QP_PKEY_INDEX		|
15018a51866fSRoland Dreier 						IB_QP_MIN_RNR_TIMER		|
15028a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
1503b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
1504b42b63cfSSean Hefty 						IB_QP_AV			|
1505b42b63cfSSean Hefty 						IB_QP_TIMEOUT			|
1506b42b63cfSSean Hefty 						IB_QP_RETRY_CNT			|
1507b42b63cfSSean Hefty 						IB_QP_RNR_RETRY			|
1508b42b63cfSSean Hefty 						IB_QP_MAX_QP_RD_ATOMIC		|
1509b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1510b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1511b42b63cfSSean Hefty 						IB_QP_PKEY_INDEX		|
1512b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
1513b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
1514b42b63cfSSean Hefty 						IB_QP_AV			|
1515b42b63cfSSean Hefty 						IB_QP_TIMEOUT			|
1516b42b63cfSSean Hefty 						IB_QP_MAX_DEST_RD_ATOMIC	|
1517b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1518b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1519b42b63cfSSean Hefty 						IB_QP_PKEY_INDEX		|
1520b42b63cfSSean Hefty 						IB_QP_MIN_RNR_TIMER		|
1521b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
15228a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
15238a51866fSRoland Dreier 						IB_QP_QKEY),
15248a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
15258a51866fSRoland Dreier 						IB_QP_QKEY),
15268a51866fSRoland Dreier 			}
15278a51866fSRoland Dreier 		}
15288a51866fSRoland Dreier 	},
15298a51866fSRoland Dreier 	[IB_QPS_SQE]   = {
15308a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
15318a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
15328a51866fSRoland Dreier 		[IB_QPS_RTS]   = {
15338a51866fSRoland Dreier 			.valid = 1,
15348a51866fSRoland Dreier 			.opt_param = {
15358a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
15368a51866fSRoland Dreier 						IB_QP_QKEY),
15378a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
15388a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
15398a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
15408a51866fSRoland Dreier 						IB_QP_QKEY),
15418a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
15428a51866fSRoland Dreier 						IB_QP_QKEY),
15438a51866fSRoland Dreier 			}
15448a51866fSRoland Dreier 		}
15458a51866fSRoland Dreier 	},
15468a51866fSRoland Dreier 	[IB_QPS_ERR] = {
15478a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
15488a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 }
15498a51866fSRoland Dreier 	}
15508a51866fSRoland Dreier };
15518a51866fSRoland Dreier 
155219b1f540SLeon Romanovsky bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1553d31131bbSKamal Heib 			enum ib_qp_type type, enum ib_qp_attr_mask mask)
15548a51866fSRoland Dreier {
15558a51866fSRoland Dreier 	enum ib_qp_attr_mask req_param, opt_param;
15568a51866fSRoland Dreier 
15578a51866fSRoland Dreier 	if (mask & IB_QP_CUR_STATE  &&
15588a51866fSRoland Dreier 	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
15598a51866fSRoland Dreier 	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
156019b1f540SLeon Romanovsky 		return false;
15618a51866fSRoland Dreier 
15628a51866fSRoland Dreier 	if (!qp_state_table[cur_state][next_state].valid)
156319b1f540SLeon Romanovsky 		return false;
15648a51866fSRoland Dreier 
15658a51866fSRoland Dreier 	req_param = qp_state_table[cur_state][next_state].req_param[type];
15668a51866fSRoland Dreier 	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
15678a51866fSRoland Dreier 
15688a51866fSRoland Dreier 	if ((mask & req_param) != req_param)
156919b1f540SLeon Romanovsky 		return false;
15708a51866fSRoland Dreier 
15718a51866fSRoland Dreier 	if (mask & ~(req_param | opt_param | IB_QP_STATE))
157219b1f540SLeon Romanovsky 		return false;
15738a51866fSRoland Dreier 
157419b1f540SLeon Romanovsky 	return true;
15758a51866fSRoland Dreier }
15768a51866fSRoland Dreier EXPORT_SYMBOL(ib_modify_qp_is_ok);
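
/*
 * Editor's note: an illustrative sketch only: how a driver's modify_qp
 * handler might use ib_modify_qp_is_ok() to reject an invalid RESET->INIT
 * mask for an RC QP before programming hardware; the wrapper itself is
 * hypothetical.
 */
static int example_validate_reset_to_init(int attr_mask)
{
	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
				attr_mask))
		return -EINVAL;	/* e.g. a required req_param bit is missing */
	return 0;
}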
15778a51866fSRoland Dreier 
1578947c99ecSParav Pandit /**
1579947c99ecSParav Pandit  * ib_resolve_eth_dmac - Resolve destination mac address
1580947c99ecSParav Pandit  * @device:		Device to consider
1581947c99ecSParav Pandit  * @ah_attr:		address handle attribute which describes the
1582947c99ecSParav Pandit  *			source and destination parameters
1583947c99ecSParav Pandit  * ib_resolve_eth_dmac() resolves the destination MAC address and L3 hop
1584947c99ecSParav Pandit  * limit. It returns 0 on success or an appropriate error code. It
1585947c99ecSParav Pandit  * initializes the necessary ah_attr fields when the call is successful.
1586947c99ecSParav Pandit  */
1587c0348eb0SParav Pandit static int ib_resolve_eth_dmac(struct ib_device *device,
158890898850SDasaratharaman Chandramouli 			       struct rdma_ah_attr *ah_attr)
1589ed4c54e5SOr Gerlitz {
1590ed4c54e5SOr Gerlitz 	int ret = 0;
1591d8966fcdSDasaratharaman Chandramouli 
15929636a56fSNoa Osherovich 	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
15939636a56fSNoa Osherovich 		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
15949636a56fSNoa Osherovich 			__be32 addr = 0;
15959636a56fSNoa Osherovich 
15969636a56fSNoa Osherovich 			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
15979636a56fSNoa Osherovich 			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
15989636a56fSNoa Osherovich 		} else {
15999636a56fSNoa Osherovich 			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
16009636a56fSNoa Osherovich 					(char *)ah_attr->roce.dmac);
16019636a56fSNoa Osherovich 		}
1602ed4c54e5SOr Gerlitz 	} else {
16031060f865SParav Pandit 		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1604ed4c54e5SOr Gerlitz 	}
1605ed4c54e5SOr Gerlitz 	return ret;
1606ed4c54e5SOr Gerlitz }
1607ed4c54e5SOr Gerlitz 
16088d9ec9adSJason Gunthorpe static bool is_qp_type_connected(const struct ib_qp *qp)
16098d9ec9adSJason Gunthorpe {
16108d9ec9adSJason Gunthorpe 	return (qp->qp_type == IB_QPT_UC ||
16118d9ec9adSJason Gunthorpe 		qp->qp_type == IB_QPT_RC ||
16128d9ec9adSJason Gunthorpe 		qp->qp_type == IB_QPT_XRC_INI ||
16138d9ec9adSJason Gunthorpe 		qp->qp_type == IB_QPT_XRC_TGT);
16148d9ec9adSJason Gunthorpe }
16158d9ec9adSJason Gunthorpe 
1616a512c2fbSParav Pandit /*
1617b96ac05aSParav Pandit  * IB core internal function to perform QP attribute modification.
1618a512c2fbSParav Pandit  */
1619b96ac05aSParav Pandit static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1620a512c2fbSParav Pandit 			 int attr_mask, struct ib_udata *udata)
1621a512c2fbSParav Pandit {
1622727b7e9aSMajd Dibbiny 	u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
16238d9ec9adSJason Gunthorpe 	const struct ib_gid_attr *old_sgid_attr_av;
16248d9ec9adSJason Gunthorpe 	const struct ib_gid_attr *old_sgid_attr_alt_av;
1625a512c2fbSParav Pandit 	int ret;
1626a512c2fbSParav Pandit 
16278d9ec9adSJason Gunthorpe 	if (attr_mask & IB_QP_AV) {
16288d9ec9adSJason Gunthorpe 		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
16298d9ec9adSJason Gunthorpe 					  &old_sgid_attr_av);
16308d9ec9adSJason Gunthorpe 		if (ret)
16318d9ec9adSJason Gunthorpe 			return ret;
16328d9ec9adSJason Gunthorpe 	}
16338d9ec9adSJason Gunthorpe 	if (attr_mask & IB_QP_ALT_PATH) {
16341a1f460fSJason Gunthorpe 		/*
16351a1f460fSJason Gunthorpe 		 * FIXME: This does not track the migration state, so if the
16361a1f460fSJason Gunthorpe 		 * user loads a new alternate path after the HW has migrated
16371a1f460fSJason Gunthorpe 		 * from primary->alternate we will keep the wrong
16381a1f460fSJason Gunthorpe 		 * references. This is OK for IB because the reference
16391a1f460fSJason Gunthorpe 		 * counting does not serve any functional purpose.
16401a1f460fSJason Gunthorpe 		 */
16418d9ec9adSJason Gunthorpe 		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
16428d9ec9adSJason Gunthorpe 					  &old_sgid_attr_alt_av);
16438d9ec9adSJason Gunthorpe 		if (ret)
16448d9ec9adSJason Gunthorpe 			goto out_av;
16457a5c938bSJason Gunthorpe 
16467a5c938bSJason Gunthorpe 		/*
16477a5c938bSJason Gunthorpe 		 * Today the core code can only handle alternate paths and APM
16487a5c938bSJason Gunthorpe 		 * for IB. Ban them in roce mode.
16497a5c938bSJason Gunthorpe 		 */
16507a5c938bSJason Gunthorpe 		if (!(rdma_protocol_ib(qp->device,
16517a5c938bSJason Gunthorpe 				       attr->alt_ah_attr.port_num) &&
16527a5c938bSJason Gunthorpe 		      rdma_protocol_ib(qp->device, port))) {
16537a5c938bSJason Gunthorpe 			ret = -EINVAL;
16547a5c938bSJason Gunthorpe 			goto out;
16557a5c938bSJason Gunthorpe 		}
16568d9ec9adSJason Gunthorpe 	}
16578d9ec9adSJason Gunthorpe 
16588d9ec9adSJason Gunthorpe 	/*
16598d9ec9adSJason Gunthorpe 	 * If the user provided the qp_attr then we have to resolve it. Kernel
16608d9ec9adSJason Gunthorpe 	 * users have to provide already resolved rdma_ah_attr's
16618d9ec9adSJason Gunthorpe 	 */
16628d9ec9adSJason Gunthorpe 	if (udata && (attr_mask & IB_QP_AV) &&
16638d9ec9adSJason Gunthorpe 	    attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
16648d9ec9adSJason Gunthorpe 	    is_qp_type_connected(qp)) {
16658d9ec9adSJason Gunthorpe 		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
16668d9ec9adSJason Gunthorpe 		if (ret)
16678d9ec9adSJason Gunthorpe 			goto out;
16688d9ec9adSJason Gunthorpe 	}
16698d9ec9adSJason Gunthorpe 
1670727b7e9aSMajd Dibbiny 	if (rdma_ib_or_roce(qp->device, port)) {
1671727b7e9aSMajd Dibbiny 		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
167243c7c851SJason Gunthorpe 			dev_warn(&qp->device->dev,
167343c7c851SJason Gunthorpe 				 "%s rq_psn overflow, masking to 24 bits\n",
167443c7c851SJason Gunthorpe 				 __func__);
1675727b7e9aSMajd Dibbiny 			attr->rq_psn &= 0xffffff;
1676727b7e9aSMajd Dibbiny 		}
1677727b7e9aSMajd Dibbiny 
1678727b7e9aSMajd Dibbiny 		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
167943c7c851SJason Gunthorpe 			dev_warn(&qp->device->dev,
168043c7c851SJason Gunthorpe 				 "%s sq_psn overflow, masking to 24 bits\n",
168143c7c851SJason Gunthorpe 				 __func__);
1682727b7e9aSMajd Dibbiny 			attr->sq_psn &= 0xffffff;
1683727b7e9aSMajd Dibbiny 		}
1684727b7e9aSMajd Dibbiny 	}
1685727b7e9aSMajd Dibbiny 
1686498ca3c8SNoa Osherovich 	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
16871a1f460fSJason Gunthorpe 	if (ret)
16881a1f460fSJason Gunthorpe 		goto out;
16891a1f460fSJason Gunthorpe 
16901a1f460fSJason Gunthorpe 	if (attr_mask & IB_QP_PORT)
1691498ca3c8SNoa Osherovich 		qp->port = attr->port_num;
16921a1f460fSJason Gunthorpe 	if (attr_mask & IB_QP_AV)
16931a1f460fSJason Gunthorpe 		qp->av_sgid_attr =
16941a1f460fSJason Gunthorpe 			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
16951a1f460fSJason Gunthorpe 	if (attr_mask & IB_QP_ALT_PATH)
16961a1f460fSJason Gunthorpe 		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
16971a1f460fSJason Gunthorpe 			&attr->alt_ah_attr, qp->alt_path_sgid_attr);
1698498ca3c8SNoa Osherovich 
16998d9ec9adSJason Gunthorpe out:
17008d9ec9adSJason Gunthorpe 	if (attr_mask & IB_QP_ALT_PATH)
17018d9ec9adSJason Gunthorpe 		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
17028d9ec9adSJason Gunthorpe out_av:
17038d9ec9adSJason Gunthorpe 	if (attr_mask & IB_QP_AV)
17048d9ec9adSJason Gunthorpe 		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1705498ca3c8SNoa Osherovich 	return ret;
1706a512c2fbSParav Pandit }
1707b96ac05aSParav Pandit 
1708b96ac05aSParav Pandit /**
1709b96ac05aSParav Pandit  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1710b96ac05aSParav Pandit  * @ib_qp: The QP to modify.
1711b96ac05aSParav Pandit  * @attr: On input, specifies the QP attributes to modify.  On output,
1712b96ac05aSParav Pandit  *   the current values of selected QP attributes are returned.
1713b96ac05aSParav Pandit  * @attr_mask: A bit-mask used to specify which attributes of the QP
1714b96ac05aSParav Pandit  *   are being modified.
1715b96ac05aSParav Pandit  * @udata: pointer to the user's input/output buffer information
1717b96ac05aSParav Pandit  * It returns 0 on success and returns appropriate error code on error.
1718b96ac05aSParav Pandit  */
1719b96ac05aSParav Pandit int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1720b96ac05aSParav Pandit 			    int attr_mask, struct ib_udata *udata)
1721b96ac05aSParav Pandit {
17228d9ec9adSJason Gunthorpe 	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1723b96ac05aSParav Pandit }
1724a512c2fbSParav Pandit EXPORT_SYMBOL(ib_modify_qp_with_udata);
1725a512c2fbSParav Pandit 
1726d4186194SYuval Shaia int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1727d4186194SYuval Shaia {
1728d4186194SYuval Shaia 	int rc;
1729d4186194SYuval Shaia 	u32 netdev_speed;
1730d4186194SYuval Shaia 	struct net_device *netdev;
1731d4186194SYuval Shaia 	struct ethtool_link_ksettings lksettings;
1732d4186194SYuval Shaia 
1733d4186194SYuval Shaia 	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1734d4186194SYuval Shaia 		return -EINVAL;
1735d4186194SYuval Shaia 
1736c2261dd7SJason Gunthorpe 	netdev = ib_device_get_netdev(dev, port_num);
1737d4186194SYuval Shaia 	if (!netdev)
1738d4186194SYuval Shaia 		return -ENODEV;
1739d4186194SYuval Shaia 
1740d4186194SYuval Shaia 	rtnl_lock();
1741d4186194SYuval Shaia 	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1742d4186194SYuval Shaia 	rtnl_unlock();
1743d4186194SYuval Shaia 
1744d4186194SYuval Shaia 	dev_put(netdev);
1745d4186194SYuval Shaia 
1746d4186194SYuval Shaia 	if (!rc) {
1747d4186194SYuval Shaia 		netdev_speed = lksettings.base.speed;
1748d4186194SYuval Shaia 	} else {
1749d4186194SYuval Shaia 		netdev_speed = SPEED_1000;
1750d4186194SYuval Shaia 		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1751d4186194SYuval Shaia 			netdev_speed);
1752d4186194SYuval Shaia 	}
1753d4186194SYuval Shaia 
1754d4186194SYuval Shaia 	if (netdev_speed <= SPEED_1000) {
1755d4186194SYuval Shaia 		*width = IB_WIDTH_1X;
1756d4186194SYuval Shaia 		*speed = IB_SPEED_SDR;
1757d4186194SYuval Shaia 	} else if (netdev_speed <= SPEED_10000) {
1758d4186194SYuval Shaia 		*width = IB_WIDTH_1X;
1759d4186194SYuval Shaia 		*speed = IB_SPEED_FDR10;
1760d4186194SYuval Shaia 	} else if (netdev_speed <= SPEED_20000) {
1761d4186194SYuval Shaia 		*width = IB_WIDTH_4X;
1762d4186194SYuval Shaia 		*speed = IB_SPEED_DDR;
1763d4186194SYuval Shaia 	} else if (netdev_speed <= SPEED_25000) {
1764d4186194SYuval Shaia 		*width = IB_WIDTH_1X;
1765d4186194SYuval Shaia 		*speed = IB_SPEED_EDR;
1766d4186194SYuval Shaia 	} else if (netdev_speed <= SPEED_40000) {
1767d4186194SYuval Shaia 		*width = IB_WIDTH_4X;
1768d4186194SYuval Shaia 		*speed = IB_SPEED_FDR10;
1769d4186194SYuval Shaia 	} else {
1770d4186194SYuval Shaia 		*width = IB_WIDTH_4X;
1771d4186194SYuval Shaia 		*speed = IB_SPEED_EDR;
1772d4186194SYuval Shaia 	}
1773d4186194SYuval Shaia 
1774d4186194SYuval Shaia 	return 0;
1775d4186194SYuval Shaia }
1776d4186194SYuval Shaia EXPORT_SYMBOL(ib_get_eth_speed);
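
/*
 * Editor's note: an illustrative sketch only: a RoCE provider's query_port
 * handler delegating the active speed/width calculation to
 * ib_get_eth_speed(); the surrounding driver function is hypothetical.
 */
static int example_query_port(struct ib_device *ibdev, u8 port_num,
			      struct ib_port_attr *props)
{
	/* the driver would fill in the other props fields itself */
	return ib_get_eth_speed(ibdev, port_num, &props->active_speed,
				&props->active_width);
}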
1777d4186194SYuval Shaia 
17781da177e4SLinus Torvalds int ib_modify_qp(struct ib_qp *qp,
17791da177e4SLinus Torvalds 		 struct ib_qp_attr *qp_attr,
17801da177e4SLinus Torvalds 		 int qp_attr_mask)
17811da177e4SLinus Torvalds {
1782b96ac05aSParav Pandit 	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
17831da177e4SLinus Torvalds }
17841da177e4SLinus Torvalds EXPORT_SYMBOL(ib_modify_qp);
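
/*
 * Editor's note: an illustrative sketch only: moving a newly created RC QP
 * from RESET to INIT with ib_modify_qp().  The mask mirrors the req_param
 * entry in qp_state_table above; the pkey index, port and access flags are
 * hypothetical.
 */
static int example_qp_to_init(struct ib_qp *qp)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = 1,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}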
17851da177e4SLinus Torvalds 
17861da177e4SLinus Torvalds int ib_query_qp(struct ib_qp *qp,
17871da177e4SLinus Torvalds 		struct ib_qp_attr *qp_attr,
17881da177e4SLinus Torvalds 		int qp_attr_mask,
17891da177e4SLinus Torvalds 		struct ib_qp_init_attr *qp_init_attr)
17901da177e4SLinus Torvalds {
17918d9ec9adSJason Gunthorpe 	qp_attr->ah_attr.grh.sgid_attr = NULL;
17928d9ec9adSJason Gunthorpe 	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
17938d9ec9adSJason Gunthorpe 
17943023a1e9SKamal Heib 	return qp->device->ops.query_qp ?
17953023a1e9SKamal Heib 		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
17963023a1e9SKamal Heib 					 qp_init_attr) : -EOPNOTSUPP;
17971da177e4SLinus Torvalds }
17981da177e4SLinus Torvalds EXPORT_SYMBOL(ib_query_qp);
17991da177e4SLinus Torvalds 
18000e0ec7e0SSean Hefty int ib_close_qp(struct ib_qp *qp)
18010e0ec7e0SSean Hefty {
18020e0ec7e0SSean Hefty 	struct ib_qp *real_qp;
18030e0ec7e0SSean Hefty 	unsigned long flags;
18040e0ec7e0SSean Hefty 
18050e0ec7e0SSean Hefty 	real_qp = qp->real_qp;
18060e0ec7e0SSean Hefty 	if (real_qp == qp)
18070e0ec7e0SSean Hefty 		return -EINVAL;
18080e0ec7e0SSean Hefty 
18090e0ec7e0SSean Hefty 	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
18100e0ec7e0SSean Hefty 	list_del(&qp->open_list);
18110e0ec7e0SSean Hefty 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
18120e0ec7e0SSean Hefty 
18130e0ec7e0SSean Hefty 	atomic_dec(&real_qp->usecnt);
18144a50881bSMoni Shoua 	if (qp->qp_sec)
1815d291f1a6SDaniel Jurgens 		ib_close_shared_qp_security(qp->qp_sec);
18160e0ec7e0SSean Hefty 	kfree(qp);
18170e0ec7e0SSean Hefty 
18180e0ec7e0SSean Hefty 	return 0;
18190e0ec7e0SSean Hefty }
18200e0ec7e0SSean Hefty EXPORT_SYMBOL(ib_close_qp);
18210e0ec7e0SSean Hefty 
18220e0ec7e0SSean Hefty static int __ib_destroy_shared_qp(struct ib_qp *qp)
18230e0ec7e0SSean Hefty {
18240e0ec7e0SSean Hefty 	struct ib_xrcd *xrcd;
18250e0ec7e0SSean Hefty 	struct ib_qp *real_qp;
18260e0ec7e0SSean Hefty 	int ret;
18270e0ec7e0SSean Hefty 
18280e0ec7e0SSean Hefty 	real_qp = qp->real_qp;
18290e0ec7e0SSean Hefty 	xrcd = real_qp->xrcd;
18300e0ec7e0SSean Hefty 
18310e0ec7e0SSean Hefty 	mutex_lock(&xrcd->tgt_qp_mutex);
18320e0ec7e0SSean Hefty 	ib_close_qp(qp);
18330e0ec7e0SSean Hefty 	if (atomic_read(&real_qp->usecnt) == 0)
18340e0ec7e0SSean Hefty 		list_del(&real_qp->xrcd_list);
18350e0ec7e0SSean Hefty 	else
18360e0ec7e0SSean Hefty 		real_qp = NULL;
18370e0ec7e0SSean Hefty 	mutex_unlock(&xrcd->tgt_qp_mutex);
18380e0ec7e0SSean Hefty 
18390e0ec7e0SSean Hefty 	if (real_qp) {
18400e0ec7e0SSean Hefty 		ret = ib_destroy_qp(real_qp);
18410e0ec7e0SSean Hefty 		if (!ret)
18420e0ec7e0SSean Hefty 			atomic_dec(&xrcd->usecnt);
18430e0ec7e0SSean Hefty 		else
18440e0ec7e0SSean Hefty 			__ib_insert_xrcd_qp(xrcd, real_qp);
18450e0ec7e0SSean Hefty 	}
18460e0ec7e0SSean Hefty 
18470e0ec7e0SSean Hefty 	return 0;
18480e0ec7e0SSean Hefty }
18490e0ec7e0SSean Hefty 
1850c4367a26SShamir Rabinovitch int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
18511da177e4SLinus Torvalds {
18521a1f460fSJason Gunthorpe 	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
18531a1f460fSJason Gunthorpe 	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
18541da177e4SLinus Torvalds 	struct ib_pd *pd;
18551da177e4SLinus Torvalds 	struct ib_cq *scq, *rcq;
18561da177e4SLinus Torvalds 	struct ib_srq *srq;
1857a9017e23SYishai Hadas 	struct ib_rwq_ind_table *ind_tbl;
1858d291f1a6SDaniel Jurgens 	struct ib_qp_security *sec;
18591da177e4SLinus Torvalds 	int ret;
18601da177e4SLinus Torvalds 
1861fffb0383SChristoph Hellwig 	WARN_ON_ONCE(qp->mrs_used > 0);
1862fffb0383SChristoph Hellwig 
18630e0ec7e0SSean Hefty 	if (atomic_read(&qp->usecnt))
18640e0ec7e0SSean Hefty 		return -EBUSY;
18650e0ec7e0SSean Hefty 
18660e0ec7e0SSean Hefty 	if (qp->real_qp != qp)
18670e0ec7e0SSean Hefty 		return __ib_destroy_shared_qp(qp);
18680e0ec7e0SSean Hefty 
18691da177e4SLinus Torvalds 	pd   = qp->pd;
18701da177e4SLinus Torvalds 	scq  = qp->send_cq;
18711da177e4SLinus Torvalds 	rcq  = qp->recv_cq;
18721da177e4SLinus Torvalds 	srq  = qp->srq;
1873a9017e23SYishai Hadas 	ind_tbl = qp->rwq_ind_tbl;
1874d291f1a6SDaniel Jurgens 	sec  = qp->qp_sec;
1875d291f1a6SDaniel Jurgens 	if (sec)
1876d291f1a6SDaniel Jurgens 		ib_destroy_qp_security_begin(sec);
18771da177e4SLinus Torvalds 
1878a060b562SChristoph Hellwig 	if (!qp->uobject)
1879a060b562SChristoph Hellwig 		rdma_rw_cleanup_mrs(qp);
1880a060b562SChristoph Hellwig 
188178a0cd64SLeon Romanovsky 	rdma_restrack_del(&qp->res);
1882c4367a26SShamir Rabinovitch 	ret = qp->device->ops.destroy_qp(qp, udata);
18831da177e4SLinus Torvalds 	if (!ret) {
18841a1f460fSJason Gunthorpe 		if (alt_path_sgid_attr)
18851a1f460fSJason Gunthorpe 			rdma_put_gid_attr(alt_path_sgid_attr);
18861a1f460fSJason Gunthorpe 		if (av_sgid_attr)
18871a1f460fSJason Gunthorpe 			rdma_put_gid_attr(av_sgid_attr);
1888b42b63cfSSean Hefty 		if (pd)
18891da177e4SLinus Torvalds 			atomic_dec(&pd->usecnt);
1890b42b63cfSSean Hefty 		if (scq)
18911da177e4SLinus Torvalds 			atomic_dec(&scq->usecnt);
1892b42b63cfSSean Hefty 		if (rcq)
18931da177e4SLinus Torvalds 			atomic_dec(&rcq->usecnt);
18941da177e4SLinus Torvalds 		if (srq)
18951da177e4SLinus Torvalds 			atomic_dec(&srq->usecnt);
1896a9017e23SYishai Hadas 		if (ind_tbl)
1897a9017e23SYishai Hadas 			atomic_dec(&ind_tbl->usecnt);
1898d291f1a6SDaniel Jurgens 		if (sec)
1899d291f1a6SDaniel Jurgens 			ib_destroy_qp_security_end(sec);
1900d291f1a6SDaniel Jurgens 	} else {
1901d291f1a6SDaniel Jurgens 		if (sec)
1902d291f1a6SDaniel Jurgens 			ib_destroy_qp_security_abort(sec);
19031da177e4SLinus Torvalds 	}
19041da177e4SLinus Torvalds 
19051da177e4SLinus Torvalds 	return ret;
19061da177e4SLinus Torvalds }
1907c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_destroy_qp_user);
19081da177e4SLinus Torvalds 
19091da177e4SLinus Torvalds /* Completion queues */
19101da177e4SLinus Torvalds 
19117350cdd0SBharat Potnuri struct ib_cq *__ib_create_cq(struct ib_device *device,
19121da177e4SLinus Torvalds 			     ib_comp_handler comp_handler,
19131da177e4SLinus Torvalds 			     void (*event_handler)(struct ib_event *, void *),
19148e37210bSMatan Barak 			     void *cq_context,
19157350cdd0SBharat Potnuri 			     const struct ib_cq_init_attr *cq_attr,
19167350cdd0SBharat Potnuri 			     const char *caller)
19171da177e4SLinus Torvalds {
19181da177e4SLinus Torvalds 	struct ib_cq *cq;
19191da177e4SLinus Torvalds 
1920ff23dfa1SShamir Rabinovitch 	cq = device->ops.create_cq(device, cq_attr, NULL);
19211da177e4SLinus Torvalds 
19221da177e4SLinus Torvalds 	if (!IS_ERR(cq)) {
19231da177e4SLinus Torvalds 		cq->device        = device;
1924b5e81bf5SRoland Dreier 		cq->uobject       = NULL;
19251da177e4SLinus Torvalds 		cq->comp_handler  = comp_handler;
19261da177e4SLinus Torvalds 		cq->event_handler = event_handler;
19271da177e4SLinus Torvalds 		cq->cq_context    = cq_context;
19281da177e4SLinus Torvalds 		atomic_set(&cq->usecnt, 0);
192908f294a1SLeon Romanovsky 		cq->res.type = RDMA_RESTRACK_CQ;
19302165fc26SLeon Romanovsky 		rdma_restrack_set_task(&cq->res, caller);
1931af8d7037SShamir Rabinovitch 		rdma_restrack_kadd(&cq->res);
19321da177e4SLinus Torvalds 	}
19331da177e4SLinus Torvalds 
19341da177e4SLinus Torvalds 	return cq;
19351da177e4SLinus Torvalds }
19367350cdd0SBharat Potnuri EXPORT_SYMBOL(__ib_create_cq);
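
/*
 * Editor's note: an illustrative sketch only: kernel callers normally use
 * the ib_create_cq() wrapper, which supplies KBUILD_MODNAME as the restrack
 * caller name; the completion handler and CQE count here are hypothetical.
 */
static struct ib_cq *example_create_cq(struct ib_device *device,
				       ib_comp_handler comp, void *ctx)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe = 128,	/* minimum number of CQ entries */
	};

	return ib_create_cq(device, comp, NULL, ctx, &cq_attr);
}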
19371da177e4SLinus Torvalds 
19384190b4e9SLeon Romanovsky int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
19392dd57162SEli Cohen {
19403023a1e9SKamal Heib 	return cq->device->ops.modify_cq ?
19413023a1e9SKamal Heib 		cq->device->ops.modify_cq(cq, cq_count,
19423023a1e9SKamal Heib 					  cq_period) : -EOPNOTSUPP;
19432dd57162SEli Cohen }
19444190b4e9SLeon Romanovsky EXPORT_SYMBOL(rdma_set_cq_moderation);
19452dd57162SEli Cohen 
1946c4367a26SShamir Rabinovitch int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
19471da177e4SLinus Torvalds {
19481da177e4SLinus Torvalds 	if (atomic_read(&cq->usecnt))
19491da177e4SLinus Torvalds 		return -EBUSY;
19501da177e4SLinus Torvalds 
195108f294a1SLeon Romanovsky 	rdma_restrack_del(&cq->res);
1952a52c8e24SLeon Romanovsky 	cq->device->ops.destroy_cq(cq, udata);
1953a52c8e24SLeon Romanovsky 	return 0;
19541da177e4SLinus Torvalds }
1955c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_destroy_cq_user);
19561da177e4SLinus Torvalds 
1957a74cd4afSRoland Dreier int ib_resize_cq(struct ib_cq *cq, int cqe)
19581da177e4SLinus Torvalds {
19593023a1e9SKamal Heib 	return cq->device->ops.resize_cq ?
19603023a1e9SKamal Heib 		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
19611da177e4SLinus Torvalds }
19621da177e4SLinus Torvalds EXPORT_SYMBOL(ib_resize_cq);
19631da177e4SLinus Torvalds 
19641da177e4SLinus Torvalds /* Memory regions */
19651da177e4SLinus Torvalds 
1966c4367a26SShamir Rabinovitch int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
19671da177e4SLinus Torvalds {
1968ab67ed8dSChristoph Hellwig 	struct ib_pd *pd = mr->pd;
1969be934ccaSAriel Levkovich 	struct ib_dm *dm = mr->dm;
19701da177e4SLinus Torvalds 	int ret;
19711da177e4SLinus Torvalds 
1972fccec5b8SSteve Wise 	rdma_restrack_del(&mr->res);
1973c4367a26SShamir Rabinovitch 	ret = mr->device->ops.dereg_mr(mr, udata);
1974be934ccaSAriel Levkovich 	if (!ret) {
19751da177e4SLinus Torvalds 		atomic_dec(&pd->usecnt);
1976be934ccaSAriel Levkovich 		if (dm)
1977be934ccaSAriel Levkovich 			atomic_dec(&dm->usecnt);
1978be934ccaSAriel Levkovich 	}
19791da177e4SLinus Torvalds 
19801da177e4SLinus Torvalds 	return ret;
19811da177e4SLinus Torvalds }
1982c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_dereg_mr_user);
19831da177e4SLinus Torvalds 
19849bee178bSSagi Grimberg /**
198591f57129SIsrael Rukshin  * ib_alloc_mr_user() - Allocates a memory region
19869bee178bSSagi Grimberg  * @pd:            protection domain associated with the region
19879bee178bSSagi Grimberg  * @mr_type:       memory region type
19889bee178bSSagi Grimberg  * @max_num_sg:    maximum sg entries available for registration.
1989c4367a26SShamir Rabinovitch  * @udata:	   user data or null for kernel objects
19909bee178bSSagi Grimberg  *
19919bee178bSSagi Grimberg  * Notes:
19929bee178bSSagi Grimberg  * Memory registration page/sg lists must not exceed max_num_sg.
19939bee178bSSagi Grimberg  * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
19949bee178bSSagi Grimberg  * max_num_sg * used_page_size.
19959bee178bSSagi Grimberg  *
19969bee178bSSagi Grimberg  */
1997c4367a26SShamir Rabinovitch struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
1998c4367a26SShamir Rabinovitch 			       u32 max_num_sg, struct ib_udata *udata)
199917cd3a2dSSagi Grimberg {
200017cd3a2dSSagi Grimberg 	struct ib_mr *mr;
200117cd3a2dSSagi Grimberg 
20023023a1e9SKamal Heib 	if (!pd->device->ops.alloc_mr)
200387915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
200417cd3a2dSSagi Grimberg 
2005c4367a26SShamir Rabinovitch 	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata);
200617cd3a2dSSagi Grimberg 	if (!IS_ERR(mr)) {
200717cd3a2dSSagi Grimberg 		mr->device  = pd->device;
200817cd3a2dSSagi Grimberg 		mr->pd      = pd;
200954e7e48bSAriel Levkovich 		mr->dm      = NULL;
201017cd3a2dSSagi Grimberg 		mr->uobject = NULL;
201117cd3a2dSSagi Grimberg 		atomic_inc(&pd->usecnt);
2012d4a85c30SSteve Wise 		mr->need_inval = false;
2013fccec5b8SSteve Wise 		mr->res.type = RDMA_RESTRACK_MR;
2014af8d7037SShamir Rabinovitch 		rdma_restrack_kadd(&mr->res);
201517cd3a2dSSagi Grimberg 	}
201617cd3a2dSSagi Grimberg 
201717cd3a2dSSagi Grimberg 	return mr;
201817cd3a2dSSagi Grimberg }
2019c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_alloc_mr_user);
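
/*
 * Editor's note: an illustrative sketch only: the usual IB_MR_TYPE_MEM_REG
 * pairing of ib_alloc_mr() with ib_map_mr_sg().  "sgl"/"nents" stand for a
 * DMA-mapped scatterlist provided by a hypothetical caller.
 */
static struct ib_mr *example_alloc_and_map_mr(struct ib_pd *pd,
					      struct scatterlist *sgl,
					      int nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return mr;

	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
	if (n != nents) {
		ib_dereg_mr(mr);
		return ERR_PTR(n < 0 ? n : -EINVAL);
	}
	return mr;
}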
202000f7ec36SSteve Wise 
20211da177e4SLinus Torvalds /* "Fast" memory regions */
20221da177e4SLinus Torvalds 
20231da177e4SLinus Torvalds struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
20241da177e4SLinus Torvalds 			    int mr_access_flags,
20251da177e4SLinus Torvalds 			    struct ib_fmr_attr *fmr_attr)
20261da177e4SLinus Torvalds {
20271da177e4SLinus Torvalds 	struct ib_fmr *fmr;
20281da177e4SLinus Torvalds 
20293023a1e9SKamal Heib 	if (!pd->device->ops.alloc_fmr)
203087915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
20311da177e4SLinus Torvalds 
20323023a1e9SKamal Heib 	fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
20331da177e4SLinus Torvalds 	if (!IS_ERR(fmr)) {
20341da177e4SLinus Torvalds 		fmr->device = pd->device;
20351da177e4SLinus Torvalds 		fmr->pd     = pd;
20361da177e4SLinus Torvalds 		atomic_inc(&pd->usecnt);
20371da177e4SLinus Torvalds 	}
20381da177e4SLinus Torvalds 
20391da177e4SLinus Torvalds 	return fmr;
20401da177e4SLinus Torvalds }
20411da177e4SLinus Torvalds EXPORT_SYMBOL(ib_alloc_fmr);
20421da177e4SLinus Torvalds 
20431da177e4SLinus Torvalds int ib_unmap_fmr(struct list_head *fmr_list)
20441da177e4SLinus Torvalds {
20451da177e4SLinus Torvalds 	struct ib_fmr *fmr;
20461da177e4SLinus Torvalds 
20471da177e4SLinus Torvalds 	if (list_empty(fmr_list))
20481da177e4SLinus Torvalds 		return 0;
20491da177e4SLinus Torvalds 
20501da177e4SLinus Torvalds 	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
20513023a1e9SKamal Heib 	return fmr->device->ops.unmap_fmr(fmr_list);
20521da177e4SLinus Torvalds }
20531da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unmap_fmr);
20541da177e4SLinus Torvalds 
20551da177e4SLinus Torvalds int ib_dealloc_fmr(struct ib_fmr *fmr)
20561da177e4SLinus Torvalds {
20571da177e4SLinus Torvalds 	struct ib_pd *pd;
20581da177e4SLinus Torvalds 	int ret;
20591da177e4SLinus Torvalds 
20601da177e4SLinus Torvalds 	pd = fmr->pd;
20613023a1e9SKamal Heib 	ret = fmr->device->ops.dealloc_fmr(fmr);
20621da177e4SLinus Torvalds 	if (!ret)
20631da177e4SLinus Torvalds 		atomic_dec(&pd->usecnt);
20641da177e4SLinus Torvalds 
20651da177e4SLinus Torvalds 	return ret;
20661da177e4SLinus Torvalds }
20671da177e4SLinus Torvalds EXPORT_SYMBOL(ib_dealloc_fmr);
20681da177e4SLinus Torvalds 
20691da177e4SLinus Torvalds /* Multicast groups */
20701da177e4SLinus Torvalds 
207152363335SNoa Osherovich static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
207252363335SNoa Osherovich {
207352363335SNoa Osherovich 	struct ib_qp_init_attr init_attr = {};
207452363335SNoa Osherovich 	struct ib_qp_attr attr = {};
207552363335SNoa Osherovich 	int num_eth_ports = 0;
207652363335SNoa Osherovich 	int port;
207752363335SNoa Osherovich 
207852363335SNoa Osherovich 	/* If QP state >= init, it is assigned to a port and we can check this
207952363335SNoa Osherovich 	 * port only.
208052363335SNoa Osherovich 	 */
208152363335SNoa Osherovich 	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
208252363335SNoa Osherovich 		if (attr.qp_state >= IB_QPS_INIT) {
2083e6f9bc34SAlex Estrin 			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
208452363335SNoa Osherovich 			    IB_LINK_LAYER_INFINIBAND)
208552363335SNoa Osherovich 				return true;
208652363335SNoa Osherovich 			goto lid_check;
208752363335SNoa Osherovich 		}
208852363335SNoa Osherovich 	}
208952363335SNoa Osherovich 
209052363335SNoa Osherovich 	/* Can't get a quick answer, iterate over all ports */
209152363335SNoa Osherovich 	for (port = 0; port < qp->device->phys_port_cnt; port++)
2092e6f9bc34SAlex Estrin 		if (rdma_port_get_link_layer(qp->device, port) !=
209352363335SNoa Osherovich 		    IB_LINK_LAYER_INFINIBAND)
209452363335SNoa Osherovich 			num_eth_ports++;
209552363335SNoa Osherovich 
209652363335SNoa Osherovich 	/* If we have at least one Ethernet port, the RoCE annex declares that
209752363335SNoa Osherovich 	 * multicast LID should be ignored. We can't tell at this step if the
209852363335SNoa Osherovich 	 * QP belongs to an IB or Ethernet port.
209952363335SNoa Osherovich 	 */
210052363335SNoa Osherovich 	if (num_eth_ports)
210152363335SNoa Osherovich 		return true;
210252363335SNoa Osherovich 
210352363335SNoa Osherovich 	/* If all the ports are IB, we can check according to IB spec. */
210452363335SNoa Osherovich lid_check:
210552363335SNoa Osherovich 	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
210652363335SNoa Osherovich 		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
210752363335SNoa Osherovich }
210852363335SNoa Osherovich 
21091da177e4SLinus Torvalds int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
21101da177e4SLinus Torvalds {
2111c3bccbfbSOr Gerlitz 	int ret;
2112c3bccbfbSOr Gerlitz 
21133023a1e9SKamal Heib 	if (!qp->device->ops.attach_mcast)
211487915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
2115be1d325aSNoa Osherovich 
2116be1d325aSNoa Osherovich 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2117be1d325aSNoa Osherovich 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
21180c33aeedSJack Morgenstein 		return -EINVAL;
21190c33aeedSJack Morgenstein 
21203023a1e9SKamal Heib 	ret = qp->device->ops.attach_mcast(qp, gid, lid);
2121c3bccbfbSOr Gerlitz 	if (!ret)
2122c3bccbfbSOr Gerlitz 		atomic_inc(&qp->usecnt);
2123c3bccbfbSOr Gerlitz 	return ret;
21241da177e4SLinus Torvalds }
21251da177e4SLinus Torvalds EXPORT_SYMBOL(ib_attach_mcast);
21261da177e4SLinus Torvalds 
21271da177e4SLinus Torvalds int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
21281da177e4SLinus Torvalds {
2129c3bccbfbSOr Gerlitz 	int ret;
2130c3bccbfbSOr Gerlitz 
21313023a1e9SKamal Heib 	if (!qp->device->ops.detach_mcast)
213287915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
2133be1d325aSNoa Osherovich 
2134be1d325aSNoa Osherovich 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2135be1d325aSNoa Osherovich 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
21360c33aeedSJack Morgenstein 		return -EINVAL;
21370c33aeedSJack Morgenstein 
21383023a1e9SKamal Heib 	ret = qp->device->ops.detach_mcast(qp, gid, lid);
2139c3bccbfbSOr Gerlitz 	if (!ret)
2140c3bccbfbSOr Gerlitz 		atomic_dec(&qp->usecnt);
2141c3bccbfbSOr Gerlitz 	return ret;
21421da177e4SLinus Torvalds }
21431da177e4SLinus Torvalds EXPORT_SYMBOL(ib_detach_mcast);
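
/*
 * A minimal sketch (not from this file): joining and leaving a multicast
 * group on a UD QP. In practice the MGID and MLID come from an SA
 * multicast join; here they are hypothetical parameters.
 */
static int mcast_example(struct ib_qp *ud_qp, union ib_gid *mgid, u16 mlid)
{
	int ret;

	/* ud_qp must be an IB_QPT_UD QP and mgid a multicast GID */
	ret = ib_attach_mcast(ud_qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive traffic sent to the group ... */

	return ib_detach_mcast(ud_qp, mgid, mlid);
}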
214459991f94SSean Hefty 
2145f66c8ba4SLeon Romanovsky struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
214659991f94SSean Hefty {
214759991f94SSean Hefty 	struct ib_xrcd *xrcd;
214859991f94SSean Hefty 
21493023a1e9SKamal Heib 	if (!device->ops.alloc_xrcd)
215087915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
215159991f94SSean Hefty 
2152ff23dfa1SShamir Rabinovitch 	xrcd = device->ops.alloc_xrcd(device, NULL);
215359991f94SSean Hefty 	if (!IS_ERR(xrcd)) {
215459991f94SSean Hefty 		xrcd->device = device;
215553d0bd1eSSean Hefty 		xrcd->inode = NULL;
215659991f94SSean Hefty 		atomic_set(&xrcd->usecnt, 0);
2157d3d72d90SSean Hefty 		mutex_init(&xrcd->tgt_qp_mutex);
2158d3d72d90SSean Hefty 		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
215959991f94SSean Hefty 	}
216059991f94SSean Hefty 
216159991f94SSean Hefty 	return xrcd;
216259991f94SSean Hefty }
2163f66c8ba4SLeon Romanovsky EXPORT_SYMBOL(__ib_alloc_xrcd);
216459991f94SSean Hefty 
2165c4367a26SShamir Rabinovitch int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
216659991f94SSean Hefty {
2167d3d72d90SSean Hefty 	struct ib_qp *qp;
2168d3d72d90SSean Hefty 	int ret;
2169d3d72d90SSean Hefty 
217059991f94SSean Hefty 	if (atomic_read(&xrcd->usecnt))
217159991f94SSean Hefty 		return -EBUSY;
217259991f94SSean Hefty 
2173d3d72d90SSean Hefty 	while (!list_empty(&xrcd->tgt_qp_list)) {
2174d3d72d90SSean Hefty 		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
2175d3d72d90SSean Hefty 		ret = ib_destroy_qp(qp);
2176d3d72d90SSean Hefty 		if (ret)
2177d3d72d90SSean Hefty 			return ret;
2178d3d72d90SSean Hefty 	}
2179d3d72d90SSean Hefty 
2180c4367a26SShamir Rabinovitch 	return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
218159991f94SSean Hefty }
218259991f94SSean Hefty EXPORT_SYMBOL(ib_dealloc_xrcd);
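
/*
 * A minimal sketch (not from this file): allocating an XRC domain and
 * hanging an XRC target QP off it. XRC_TGT QPs take no PD, so
 * ib_create_qp() is called with a NULL pd; error unwinding is abbreviated.
 */
static int xrcd_example(struct ib_device *device)
{
	struct ib_qp_init_attr init_attr = {
		.qp_type = IB_QPT_XRC_TGT,
	};
	struct ib_xrcd *xrcd;
	struct ib_qp *qp;

	xrcd = ib_alloc_xrcd(device);
	if (IS_ERR(xrcd))
		return PTR_ERR(xrcd);

	init_attr.xrcd = xrcd;
	qp = ib_create_qp(NULL, &init_attr);
	if (IS_ERR(qp)) {
		ib_dealloc_xrcd(xrcd, NULL);
		return PTR_ERR(qp);
	}

	/* ... accept XRC connections ... */

	ib_destroy_qp(qp);
	return ib_dealloc_xrcd(xrcd, NULL);
}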
2183319a441dSHadar Hen Zion 
21845fd251c8SYishai Hadas /**
21855fd251c8SYishai Hadas  * ib_create_wq - Creates a WQ associated with the specified protection
21865fd251c8SYishai Hadas  * domain.
21875fd251c8SYishai Hadas  * @pd: The protection domain associated with the WQ.
21881f58621eSRandy Dunlap  * @wq_attr: A list of initial attributes required to create the
21895fd251c8SYishai Hadas  * WQ. If WQ creation succeeds, then the attributes are updated to
21905fd251c8SYishai Hadas  * the actual capabilities of the created WQ.
21915fd251c8SYishai Hadas  *
21921f58621eSRandy Dunlap  * wq_attr->max_wr and wq_attr->max_sge determine
21935fd251c8SYishai Hadas  * the requested size of the WQ, and are set to the actual values
21945fd251c8SYishai Hadas  * allocated on return.
21955fd251c8SYishai Hadas  * If ib_create_wq() succeeds, then max_wr and max_sge will always be
21965fd251c8SYishai Hadas  * at least as large as the requested values.
21975fd251c8SYishai Hadas  */
21985fd251c8SYishai Hadas struct ib_wq *ib_create_wq(struct ib_pd *pd,
21995fd251c8SYishai Hadas 			   struct ib_wq_init_attr *wq_attr)
22005fd251c8SYishai Hadas {
22015fd251c8SYishai Hadas 	struct ib_wq *wq;
22025fd251c8SYishai Hadas 
22033023a1e9SKamal Heib 	if (!pd->device->ops.create_wq)
220487915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
22055fd251c8SYishai Hadas 
22063023a1e9SKamal Heib 	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
22075fd251c8SYishai Hadas 	if (!IS_ERR(wq)) {
22085fd251c8SYishai Hadas 		wq->event_handler = wq_attr->event_handler;
22095fd251c8SYishai Hadas 		wq->wq_context = wq_attr->wq_context;
22105fd251c8SYishai Hadas 		wq->wq_type = wq_attr->wq_type;
22115fd251c8SYishai Hadas 		wq->cq = wq_attr->cq;
22125fd251c8SYishai Hadas 		wq->device = pd->device;
22135fd251c8SYishai Hadas 		wq->pd = pd;
22145fd251c8SYishai Hadas 		wq->uobject = NULL;
22155fd251c8SYishai Hadas 		atomic_inc(&pd->usecnt);
22165fd251c8SYishai Hadas 		atomic_inc(&wq_attr->cq->usecnt);
22175fd251c8SYishai Hadas 		atomic_set(&wq->usecnt, 0);
22185fd251c8SYishai Hadas 	}
22195fd251c8SYishai Hadas 	return wq;
22205fd251c8SYishai Hadas }
22215fd251c8SYishai Hadas EXPORT_SYMBOL(ib_create_wq);
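
/*
 * A minimal sketch (not from this file): creating a receive WQ that can
 * later be placed in an RSS indirection table. The depth and SGE count
 * are arbitrary; on success wq_attr.max_wr/max_sge hold the values
 * actually allocated.
 */
static struct ib_wq *create_wq_example(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr wq_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr  = 256,
		.max_sge = 1,
		.cq      = cq,
	};

	return ib_create_wq(pd, &wq_attr);
}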
22225fd251c8SYishai Hadas 
22235fd251c8SYishai Hadas /**
2224c4367a26SShamir Rabinovitch  * ib_destroy_wq - Destroys the specified user WQ.
22255fd251c8SYishai Hadas  * @wq: The WQ to destroy.
2226c4367a26SShamir Rabinovitch  * @udata: Valid user data or NULL for kernel objects
22275fd251c8SYishai Hadas  */
2228c4367a26SShamir Rabinovitch int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
22295fd251c8SYishai Hadas {
22305fd251c8SYishai Hadas 	int err;
22315fd251c8SYishai Hadas 	struct ib_cq *cq = wq->cq;
22325fd251c8SYishai Hadas 	struct ib_pd *pd = wq->pd;
22335fd251c8SYishai Hadas 
22345fd251c8SYishai Hadas 	if (atomic_read(&wq->usecnt))
22355fd251c8SYishai Hadas 		return -EBUSY;
22365fd251c8SYishai Hadas 
2237c4367a26SShamir Rabinovitch 	err = wq->device->ops.destroy_wq(wq, udata);
22385fd251c8SYishai Hadas 	if (!err) {
22395fd251c8SYishai Hadas 		atomic_dec(&pd->usecnt);
22405fd251c8SYishai Hadas 		atomic_dec(&cq->usecnt);
22415fd251c8SYishai Hadas 	}
22425fd251c8SYishai Hadas 	return err;
22435fd251c8SYishai Hadas }
22445fd251c8SYishai Hadas EXPORT_SYMBOL(ib_destroy_wq);
22455fd251c8SYishai Hadas 
22465fd251c8SYishai Hadas /**
22475fd251c8SYishai Hadas  * ib_modify_wq - Modifies the specified WQ.
22485fd251c8SYishai Hadas  * @wq: The WQ to modify.
22495fd251c8SYishai Hadas  * @wq_attr: On input, specifies the WQ attributes to modify.
22505fd251c8SYishai Hadas  * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
22515fd251c8SYishai Hadas  *   are being modified.
22525fd251c8SYishai Hadas  * On output, the current values of selected WQ attributes are returned.
22535fd251c8SYishai Hadas  */
22545fd251c8SYishai Hadas int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
22555fd251c8SYishai Hadas 		 u32 wq_attr_mask)
22565fd251c8SYishai Hadas {
22575fd251c8SYishai Hadas 	int err;
22585fd251c8SYishai Hadas 
22593023a1e9SKamal Heib 	if (!wq->device->ops.modify_wq)
226087915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
22615fd251c8SYishai Hadas 
22623023a1e9SKamal Heib 	err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
22635fd251c8SYishai Hadas 	return err;
22645fd251c8SYishai Hadas }
22655fd251c8SYishai Hadas EXPORT_SYMBOL(ib_modify_wq);
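
/*
 * A minimal sketch (not from this file): a freshly created WQ starts in
 * RESET and must be moved to RDY before it can receive; this is the usual
 * follow-up to the creation sketch above.
 */
static int wq_to_rdy_example(struct ib_wq *wq)
{
	struct ib_wq_attr wq_attr = {
		.wq_state      = IB_WQS_RDY,
		.curr_wq_state = IB_WQS_RESET,
	};

	return ib_modify_wq(wq, &wq_attr, IB_WQ_STATE | IB_WQ_CUR_STATE);
}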
22665fd251c8SYishai Hadas 
22676d39786bSYishai Hadas /**
22686d39786bSYishai Hadas  * ib_create_rwq_ind_table - Creates an RQ Indirection Table.
22696d39786bSYishai Hadas  * @device: The device on which to create the rwq indirection table.
22706d39786bSYishai Hadas  * @init_attr: A list of initial attributes required to
22716d39786bSYishai Hadas  * create the Indirection Table.
22726d39786bSYishai Hadas  *
22736d39786bSYishai Hadas  * Note: The lifetime of init_attr->ind_tbl must not be shorter than that
22746d39786bSYishai Hadas  *	of the created ib_rwq_ind_table object; the caller is responsible
22756d39786bSYishai Hadas  *	for its memory allocation/free.
22766d39786bSYishai Hadas  */
22776d39786bSYishai Hadas struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
22786d39786bSYishai Hadas 						 struct ib_rwq_ind_table_init_attr *init_attr)
22796d39786bSYishai Hadas {
22806d39786bSYishai Hadas 	struct ib_rwq_ind_table *rwq_ind_table;
22816d39786bSYishai Hadas 	int i;
22826d39786bSYishai Hadas 	u32 table_size;
22836d39786bSYishai Hadas 
22843023a1e9SKamal Heib 	if (!device->ops.create_rwq_ind_table)
228587915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
22866d39786bSYishai Hadas 
22876d39786bSYishai Hadas 	table_size = (1 << init_attr->log_ind_tbl_size);
22883023a1e9SKamal Heib 	rwq_ind_table = device->ops.create_rwq_ind_table(device,
22896d39786bSYishai Hadas 							 init_attr, NULL);
22906d39786bSYishai Hadas 	if (IS_ERR(rwq_ind_table))
22916d39786bSYishai Hadas 		return rwq_ind_table;
22926d39786bSYishai Hadas 
22936d39786bSYishai Hadas 	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
22946d39786bSYishai Hadas 	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
22956d39786bSYishai Hadas 	rwq_ind_table->device = device;
22966d39786bSYishai Hadas 	rwq_ind_table->uobject = NULL;
22976d39786bSYishai Hadas 	atomic_set(&rwq_ind_table->usecnt, 0);
22986d39786bSYishai Hadas 
22996d39786bSYishai Hadas 	for (i = 0; i < table_size; i++)
23006d39786bSYishai Hadas 		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
23016d39786bSYishai Hadas 
23026d39786bSYishai Hadas 	return rwq_ind_table;
23036d39786bSYishai Hadas }
23046d39786bSYishai Hadas EXPORT_SYMBOL(ib_create_rwq_ind_table);
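
/*
 * A minimal sketch (not from this file): building a four-entry RSS
 * indirection table from ready WQs. Per the note above, the wqs[] array
 * must outlive the returned table.
 */
static struct ib_rwq_ind_table *
rss_table_example(struct ib_device *device, struct ib_wq **wqs /* 4 entries */)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size = 2,		/* 1 << 2 == 4 WQs */
		.ind_tbl          = wqs,
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}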
23056d39786bSYishai Hadas 
23066d39786bSYishai Hadas /**
23076d39786bSYishai Hadas  * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
23086d39786bSYishai Hadas  * @rwq_ind_table: The Indirection Table to destroy.
23096d39786bSYishai Hadas  */
23106d39786bSYishai Hadas int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
23116d39786bSYishai Hadas {
23126d39786bSYishai Hadas 	int err, i;
23136d39786bSYishai Hadas 	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
23146d39786bSYishai Hadas 	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
23156d39786bSYishai Hadas 
23166d39786bSYishai Hadas 	if (atomic_read(&rwq_ind_table->usecnt))
23176d39786bSYishai Hadas 		return -EBUSY;
23186d39786bSYishai Hadas 
23193023a1e9SKamal Heib 	err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
23206d39786bSYishai Hadas 	if (!err) {
23216d39786bSYishai Hadas 		for (i = 0; i < table_size; i++)
23226d39786bSYishai Hadas 			atomic_dec(&ind_tbl[i]->usecnt);
23236d39786bSYishai Hadas 	}
23246d39786bSYishai Hadas 
23256d39786bSYishai Hadas 	return err;
23266d39786bSYishai Hadas }
23276d39786bSYishai Hadas EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
23286d39786bSYishai Hadas 
23291b01d335SSagi Grimberg int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
23301b01d335SSagi Grimberg 		       struct ib_mr_status *mr_status)
23311b01d335SSagi Grimberg {
23323023a1e9SKamal Heib 	if (!mr->device->ops.check_mr_status)
23333023a1e9SKamal Heib 		return -EOPNOTSUPP;
23343023a1e9SKamal Heib 
23353023a1e9SKamal Heib 	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
23361b01d335SSagi Grimberg }
23371b01d335SSagi Grimberg EXPORT_SYMBOL(ib_check_mr_status);
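
/*
 * A minimal sketch (not from this file): checking a signature-enabled MR
 * for T10-DIF errors after I/O completion, in the style of the iSER
 * initiator.
 */
static int sig_check_example(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error %d at offset %llu\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.sig_err_offset);
	return 0;
}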
23384c67e2bfSSagi Grimberg 
233950174a7fSEli Cohen int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
234050174a7fSEli Cohen 			 int state)
234150174a7fSEli Cohen {
23423023a1e9SKamal Heib 	if (!device->ops.set_vf_link_state)
234387915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
234450174a7fSEli Cohen 
23453023a1e9SKamal Heib 	return device->ops.set_vf_link_state(device, vf, port, state);
234650174a7fSEli Cohen }
234750174a7fSEli Cohen EXPORT_SYMBOL(ib_set_vf_link_state);
234850174a7fSEli Cohen 
234950174a7fSEli Cohen int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
235050174a7fSEli Cohen 		     struct ifla_vf_info *info)
235150174a7fSEli Cohen {
23523023a1e9SKamal Heib 	if (!device->ops.get_vf_config)
235387915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
235450174a7fSEli Cohen 
23553023a1e9SKamal Heib 	return device->ops.get_vf_config(device, vf, port, info);
235650174a7fSEli Cohen }
235750174a7fSEli Cohen EXPORT_SYMBOL(ib_get_vf_config);
235850174a7fSEli Cohen 
235950174a7fSEli Cohen int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
236050174a7fSEli Cohen 		    struct ifla_vf_stats *stats)
236150174a7fSEli Cohen {
23623023a1e9SKamal Heib 	if (!device->ops.get_vf_stats)
236387915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
236450174a7fSEli Cohen 
23653023a1e9SKamal Heib 	return device->ops.get_vf_stats(device, vf, port, stats);
236650174a7fSEli Cohen }
236750174a7fSEli Cohen EXPORT_SYMBOL(ib_get_vf_stats);
236850174a7fSEli Cohen 
236950174a7fSEli Cohen int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
237050174a7fSEli Cohen 		   int type)
237150174a7fSEli Cohen {
23723023a1e9SKamal Heib 	if (!device->ops.set_vf_guid)
237387915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
237450174a7fSEli Cohen 
23753023a1e9SKamal Heib 	return device->ops.set_vf_guid(device, vf, port, guid, type);
237650174a7fSEli Cohen }
237750174a7fSEli Cohen EXPORT_SYMBOL(ib_set_vf_guid);
237850174a7fSEli Cohen 
23794c67e2bfSSagi Grimberg /**
23804c67e2bfSSagi Grimberg  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
23814c67e2bfSSagi Grimberg  *     and set it on the memory region.
23824c67e2bfSSagi Grimberg  * @mr:            memory region
23834c67e2bfSSagi Grimberg  * @sg:            dma mapped scatterlist
23844c67e2bfSSagi Grimberg  * @sg_nents:      number of entries in sg
2385ff2ba993SChristoph Hellwig  * @sg_offset:     offset in bytes into sg
23864c67e2bfSSagi Grimberg  * @page_size:     page vector desired page size
23874c67e2bfSSagi Grimberg  *
23884c67e2bfSSagi Grimberg  * Constraints:
23894c67e2bfSSagi Grimberg  * - The first sg element is allowed to have an offset.
239052746129SBart Van Assche  * - Each sg element must either be aligned to page_size or virtually
239152746129SBart Van Assche  *   contiguous to the previous element. In case an sg element has a
239252746129SBart Van Assche  *   non-contiguous offset, the mapping prefix will not include it.
23934c67e2bfSSagi Grimberg  * - The last sg element is allowed to have length less than page_size.
23944c67e2bfSSagi Grimberg  * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
23954c67e2bfSSagi Grimberg  *   then only max_num_sg entries will be mapped.
239652746129SBart Van Assche  * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2397f5aa9159SSagi Grimberg  *   constraints holds and the page_size argument is ignored.
23984c67e2bfSSagi Grimberg  *
23994c67e2bfSSagi Grimberg  * Returns the number of sg elements that were mapped to the memory region.
24004c67e2bfSSagi Grimberg  *
24014c67e2bfSSagi Grimberg  * After this completes successfully, the memory region
24024c67e2bfSSagi Grimberg  * is ready for registration.
24034c67e2bfSSagi Grimberg  */
2404ff2ba993SChristoph Hellwig int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
24059aa8b321SBart Van Assche 		 unsigned int *sg_offset, unsigned int page_size)
24064c67e2bfSSagi Grimberg {
24073023a1e9SKamal Heib 	if (unlikely(!mr->device->ops.map_mr_sg))
240887915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
24094c67e2bfSSagi Grimberg 
24104c67e2bfSSagi Grimberg 	mr->page_size = page_size;
24114c67e2bfSSagi Grimberg 
24123023a1e9SKamal Heib 	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
24134c67e2bfSSagi Grimberg }
24144c67e2bfSSagi Grimberg EXPORT_SYMBOL(ib_map_mr_sg);
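
/*
 * A minimal sketch (not from this file) of the usual fast-registration
 * flow around ib_map_mr_sg(): DMA-map the SG list, map the prefix into
 * the MR, then post an IB_WR_REG_MR work request. Unwinding of the DMA
 * mapping on failure is abbreviated.
 */
static int fast_reg_example(struct ib_qp *qp, struct ib_mr *mr,
			    struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	int n;

	n = ib_dma_map_sg(qp->device, sg, sg_nents, DMA_BIDIRECTIONAL);
	if (!n)
		return -ENOMEM;

	n = ib_map_mr_sg(mr, sg, n, NULL, PAGE_SIZE);
	if (n <= 0)
		return n < 0 ? n : -EINVAL;

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr        = mr;
	reg_wr.key       = mr->rkey;
	reg_wr.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}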
24154c67e2bfSSagi Grimberg 
24164c67e2bfSSagi Grimberg /**
24174c67e2bfSSagi Grimberg  * ib_sg_to_pages() - Convert the largest prefix of a sg list
24184c67e2bfSSagi Grimberg  *     to a page vector
24194c67e2bfSSagi Grimberg  * @mr:            memory region
24204c67e2bfSSagi Grimberg  * @sgl:           dma mapped scatterlist
24214c67e2bfSSagi Grimberg  * @sg_nents:      number of entries in sg
24229aa8b321SBart Van Assche  * @sg_offset_p:   IN:  start offset in bytes into sg
24239aa8b321SBart Van Assche  *                 OUT: offset in bytes for element n of the sg of the first
24249aa8b321SBart Van Assche  *                      byte that has not been processed where n is the return
24259aa8b321SBart Van Assche  *                      value of this function.
24264c67e2bfSSagi Grimberg  * @set_page:      driver page assignment function pointer
24274c67e2bfSSagi Grimberg  *
24288f5ba10eSBart Van Assche  * Core service helper for drivers to convert the largest
24294c67e2bfSSagi Grimberg  * prefix of a given sg list to a page vector. The sg list
24304c67e2bfSSagi Grimberg  * prefix converted is the prefix that meets the requirements
24314c67e2bfSSagi Grimberg  * of ib_map_mr_sg.
24324c67e2bfSSagi Grimberg  *
24334c67e2bfSSagi Grimberg  * Returns the number of sg elements that were assigned to
24344c67e2bfSSagi Grimberg  * a page vector.
24354c67e2bfSSagi Grimberg  */
2436ff2ba993SChristoph Hellwig int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
24379aa8b321SBart Van Assche 		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
24384c67e2bfSSagi Grimberg {
24394c67e2bfSSagi Grimberg 	struct scatterlist *sg;
2440b6aeb980SBart Van Assche 	u64 last_end_dma_addr = 0;
24419aa8b321SBart Van Assche 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
24424c67e2bfSSagi Grimberg 	unsigned int last_page_off = 0;
24434c67e2bfSSagi Grimberg 	u64 page_mask = ~((u64)mr->page_size - 1);
24448f5ba10eSBart Van Assche 	int i, ret;
24454c67e2bfSSagi Grimberg 
24469aa8b321SBart Van Assche 	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
24479aa8b321SBart Van Assche 		return -EINVAL;
24489aa8b321SBart Van Assche 
2449ff2ba993SChristoph Hellwig 	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
24504c67e2bfSSagi Grimberg 	mr->length = 0;
24514c67e2bfSSagi Grimberg 
24524c67e2bfSSagi Grimberg 	for_each_sg(sgl, sg, sg_nents, i) {
2453ff2ba993SChristoph Hellwig 		u64 dma_addr = sg_dma_address(sg) + sg_offset;
24549aa8b321SBart Van Assche 		u64 prev_addr = dma_addr;
2455ff2ba993SChristoph Hellwig 		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
24564c67e2bfSSagi Grimberg 		u64 end_dma_addr = dma_addr + dma_len;
24574c67e2bfSSagi Grimberg 		u64 page_addr = dma_addr & page_mask;
24584c67e2bfSSagi Grimberg 
24598f5ba10eSBart Van Assche 		/*
24608f5ba10eSBart Van Assche 		 * For the second and later elements, check whether either the
24618f5ba10eSBart Van Assche 		 * end of element i-1 or the start of element i is not aligned
24628f5ba10eSBart Van Assche 		 * on a page boundary.
24638f5ba10eSBart Van Assche 		 */
24648f5ba10eSBart Van Assche 		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
24658f5ba10eSBart Van Assche 			/* Stop mapping if there is a gap. */
24668f5ba10eSBart Van Assche 			if (last_end_dma_addr != dma_addr)
24678f5ba10eSBart Van Assche 				break;
24684c67e2bfSSagi Grimberg 
24698f5ba10eSBart Van Assche 			/*
24708f5ba10eSBart Van Assche 			 * Coalesce this element with the last. If it is small
24718f5ba10eSBart Van Assche 			 * enough just update mr->length. Otherwise start
24728f5ba10eSBart Van Assche 			 * mapping from the next page.
24738f5ba10eSBart Van Assche 			 */
24748f5ba10eSBart Van Assche 			goto next_page;
24754c67e2bfSSagi Grimberg 		}
24764c67e2bfSSagi Grimberg 
24774c67e2bfSSagi Grimberg 		do {
24788f5ba10eSBart Van Assche 			ret = set_page(mr, page_addr);
24799aa8b321SBart Van Assche 			if (unlikely(ret < 0)) {
24809aa8b321SBart Van Assche 				sg_offset = prev_addr - sg_dma_address(sg);
24819aa8b321SBart Van Assche 				mr->length += prev_addr - dma_addr;
24829aa8b321SBart Van Assche 				if (sg_offset_p)
24839aa8b321SBart Van Assche 					*sg_offset_p = sg_offset;
24849aa8b321SBart Van Assche 				return i || sg_offset ? i : ret;
24859aa8b321SBart Van Assche 			}
24869aa8b321SBart Van Assche 			prev_addr = page_addr;
24878f5ba10eSBart Van Assche next_page:
24884c67e2bfSSagi Grimberg 			page_addr += mr->page_size;
24894c67e2bfSSagi Grimberg 		} while (page_addr < end_dma_addr);
24904c67e2bfSSagi Grimberg 
24914c67e2bfSSagi Grimberg 		mr->length += dma_len;
24924c67e2bfSSagi Grimberg 		last_end_dma_addr = end_dma_addr;
24934c67e2bfSSagi Grimberg 		last_page_off = end_dma_addr & ~page_mask;
2494ff2ba993SChristoph Hellwig 
2495ff2ba993SChristoph Hellwig 		sg_offset = 0;
24964c67e2bfSSagi Grimberg 	}
24974c67e2bfSSagi Grimberg 
24989aa8b321SBart Van Assche 	if (sg_offset_p)
24999aa8b321SBart Van Assche 		*sg_offset_p = 0;
25004c67e2bfSSagi Grimberg 	return i;
25014c67e2bfSSagi Grimberg }
25024c67e2bfSSagi Grimberg EXPORT_SYMBOL(ib_sg_to_pages);
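
/*
 * A minimal sketch (not from this file) of the driver side: a hypothetical
 * provider's map_mr_sg handler hands ib_sg_to_pages() a set_page callback
 * that records each page address in a private list, much like the
 * mlx4/mlx5 drivers do.
 */
struct example_mr {
	struct ib_mr ibmr;
	u64 *pages;
	int npages;
	int max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}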
2503765d6774SSteve Wise 
2504765d6774SSteve Wise struct ib_drain_cqe {
2505765d6774SSteve Wise 	struct ib_cqe cqe;
2506765d6774SSteve Wise 	struct completion done;
2507765d6774SSteve Wise };
2508765d6774SSteve Wise 
2509765d6774SSteve Wise static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2510765d6774SSteve Wise {
2511765d6774SSteve Wise 	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2512765d6774SSteve Wise 						cqe);
2513765d6774SSteve Wise 
2514765d6774SSteve Wise 	complete(&cqe->done);
2515765d6774SSteve Wise }
2516765d6774SSteve Wise 
2517765d6774SSteve Wise /*
2518765d6774SSteve Wise  * Post a WR on the SQ and block until its completion is reaped.
2519765d6774SSteve Wise  */
2520765d6774SSteve Wise static void __ib_drain_sq(struct ib_qp *qp)
2521765d6774SSteve Wise {
2522f039f44fSBart Van Assche 	struct ib_cq *cq = qp->send_cq;
2523765d6774SSteve Wise 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2524765d6774SSteve Wise 	struct ib_drain_cqe sdrain;
2525a1ae7d03SBart Van Assche 	struct ib_rdma_wr swr = {
2526a1ae7d03SBart Van Assche 		.wr = {
25276ee68773SAndrew Morton 			.next = NULL,
25286ee68773SAndrew Morton 			{ .wr_cqe	= &sdrain.cqe, },
2529a1ae7d03SBart Van Assche 			.opcode	= IB_WR_RDMA_WRITE,
2530a1ae7d03SBart Van Assche 		},
2531a1ae7d03SBart Van Assche 	};
2532765d6774SSteve Wise 	int ret;
2533765d6774SSteve Wise 
2534765d6774SSteve Wise 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2535765d6774SSteve Wise 	if (ret) {
2536765d6774SSteve Wise 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2537765d6774SSteve Wise 		return;
2538765d6774SSteve Wise 	}
2539765d6774SSteve Wise 
2540aaebd377SMax Gurtovoy 	sdrain.cqe.done = ib_drain_qp_done;
2541aaebd377SMax Gurtovoy 	init_completion(&sdrain.done);
2542aaebd377SMax Gurtovoy 
25431fec77bfSBart Van Assche 	ret = ib_post_send(qp, &swr.wr, NULL);
2544765d6774SSteve Wise 	if (ret) {
2545765d6774SSteve Wise 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2546765d6774SSteve Wise 		return;
2547765d6774SSteve Wise 	}
2548765d6774SSteve Wise 
2549f039f44fSBart Van Assche 	if (cq->poll_ctx == IB_POLL_DIRECT)
2550f039f44fSBart Van Assche 		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2551f039f44fSBart Van Assche 			ib_process_cq_direct(cq, -1);
2552f039f44fSBart Van Assche 	else
2553765d6774SSteve Wise 		wait_for_completion(&sdrain.done);
2554765d6774SSteve Wise }
2555765d6774SSteve Wise 
2556765d6774SSteve Wise /*
2557765d6774SSteve Wise  * Post a WR on the RQ and block until its completion is reaped.
2558765d6774SSteve Wise  */
2559765d6774SSteve Wise static void __ib_drain_rq(struct ib_qp *qp)
2560765d6774SSteve Wise {
2561f039f44fSBart Van Assche 	struct ib_cq *cq = qp->recv_cq;
2562765d6774SSteve Wise 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2563765d6774SSteve Wise 	struct ib_drain_cqe rdrain;
25641fec77bfSBart Van Assche 	struct ib_recv_wr rwr = {};
2565765d6774SSteve Wise 	int ret;
2566765d6774SSteve Wise 
2567765d6774SSteve Wise 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2568765d6774SSteve Wise 	if (ret) {
2569765d6774SSteve Wise 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2570765d6774SSteve Wise 		return;
2571765d6774SSteve Wise 	}
2572765d6774SSteve Wise 
2573aaebd377SMax Gurtovoy 	rwr.wr_cqe = &rdrain.cqe;
2574aaebd377SMax Gurtovoy 	rdrain.cqe.done = ib_drain_qp_done;
2575aaebd377SMax Gurtovoy 	init_completion(&rdrain.done);
2576aaebd377SMax Gurtovoy 
25771fec77bfSBart Van Assche 	ret = ib_post_recv(qp, &rwr, NULL);
2578765d6774SSteve Wise 	if (ret) {
2579765d6774SSteve Wise 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2580765d6774SSteve Wise 		return;
2581765d6774SSteve Wise 	}
2582765d6774SSteve Wise 
2583f039f44fSBart Van Assche 	if (cq->poll_ctx == IB_POLL_DIRECT)
2584f039f44fSBart Van Assche 		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2585f039f44fSBart Van Assche 			ib_process_cq_direct(cq, -1);
2586f039f44fSBart Van Assche 	else
2587765d6774SSteve Wise 		wait_for_completion(&rdrain.done);
2588765d6774SSteve Wise }
2589765d6774SSteve Wise 
2590765d6774SSteve Wise /**
2591765d6774SSteve Wise  * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2592765d6774SSteve Wise  *		   application.
2593765d6774SSteve Wise  * @qp:            queue pair to drain
2594765d6774SSteve Wise  *
2595765d6774SSteve Wise  * If the device has a provider-specific drain function, then
2596765d6774SSteve Wise  * call that.  Otherwise call the generic drain function
2597765d6774SSteve Wise  * __ib_drain_sq().
2598765d6774SSteve Wise  *
2599765d6774SSteve Wise  * The caller must:
2600765d6774SSteve Wise  *
2601765d6774SSteve Wise  * ensure there is room in the CQ and SQ for the drain work request and
2602765d6774SSteve Wise  * completion.
2603765d6774SSteve Wise  *
2604f039f44fSBart Van Assche  * allocate the CQ using ib_alloc_cq().
2605765d6774SSteve Wise  *
2606765d6774SSteve Wise  * ensure that there are no other contexts that are posting WRs concurrently.
2607765d6774SSteve Wise  * Otherwise the drain is not guaranteed.
2608765d6774SSteve Wise  */
2609765d6774SSteve Wise void ib_drain_sq(struct ib_qp *qp)
2610765d6774SSteve Wise {
26113023a1e9SKamal Heib 	if (qp->device->ops.drain_sq)
26123023a1e9SKamal Heib 		qp->device->ops.drain_sq(qp);
2613765d6774SSteve Wise 	else
2614765d6774SSteve Wise 		__ib_drain_sq(qp);
2615765d6774SSteve Wise }
2616765d6774SSteve Wise EXPORT_SYMBOL(ib_drain_sq);
2617765d6774SSteve Wise 
2618765d6774SSteve Wise /**
2619765d6774SSteve Wise  * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2620765d6774SSteve Wise  *		   application.
2621765d6774SSteve Wise  * @qp:            queue pair to drain
2622765d6774SSteve Wise  *
2623765d6774SSteve Wise  * If the device has a provider-specific drain function, then
2624765d6774SSteve Wise  * call that.  Otherwise call the generic drain function
2625765d6774SSteve Wise  * __ib_drain_rq().
2626765d6774SSteve Wise  *
2627765d6774SSteve Wise  * The caller must:
2628765d6774SSteve Wise  *
2629765d6774SSteve Wise  * ensure there is room in the CQ and RQ for the drain work request and
2630765d6774SSteve Wise  * completion.
2631765d6774SSteve Wise  *
2632f039f44fSBart Van Assche  * allocate the CQ using ib_alloc_cq().
2633765d6774SSteve Wise  *
2634765d6774SSteve Wise  * ensure that there are no other contexts that are posting WRs concurrently.
2635765d6774SSteve Wise  * Otherwise the drain is not guaranteed.
2636765d6774SSteve Wise  */
2637765d6774SSteve Wise void ib_drain_rq(struct ib_qp *qp)
2638765d6774SSteve Wise {
26393023a1e9SKamal Heib 	if (qp->device->ops.drain_rq)
26403023a1e9SKamal Heib 		qp->device->ops.drain_rq(qp);
2641765d6774SSteve Wise 	else
2642765d6774SSteve Wise 		__ib_drain_rq(qp);
2643765d6774SSteve Wise }
2644765d6774SSteve Wise EXPORT_SYMBOL(ib_drain_rq);
2645765d6774SSteve Wise 
2646765d6774SSteve Wise /**
2647765d6774SSteve Wise  * ib_drain_qp() - Block until all CQEs have been consumed by the
2648765d6774SSteve Wise  *		   application on both the RQ and SQ.
2649765d6774SSteve Wise  * @qp:            queue pair to drain
2650765d6774SSteve Wise  *
2651765d6774SSteve Wise  * The caller must:
2652765d6774SSteve Wise  *
2653765d6774SSteve Wise  * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2654765d6774SSteve Wise  * and completions.
2655765d6774SSteve Wise  *
2656f039f44fSBart Van Assche  * allocate the CQs using ib_alloc_cq().
2657765d6774SSteve Wise  *
2658765d6774SSteve Wise  * ensure that there are no other contexts that are posting WRs concurrently.
2659765d6774SSteve Wise  * Otherwise the drain is not guaranteed.
2660765d6774SSteve Wise  */
2661765d6774SSteve Wise void ib_drain_qp(struct ib_qp *qp)
2662765d6774SSteve Wise {
2663765d6774SSteve Wise 	ib_drain_sq(qp);
266442235f80SSagi Grimberg 	if (!qp->srq)
2665765d6774SSteve Wise 		ib_drain_rq(qp);
2666765d6774SSteve Wise }
2667765d6774SSteve Wise EXPORT_SYMBOL(ib_drain_qp);
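
/*
 * A minimal sketch (not from this file): the usual teardown order, drain
 * first so all posted WRs have completed and their handlers have run,
 * then destroy the QP.
 */
static void teardown_example(struct ib_qp *qp)
{
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}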
2668f6a8a19bSDenis Drozdov 
2669f6a8a19bSDenis Drozdov struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
2670f6a8a19bSDenis Drozdov 				     enum rdma_netdev_t type, const char *name,
2671f6a8a19bSDenis Drozdov 				     unsigned char name_assign_type,
2672f6a8a19bSDenis Drozdov 				     void (*setup)(struct net_device *))
2673f6a8a19bSDenis Drozdov {
2674f6a8a19bSDenis Drozdov 	struct rdma_netdev_alloc_params params;
2675f6a8a19bSDenis Drozdov 	struct net_device *netdev;
2676f6a8a19bSDenis Drozdov 	int rc;
2677f6a8a19bSDenis Drozdov 
26783023a1e9SKamal Heib 	if (!device->ops.rdma_netdev_get_params)
2679f6a8a19bSDenis Drozdov 		return ERR_PTR(-EOPNOTSUPP);
2680f6a8a19bSDenis Drozdov 
26813023a1e9SKamal Heib 	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
26823023a1e9SKamal Heib 						&params);
2683f6a8a19bSDenis Drozdov 	if (rc)
2684f6a8a19bSDenis Drozdov 		return ERR_PTR(rc);
2685f6a8a19bSDenis Drozdov 
2686f6a8a19bSDenis Drozdov 	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2687f6a8a19bSDenis Drozdov 				  setup, params.txqs, params.rxqs);
2688f6a8a19bSDenis Drozdov 	if (!netdev)
2689f6a8a19bSDenis Drozdov 		return ERR_PTR(-ENOMEM);
2690f6a8a19bSDenis Drozdov 
2691f6a8a19bSDenis Drozdov 	return netdev;
2692f6a8a19bSDenis Drozdov }
2693f6a8a19bSDenis Drozdov EXPORT_SYMBOL(rdma_alloc_netdev);
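
/*
 * A minimal sketch (not from this file): an IPoIB-style ULP asking the
 * device for an offloaded netdev and falling back to a plain one when the
 * device has no rdma_netdev support; "my_setup" is a hypothetical
 * net_device setup routine.
 */
static struct net_device *alloc_netdev_example(struct ib_device *device,
					       u8 port,
					       void (*my_setup)(struct net_device *))
{
	struct net_device *ndev;

	ndev = rdma_alloc_netdev(device, port, RDMA_NETDEV_IPOIB, "ib%d",
				 NET_NAME_UNKNOWN, my_setup);
	/* note: alloc_netdev() returns NULL on failure, not an ERR_PTR */
	if (IS_ERR(ndev) && PTR_ERR(ndev) == -EOPNOTSUPP)
		ndev = alloc_netdev(0, "ib%d", NET_NAME_UNKNOWN, my_setup);

	return ndev;
}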
26945d6b0cb3SDenis Drozdov 
26955d6b0cb3SDenis Drozdov int rdma_init_netdev(struct ib_device *device, u8 port_num,
26965d6b0cb3SDenis Drozdov 		     enum rdma_netdev_t type, const char *name,
26975d6b0cb3SDenis Drozdov 		     unsigned char name_assign_type,
26985d6b0cb3SDenis Drozdov 		     void (*setup)(struct net_device *),
26995d6b0cb3SDenis Drozdov 		     struct net_device *netdev)
27005d6b0cb3SDenis Drozdov {
27015d6b0cb3SDenis Drozdov 	struct rdma_netdev_alloc_params params;
27025d6b0cb3SDenis Drozdov 	int rc;
27035d6b0cb3SDenis Drozdov 
27043023a1e9SKamal Heib 	if (!device->ops.rdma_netdev_get_params)
27055d6b0cb3SDenis Drozdov 		return -EOPNOTSUPP;
27065d6b0cb3SDenis Drozdov 
27073023a1e9SKamal Heib 	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
27083023a1e9SKamal Heib 						&params);
27095d6b0cb3SDenis Drozdov 	if (rc)
27105d6b0cb3SDenis Drozdov 		return rc;
27115d6b0cb3SDenis Drozdov 
27125d6b0cb3SDenis Drozdov 	return params.initialize_rdma_netdev(device, port_num,
27135d6b0cb3SDenis Drozdov 					     netdev, params.param);
27145d6b0cb3SDenis Drozdov }
27155d6b0cb3SDenis Drozdov EXPORT_SYMBOL(rdma_init_netdev);
2716a808273aSShiraz Saleem 
2717a808273aSShiraz Saleem void __rdma_block_iter_start(struct ib_block_iter *biter,
2718a808273aSShiraz Saleem 			     struct scatterlist *sglist, unsigned int nents,
2719a808273aSShiraz Saleem 			     unsigned long pgsz)
2720a808273aSShiraz Saleem {
2721a808273aSShiraz Saleem 	memset(biter, 0, sizeof(struct ib_block_iter));
2722a808273aSShiraz Saleem 	biter->__sg = sglist;
2723a808273aSShiraz Saleem 	biter->__sg_nents = nents;
2724a808273aSShiraz Saleem 
2725a808273aSShiraz Saleem 	/* Driver provides best block size to use */
2726a808273aSShiraz Saleem 	biter->__pg_bit = __fls(pgsz);
2727a808273aSShiraz Saleem }
2728a808273aSShiraz Saleem EXPORT_SYMBOL(__rdma_block_iter_start);
2729a808273aSShiraz Saleem 
2730a808273aSShiraz Saleem bool __rdma_block_iter_next(struct ib_block_iter *biter)
2731a808273aSShiraz Saleem {
2732a808273aSShiraz Saleem 	unsigned int block_offset;
2733a808273aSShiraz Saleem 
2734a808273aSShiraz Saleem 	if (!biter->__sg_nents || !biter->__sg)
2735a808273aSShiraz Saleem 		return false;
2736a808273aSShiraz Saleem 
2737a808273aSShiraz Saleem 	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
2738a808273aSShiraz Saleem 	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
2739a808273aSShiraz Saleem 	biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
2740a808273aSShiraz Saleem 
2741a808273aSShiraz Saleem 	if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
2742a808273aSShiraz Saleem 		biter->__sg_advance = 0;
2743a808273aSShiraz Saleem 		biter->__sg = sg_next(biter->__sg);
2744a808273aSShiraz Saleem 		biter->__sg_nents--;
2745a808273aSShiraz Saleem 	}
2746a808273aSShiraz Saleem 
2747a808273aSShiraz Saleem 	return true;
2748a808273aSShiraz Saleem }
2749a808273aSShiraz Saleem EXPORT_SYMBOL(__rdma_block_iter_next);
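
/*
 * A minimal sketch (not from this file): a driver consuming the block
 * iterator above through the rdma_for_each_block() helper to fill a
 * hardware page array; "hw_pas" and "pg_size" are hypothetical.
 */
static void fill_pas_example(struct scatterlist *sgl, unsigned int nents,
			     u64 *hw_pas, unsigned long pg_size)
{
	struct ib_block_iter biter;
	unsigned int i = 0;

	rdma_for_each_block(sgl, &biter, nents, pg_size)
		hw_pas[i++] = rdma_block_iter_dma_address(&biter);
}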
2750