/* xref: /openbmc/linux/drivers/infiniband/core/verbs.c (revision 3e5901cb) */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

#include <trace/events/rdma_core.h>

static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

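/*
 * Illustrative usage sketch (not part of the original file): a consumer
 * might use the two message helpers above when logging asynchronous events
 * and failed completions. The handler names here are hypothetical.
 *
 *	static void my_event_handler(struct ib_event *event, void *context)
 *	{
 *		pr_info("async event: %s\n", ib_event_msg(event->event));
 *	}
 *
 *	static void my_drain_cq(struct ib_cq *cq)
 *	{
 *		struct ib_wc wc;
 *
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			if (wc.status != IB_WC_SUCCESS)
 *				pr_err("wc %llu failed: %s\n", wc.wr_id,
 *				       ib_wc_status_msg(wc.status));
 *	}
 */
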
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return   1;
	case IB_RATE_5_GBPS:   return   2;
	case IB_RATE_10_GBPS:  return   4;
	case IB_RATE_20_GBPS:  return   8;
	case IB_RATE_30_GBPS:  return  12;
	case IB_RATE_40_GBPS:  return  16;
	case IB_RATE_60_GBPS:  return  24;
	case IB_RATE_80_GBPS:  return  32;
	case IB_RATE_120_GBPS: return  48;
	case IB_RATE_14_GBPS:  return   6;
	case IB_RATE_56_GBPS:  return  22;
	case IB_RATE_112_GBPS: return  45;
	case IB_RATE_168_GBPS: return  67;
	case IB_RATE_25_GBPS:  return  10;
	case IB_RATE_100_GBPS: return  40;
	case IB_RATE_200_GBPS: return  80;
	case IB_RATE_300_GBPS: return 120;
	case IB_RATE_28_GBPS:  return  11;
	case IB_RATE_50_GBPS:  return  20;
	case IB_RATE_400_GBPS: return 160;
	case IB_RATE_600_GBPS: return 240;
	default:	       return  -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:   return IB_RATE_2_5_GBPS;
	case 2:   return IB_RATE_5_GBPS;
	case 4:   return IB_RATE_10_GBPS;
	case 8:   return IB_RATE_20_GBPS;
	case 12:  return IB_RATE_30_GBPS;
	case 16:  return IB_RATE_40_GBPS;
	case 24:  return IB_RATE_60_GBPS;
	case 32:  return IB_RATE_80_GBPS;
	case 48:  return IB_RATE_120_GBPS;
	case 6:   return IB_RATE_14_GBPS;
	case 22:  return IB_RATE_56_GBPS;
	case 45:  return IB_RATE_112_GBPS;
	case 67:  return IB_RATE_168_GBPS;
	case 10:  return IB_RATE_25_GBPS;
	case 40:  return IB_RATE_100_GBPS;
	case 80:  return IB_RATE_200_GBPS;
	case 120: return IB_RATE_300_GBPS;
	case 11:  return IB_RATE_28_GBPS;
	case 20:  return IB_RATE_50_GBPS;
	case 160: return IB_RATE_400_GBPS;
	case 240: return IB_RATE_600_GBPS;
	default:  return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	case IB_RATE_28_GBPS:  return 28125;
	case IB_RATE_50_GBPS:  return 53125;
	case IB_RATE_400_GBPS: return 425000;
	case IB_RATE_600_GBPS: return 637500;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

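/*
 * Illustrative sketch (not from the original source): the multiplier is
 * relative to the 2.5 Gb/sec base rate, so converting back and forth is a
 * table lookup, while ib_rate_to_mbps() reports the actual signalling rate:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_100_GBPS);	// 40 (40 * 2.5G)
 *	enum ib_rate r = mult_to_ib_rate(40);		// IB_RATE_100_GBPS
 *	int mbps = ib_rate_to_mbps(r);			// 103125
 */
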
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type)
{
	if (node_type == RDMA_NODE_USNIC)
		return RDMA_TRANSPORT_USNIC;
	if (node_type == RDMA_NODE_USNIC_UDP)
		return RDMA_TRANSPORT_USNIC_UDP;
	if (node_type == RDMA_NODE_RNIC)
		return RDMA_TRANSPORT_IWARP;
	if (node_type == RDMA_NODE_UNSPECIFIED)
		return RDMA_TRANSPORT_UNSPECIFIED;

	return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	enum rdma_transport_type lt;

	if (device->ops.get_link_layer)
		return device->ops.get_link_layer(device, port_num);

	lt = rdma_node_get_transport(device->node_type);
	if (lt == RDMA_TRANSPORT_IB)
		return IB_LINK_LAYER_INFINIBAND;

	return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

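/*
 * Illustrative sketch (not from the original source): consumers typically
 * branch on the link layer before doing transport-specific addressing work.
 * The helper below is hypothetical.
 *
 *	static bool my_port_is_roce(struct ib_device *device, u8 port_num)
 *	{
 *		return rdma_port_get_link_layer(device, port_num) ==
 *		       IB_LINK_LAYER_ETHERNET;
 *	}
 */
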
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 * @caller: caller's build-time module name
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
		const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;
	int ret;

	pd = rdma_zalloc_drv_obj(device, ib_pd);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_set_task(&pd->res, caller);

	ret = device->ops.alloc_pd(pd, NULL);
	if (ret) {
		kfree(pd);
		return ERR_PTR(ret);
	}
	rdma_restrack_kadd(&pd->res);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device	= pd->device;
		mr->pd		= pd;
		mr->type        = IB_MR_TYPE_DMA;
		mr->uobject	= NULL;
		mr->need_inval	= false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);

/**
 * ib_dealloc_pd_user - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	rdma_restrack_del(&pd->res);
	pd->device->ops.dealloc_pd(pd, udata);
	kfree(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd_user);

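/*
 * Illustrative sketch (not from the original source): kernel consumers do
 * not call __ib_alloc_pd() directly; they go through the ib_alloc_pd()
 * wrapper macro in <rdma/ib_verbs.h>, which supplies KBUILD_MODNAME as the
 * @caller argument. A minimal PD lifecycle looks like:
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...	// create QPs/MRs/AHs under this PD, destroy them again
 *	ib_dealloc_pd(pd);
 */
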
/* Address handles */

/**
 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
 * @dest:       Pointer to destination ah_attr. Contents of the destination
 *              pointer are assumed to be invalid and are overwritten.
 * @src:        Pointer to source ah_attr.
 */
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src)
{
	*dest = *src;
	if (dest->grh.sgid_attr)
		rdma_hold_gid_attr(dest->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_copy_ah_attr);

/**
 * rdma_replace_ah_attr - Replace valid ah_attr with the new one.
 * @old:        Pointer to existing ah_attr which needs to be replaced.
 *              old is assumed to be valid or zero'd
 * @new:        Pointer to the new ah_attr.
 *
 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
 * the old ah_attr is valid; after that it copies the new attribute and holds
 * a reference to the copied sgid_attr.
 */
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new)
{
	rdma_destroy_ah_attr(old);
	*old = *new;
	if (old->grh.sgid_attr)
		rdma_hold_gid_attr(old->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_replace_ah_attr);

/**
 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 * @dest:       Pointer to destination ah_attr to copy to.
 *              dest is assumed to be valid or zero'd
 * @src:        Pointer to the new ah_attr.
 *
 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 * if it is valid. This also transfers ownership of internal references from
 * src to dest, making src invalid in the process. No new reference of the src
 * ah_attr is taken.
 */
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{
	rdma_destroy_ah_attr(dest);
	*dest = *src;
	src->grh.sgid_attr = NULL;
}
EXPORT_SYMBOL(rdma_move_ah_attr);

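/*
 * Illustrative sketch (not from the original source) of the copy vs. move
 * semantics above, using a hypothetical cached attribute:
 *
 *	struct rdma_ah_attr cached;	// assumed zero'd
 *
 *	rdma_copy_ah_attr(&cached, src);   // src and cached each hold a ref
 *	rdma_destroy_ah_attr(&cached);     // drops only the copied ref
 *
 *	rdma_move_ah_attr(&cached, src);   // ref moves; src->grh.sgid_attr
 *					   // is now NULL
 *	rdma_destroy_ah_attr(&cached);     // drops the single remaining ref
 */
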
/*
 * Validate that the rdma_ah_attr is valid for the device before passing it
 * off to the driver.
 */
static int rdma_check_ah_attr(struct ib_device *device,
			      struct rdma_ah_attr *ah_attr)
{
	if (!rdma_is_port_valid(device, ah_attr->port_num))
		return -EINVAL;

	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
	     ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		return -EINVAL;

	if (ah_attr->grh.sgid_attr) {
		/*
		 * Make sure the passed sgid_attr is consistent with the
		 * parameters
		 */
		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
			return -EINVAL;
	}
	return 0;
}

/*
 * If the ah requires a GRH then ensure that the sgid_attr pointer is filled
 * in. On success the caller is responsible to call rdma_unfill_sgid_attr().
 */
static int rdma_fill_sgid_attr(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr,
			       const struct ib_gid_attr **old_sgid_attr)
{
	const struct ib_gid_attr *sgid_attr;
	struct ib_global_route *grh;
	int ret;

	*old_sgid_attr = ah_attr->grh.sgid_attr;

	ret = rdma_check_ah_attr(device, ah_attr);
	if (ret)
		return ret;

	if (!(ah_attr->ah_flags & IB_AH_GRH))
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);
	if (grh->sgid_attr)
		return 0;

	sgid_attr =
		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	/* Move ownership of the kref into the ah_attr */
	grh->sgid_attr = sgid_attr;
	return 0;
}

static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
				  const struct ib_gid_attr *old_sgid_attr)
{
	/*
	 * Fill didn't change anything, the caller retains ownership of
	 * whatever it passed
	 */
	if (ah_attr->grh.sgid_attr == old_sgid_attr)
		return;

	/*
	 * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller
	 * doesn't see any change in the rdma_ah_attr. If we get here
	 * old_sgid_attr is NULL.
	 */
	rdma_destroy_ah_attr(ah_attr);
}

static const struct ib_gid_attr *
rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
		      const struct ib_gid_attr *old_attr)
{
	if (old_attr)
		rdma_put_gid_attr(old_attr);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
		return ah_attr->grh.sgid_attr;
	}
	return NULL;
}

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     u32 flags,
				     struct ib_udata *udata)
{
	struct ib_device *device = pd->device;
	struct ib_ah *ah;
	int ret;

	might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);

	if (!device->ops.create_ah)
		return ERR_PTR(-EOPNOTSUPP);

	ah = rdma_zalloc_drv_obj_gfp(
		device, ib_ah,
		(flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->device = device;
	ah->pd = pd;
	ah->type = ah_attr->type;
	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);

	ret = device->ops.create_ah(ah, ah_attr, flags, udata);
	if (ret) {
		kfree(ah);
		return ERR_PTR(ret);
	}

	atomic_inc(&pd->usecnt);
	return ah;
}

/**
 * rdma_create_ah - Creates an address handle for the
 * given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * It returns a newly allocated address handle on success and an ERR_PTR on
 * failure. The address handle is used to reference a local or global
 * destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct ib_ah *ah;
	int ret;

	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = _rdma_create_ah(pd, ah_attr, flags, NULL);

	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_ah);

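/*
 * Illustrative sketch (not from the original source): a kernel ULP that
 * already has a filled-in rdma_ah_attr creates and later destroys an AH
 * like this (rdma_destroy_ah() is the kernel-caller wrapper around
 * rdma_destroy_ah_user() below):
 *
 *	struct ib_ah *ah;
 *
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...	// post UD sends referencing ah
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */
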
/**
 * rdma_create_user_ah - Creates an address handle for the
 * given address vector.
 * It resolves the destination mac address for an ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input output buffer information needed by
 *         provider driver.
 *
 * It returns a newly allocated address handle on success and an ERR_PTR on
 * failure. The address handle is used to reference a local or global
 * destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct ib_ah *ah;
	int err;

	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (err)
		return ERR_PTR(err);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err) {
			ah = ERR_PTR(err);
			goto out;
		}
	}

	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);

out:
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_user_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx = context;
	u16 vlan_id = 0xffff;
	int ret;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
	if (ret)
		return false;

	return ctx->vlan_id == vlan_id;
}

static const struct ib_gid_attr *
get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
		       u16 vlan_id, const union ib_gid *sgid,
		       enum ib_gid_type gid_type)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				       &context);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in  src_in;
	struct sockaddr_in  dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * The ah_attr must have valid port_num and sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
				       struct rdma_ah_attr *ah_attr)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
	int hop_limit = 0xff;
	int ret = 0;

	/* If destination is link local and source GID is RoCEv1,
	 * IP stack is not used.
	 */
	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
		return ret;
	}

	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
					   ah_attr->roce.dmac,
					   sgid_attr, &hop_limit);

	grh->hop_limit = hop_limit;
	return ret;
}

/*
 * This function initializes address handle attributes from the incoming packet.
 * The incoming packet has the dgid of the receiver node on which this code is
 * executing, and the sgid contains the GID of the sender.
 *
 * When resolving the mac address of the destination, the arrived dgid is used
 * as the sgid, and the sgid is used as the dgid, because the sgid contains the
 * destination's GID to respond to.
 *
 * On success the caller is responsible to call rdma_destroy_ah_attr on the
 * attr.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	const struct ib_gid_attr *sgid_attr;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	might_sleep();

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = rdma_ah_find_type(device, port_num);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_port_num(ah_attr, port_num);

	if (rdma_protocol_roce(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		sgid_attr = get_sgid_attr_from_eth(device, port_num,
						   vlan_id, &dgid,
						   gid_type);
		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);

		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
		if (ret)
			rdma_destroy_ah_attr(ah_attr);

		return ret;
	} else {
		rdma_ah_set_dlid(ah_attr, wc->slid);
		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

		if ((wc->wc_flags & IB_WC_GRH) == 0)
			return 0;

		if (dgid.global.interface_id !=
					cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
			sgid_attr = rdma_find_gid_by_port(
				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
		} else
			sgid_attr = rdma_get_gid_attr(device, port_num, 0);

		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);

		return 0;
	}
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);

/**
 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 * of the reference
 *
 * @attr:	Pointer to AH attribute structure
 * @dgid:	Destination GID
 * @flow_label:	Flow label
 * @hop_limit:	Hop limit
 * @traffic_class: traffic class
 * @sgid_attr:	Pointer to SGID attribute
 *
 * This takes ownership of the sgid_attr reference. The caller must ensure
 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
 * calling this function.
 */
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr)
{
	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
			traffic_class);
	attr->grh.sgid_attr = sgid_attr;
}
EXPORT_SYMBOL(rdma_move_grh_sgid_attr);

/**
 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 * ah attribute.
 * @ah_attr: Pointer to ah attribute
 *
 * Release reference to the SGID attribute of the ah attribute if it is
 * non-NULL. It is safe to call this multiple times, and safe to call it on
 * a zero initialized ah_attr.
 */
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
{
	if (ah_attr->grh.sgid_attr) {
		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
		ah_attr->grh.sgid_attr = NULL;
	}
}
EXPORT_SYMBOL(rdma_destroy_ah_attr);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);

	rdma_destroy_ah_attr(&ah_attr);
	return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

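/*
 * Illustrative sketch (not from the original source): a UD responder can
 * use ib_create_ah_from_wc() to reply to the sender of a received datagram.
 * The surrounding receive handling here is hypothetical; grh points at the
 * start of the receive buffer when IB_WC_GRH is set in wc->wc_flags.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...	// post a UD send to wc->src_qp using ah
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */
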
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	const struct ib_gid_attr *old_sgid_attr;
	int ret;

	if (ah->type != ah_attr->type)
		return -EINVAL;

	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ret;

	ret = ah->device->ops.modify_ah ?
		ah->device->ops.modify_ah(ah, ah_attr) :
		-EOPNOTSUPP;

	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ret;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	ah_attr->grh.sgid_attr = NULL;

	return ah->device->ops.query_ah ?
		ah->device->ops.query_ah(ah, ah_attr) :
		-EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
{
	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
	struct ib_pd *pd;

	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);

	pd = ah->pd;

	ah->device->ops.destroy_ah(ah, flags);
	atomic_dec(&pd->usecnt);
	if (sgid_attr)
		rdma_put_gid_attr(sgid_attr);

	kfree(ah);
	return 0;
}
EXPORT_SYMBOL(rdma_destroy_ah_user);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;
	int ret;

	if (!pd->device->ops.create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	srq->device = pd->device;
	srq->pd = pd;
	srq->event_handler = srq_init_attr->event_handler;
	srq->srq_context = srq_init_attr->srq_context;
	srq->srq_type = srq_init_attr->srq_type;

	if (ib_srq_has_cq(srq->srq_type)) {
		srq->ext.cq = srq_init_attr->ext.cq;
		atomic_inc(&srq->ext.cq->usecnt);
	}
	if (srq->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
		atomic_inc(&srq->ext.xrc.xrcd->usecnt);
	}
	atomic_inc(&pd->usecnt);

	ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
	if (ret) {
		atomic_dec(&srq->pd->usecnt);
		if (srq->srq_type == IB_SRQT_XRC)
			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
		if (ib_srq_has_cq(srq->srq_type))
			atomic_dec(&srq->ext.cq->usecnt);
		kfree(srq);
		return ERR_PTR(ret);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

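/*
 * Illustrative sketch (not from the original source): a minimal basic-type
 * SRQ created by filling in ib_srq_init_attr, then armed with a limit so
 * the IB_EVENT_SRQ_LIMIT_REACHED event fires when posted receives run low.
 * The event handler name is hypothetical.
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.event_handler	= my_srq_event_handler,
 *		.srq_type	= IB_SRQT_BASIC,
 *		.attr		= { .max_wr = 256, .max_sge = 1 },
 *	};
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &init_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	// arm the limit event via ib_modify_srq() below
 *	ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */
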
1015d41fcc67SRoland Dreier int ib_modify_srq(struct ib_srq *srq,
1016d41fcc67SRoland Dreier 		  struct ib_srq_attr *srq_attr,
1017d41fcc67SRoland Dreier 		  enum ib_srq_attr_mask srq_attr_mask)
1018d41fcc67SRoland Dreier {
10193023a1e9SKamal Heib 	return srq->device->ops.modify_srq ?
10203023a1e9SKamal Heib 		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
10213023a1e9SKamal Heib 					    NULL) : -EOPNOTSUPP;
1022d41fcc67SRoland Dreier }
1023d41fcc67SRoland Dreier EXPORT_SYMBOL(ib_modify_srq);
1024d41fcc67SRoland Dreier 
1025d41fcc67SRoland Dreier int ib_query_srq(struct ib_srq *srq,
1026d41fcc67SRoland Dreier 		 struct ib_srq_attr *srq_attr)
1027d41fcc67SRoland Dreier {
10283023a1e9SKamal Heib 	return srq->device->ops.query_srq ?
10293023a1e9SKamal Heib 		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1030d41fcc67SRoland Dreier }
1031d41fcc67SRoland Dreier EXPORT_SYMBOL(ib_query_srq);
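
/*
 * Editor's note: a hedged sketch of arming the SRQ limit event, not part
 * of the original file. Passing IB_SRQ_LIMIT asks the device to raise an
 * IB_EVENT_SRQ_LIMIT_REACHED async event once the number of posted
 * receives drops below srq_limit; drivers without ops.modify_srq return
 * -EOPNOTSUPP, as seen above.
 */
static int example_arm_srq_limit(struct ib_srq *srq)
{
	struct ib_srq_attr attr = {
		.srq_limit = 16,	/* illustrative low watermark */
	};

	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}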
1032d41fcc67SRoland Dreier 
1033c4367a26SShamir Rabinovitch int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1034d41fcc67SRoland Dreier {
1035d41fcc67SRoland Dreier 	if (atomic_read(&srq->usecnt))
1036d41fcc67SRoland Dreier 		return -EBUSY;
1037d41fcc67SRoland Dreier 
103868e326deSLeon Romanovsky 	srq->device->ops.destroy_srq(srq, udata);
1039d41fcc67SRoland Dreier 
104068e326deSLeon Romanovsky 	atomic_dec(&srq->pd->usecnt);
104168e326deSLeon Romanovsky 	if (srq->srq_type == IB_SRQT_XRC)
104268e326deSLeon Romanovsky 		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
104368e326deSLeon Romanovsky 	if (ib_srq_has_cq(srq->srq_type))
104468e326deSLeon Romanovsky 		atomic_dec(&srq->ext.cq->usecnt);
104568e326deSLeon Romanovsky 	kfree(srq);
1046d41fcc67SRoland Dreier 
104768e326deSLeon Romanovsky 	return 0;
1048d41fcc67SRoland Dreier }
1049c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_destroy_srq_user);
1050d41fcc67SRoland Dreier 
10511da177e4SLinus Torvalds /* Queue pairs */
10521da177e4SLinus Torvalds 
10530e0ec7e0SSean Hefty static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
10540e0ec7e0SSean Hefty {
10550e0ec7e0SSean Hefty 	struct ib_qp *qp = context;
105673c40c61SYishai Hadas 	unsigned long flags;
10570e0ec7e0SSean Hefty 
105873c40c61SYishai Hadas 	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
10590e0ec7e0SSean Hefty 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1060eec9e29fSShlomo Pongratz 		if (event->element.qp->event_handler)
10610e0ec7e0SSean Hefty 			event->element.qp->event_handler(event, event->element.qp->qp_context);
106273c40c61SYishai Hadas 	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
10630e0ec7e0SSean Hefty }
10640e0ec7e0SSean Hefty 
1065d3d72d90SSean Hefty static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
1066d3d72d90SSean Hefty {
1067d3d72d90SSean Hefty 	mutex_lock(&xrcd->tgt_qp_mutex);
1068d3d72d90SSean Hefty 	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
1069d3d72d90SSean Hefty 	mutex_unlock(&xrcd->tgt_qp_mutex);
1070d3d72d90SSean Hefty }
1071d3d72d90SSean Hefty 
10720e0ec7e0SSean Hefty static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
10730e0ec7e0SSean Hefty 				  void (*event_handler)(struct ib_event *, void *),
10740e0ec7e0SSean Hefty 				  void *qp_context)
1075d3d72d90SSean Hefty {
10760e0ec7e0SSean Hefty 	struct ib_qp *qp;
10770e0ec7e0SSean Hefty 	unsigned long flags;
1078d291f1a6SDaniel Jurgens 	int err;
10790e0ec7e0SSean Hefty 
10800e0ec7e0SSean Hefty 	qp = kzalloc(sizeof *qp, GFP_KERNEL);
10810e0ec7e0SSean Hefty 	if (!qp)
10820e0ec7e0SSean Hefty 		return ERR_PTR(-ENOMEM);
10830e0ec7e0SSean Hefty 
10840e0ec7e0SSean Hefty 	qp->real_qp = real_qp;
1085d291f1a6SDaniel Jurgens 	err = ib_open_shared_qp_security(qp, real_qp->device);
1086d291f1a6SDaniel Jurgens 	if (err) {
1087d291f1a6SDaniel Jurgens 		kfree(qp);
1088d291f1a6SDaniel Jurgens 		return ERR_PTR(err);
1089d291f1a6SDaniel Jurgens 	}
1090d291f1a6SDaniel Jurgens 
10920e0ec7e0SSean Hefty 	atomic_inc(&real_qp->usecnt);
10930e0ec7e0SSean Hefty 	qp->device = real_qp->device;
10940e0ec7e0SSean Hefty 	qp->event_handler = event_handler;
10950e0ec7e0SSean Hefty 	qp->qp_context = qp_context;
10960e0ec7e0SSean Hefty 	qp->qp_num = real_qp->qp_num;
10970e0ec7e0SSean Hefty 	qp->qp_type = real_qp->qp_type;
10980e0ec7e0SSean Hefty 
10990e0ec7e0SSean Hefty 	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
11000e0ec7e0SSean Hefty 	list_add(&qp->open_list, &real_qp->open_list);
11010e0ec7e0SSean Hefty 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
11020e0ec7e0SSean Hefty 
11030e0ec7e0SSean Hefty 	return qp;
1104d3d72d90SSean Hefty }
1105d3d72d90SSean Hefty 
11060e0ec7e0SSean Hefty struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
11070e0ec7e0SSean Hefty 			 struct ib_qp_open_attr *qp_open_attr)
11080e0ec7e0SSean Hefty {
11090e0ec7e0SSean Hefty 	struct ib_qp *qp, *real_qp;
11100e0ec7e0SSean Hefty 
11110e0ec7e0SSean Hefty 	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
11120e0ec7e0SSean Hefty 		return ERR_PTR(-EINVAL);
11130e0ec7e0SSean Hefty 
11140e0ec7e0SSean Hefty 	qp = ERR_PTR(-EINVAL);
11150e0ec7e0SSean Hefty 	mutex_lock(&xrcd->tgt_qp_mutex);
11160e0ec7e0SSean Hefty 	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
11170e0ec7e0SSean Hefty 		if (real_qp->qp_num == qp_open_attr->qp_num) {
11180e0ec7e0SSean Hefty 			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
11190e0ec7e0SSean Hefty 					  qp_open_attr->qp_context);
11200e0ec7e0SSean Hefty 			break;
11210e0ec7e0SSean Hefty 		}
11220e0ec7e0SSean Hefty 	}
11230e0ec7e0SSean Hefty 	mutex_unlock(&xrcd->tgt_qp_mutex);
11240e0ec7e0SSean Hefty 	return qp;
11250e0ec7e0SSean Hefty }
11260e0ec7e0SSean Hefty EXPORT_SYMBOL(ib_open_qp);
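
/*
 * Editor's note: a hedged sketch of sharing an XRC target QP, not part of
 * the original file. It assumes the real IB_QPT_XRC_TGT QP was already
 * created on "xrcd" and that "qp_num" names it; ib_open_qp() then returns
 * a lightweight handle whose events are fanned out by
 * __ib_shared_qp_event_handler() above, and which is released with
 * ib_close_qp() rather than ib_destroy_qp().
 */
static struct ib_qp *example_open_xrc_tgt(struct ib_xrcd *xrcd, u32 qp_num)
{
	struct ib_qp_open_attr open_attr = {
		.qp_num  = qp_num,
		.qp_type = IB_QPT_XRC_TGT,
	};

	return ib_open_qp(xrcd, &open_attr);
}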
11270e0ec7e0SSean Hefty 
1128c4367a26SShamir Rabinovitch static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1129c4367a26SShamir Rabinovitch 					struct ib_qp_init_attr *qp_init_attr,
1130c4367a26SShamir Rabinovitch 					struct ib_udata *udata)
11311da177e4SLinus Torvalds {
113204c41bf3SChristoph Hellwig 	struct ib_qp *real_qp = qp;
11331da177e4SLinus Torvalds 
11340e0ec7e0SSean Hefty 	qp->event_handler = __ib_shared_qp_event_handler;
11350e0ec7e0SSean Hefty 	qp->qp_context = qp;
1136b42b63cfSSean Hefty 	qp->pd = NULL;
1137b42b63cfSSean Hefty 	qp->send_cq = qp->recv_cq = NULL;
1138b42b63cfSSean Hefty 	qp->srq = NULL;
1139b42b63cfSSean Hefty 	qp->xrcd = qp_init_attr->xrcd;
1140b42b63cfSSean Hefty 	atomic_inc(&qp_init_attr->xrcd->usecnt);
11410e0ec7e0SSean Hefty 	INIT_LIST_HEAD(&qp->open_list);
11420e0ec7e0SSean Hefty 
11430e0ec7e0SSean Hefty 	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
11440e0ec7e0SSean Hefty 			  qp_init_attr->qp_context);
1145535005caSYuval Avnery 	if (IS_ERR(qp))
1146535005caSYuval Avnery 		return qp;
1147535005caSYuval Avnery 
11480e0ec7e0SSean Hefty 	__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
114904c41bf3SChristoph Hellwig 	return qp;
115004c41bf3SChristoph Hellwig }
115104c41bf3SChristoph Hellwig 
1152c4367a26SShamir Rabinovitch struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
1153c4367a26SShamir Rabinovitch 				struct ib_qp_init_attr *qp_init_attr,
1154c4367a26SShamir Rabinovitch 				struct ib_udata *udata)
115504c41bf3SChristoph Hellwig {
115604c41bf3SChristoph Hellwig 	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
115704c41bf3SChristoph Hellwig 	struct ib_qp *qp;
1158a060b562SChristoph Hellwig 	int ret;
1159a060b562SChristoph Hellwig 
1160a9017e23SYishai Hadas 	if (qp_init_attr->rwq_ind_tbl &&
1161a9017e23SYishai Hadas 	    (qp_init_attr->recv_cq ||
1162a9017e23SYishai Hadas 	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
1163a9017e23SYishai Hadas 	    qp_init_attr->cap.max_recv_sge))
1164a9017e23SYishai Hadas 		return ERR_PTR(-EINVAL);
1165a9017e23SYishai Hadas 
1166185eddc4SMax Gurtovoy 	if ((qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) &&
1167185eddc4SMax Gurtovoy 	    !(device->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER))
1168185eddc4SMax Gurtovoy 		return ERR_PTR(-EINVAL);
1169185eddc4SMax Gurtovoy 
1170a060b562SChristoph Hellwig 	/*
1171a060b562SChristoph Hellwig 	 * If the caller is using the RDMA API, calculate the resources
1172a060b562SChristoph Hellwig 	 * needed for the RDMA READ/WRITE operations.
1173a060b562SChristoph Hellwig 	 *
1174a060b562SChristoph Hellwig 	 * Note that these callers need to pass in a port number.
1175a060b562SChristoph Hellwig 	 */
1176a060b562SChristoph Hellwig 	if (qp_init_attr->cap.max_rdma_ctxs)
1177a060b562SChristoph Hellwig 		rdma_rw_init_qp(device, qp_init_attr);
117804c41bf3SChristoph Hellwig 
11792f08ee36SSteve Wise 	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
118004c41bf3SChristoph Hellwig 	if (IS_ERR(qp))
118104c41bf3SChristoph Hellwig 		return qp;
118204c41bf3SChristoph Hellwig 
1183d291f1a6SDaniel Jurgens 	ret = ib_create_qp_security(qp, device);
1184535005caSYuval Avnery 	if (ret)
1185535005caSYuval Avnery 		goto err;
1186d291f1a6SDaniel Jurgens 
118704c41bf3SChristoph Hellwig 	qp->qp_type    = qp_init_attr->qp_type;
1188a9017e23SYishai Hadas 	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
118904c41bf3SChristoph Hellwig 
119004c41bf3SChristoph Hellwig 	atomic_set(&qp->usecnt, 0);
1191fffb0383SChristoph Hellwig 	qp->mrs_used = 0;
1192fffb0383SChristoph Hellwig 	spin_lock_init(&qp->mr_lock);
1193a060b562SChristoph Hellwig 	INIT_LIST_HEAD(&qp->rdma_mrs);
11940e353e34SChristoph Hellwig 	INIT_LIST_HEAD(&qp->sig_mrs);
1195498ca3c8SNoa Osherovich 	qp->port = 0;
1196fffb0383SChristoph Hellwig 
1197535005caSYuval Avnery 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
1198c4367a26SShamir Rabinovitch 		struct ib_qp *xrc_qp =
1199c4367a26SShamir Rabinovitch 			create_xrc_qp_user(qp, qp_init_attr, udata);
1200535005caSYuval Avnery 
1201535005caSYuval Avnery 		if (IS_ERR(xrc_qp)) {
1202535005caSYuval Avnery 			ret = PTR_ERR(xrc_qp);
1203535005caSYuval Avnery 			goto err;
1204535005caSYuval Avnery 		}
1205535005caSYuval Avnery 		return xrc_qp;
1206535005caSYuval Avnery 	}
120704c41bf3SChristoph Hellwig 
12081da177e4SLinus Torvalds 	qp->event_handler = qp_init_attr->event_handler;
12091da177e4SLinus Torvalds 	qp->qp_context = qp_init_attr->qp_context;
1210b42b63cfSSean Hefty 	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
1211b42b63cfSSean Hefty 		qp->recv_cq = NULL;
1212b42b63cfSSean Hefty 		qp->srq = NULL;
1213b42b63cfSSean Hefty 	} else {
1214b42b63cfSSean Hefty 		qp->recv_cq = qp_init_attr->recv_cq;
1215a9017e23SYishai Hadas 		if (qp_init_attr->recv_cq)
1216b42b63cfSSean Hefty 			atomic_inc(&qp_init_attr->recv_cq->usecnt);
1217b42b63cfSSean Hefty 		qp->srq = qp_init_attr->srq;
1218b42b63cfSSean Hefty 		if (qp->srq)
1219b42b63cfSSean Hefty 			atomic_inc(&qp_init_attr->srq->usecnt);
1220b42b63cfSSean Hefty 	}
1221b42b63cfSSean Hefty 
12221da177e4SLinus Torvalds 	qp->send_cq = qp_init_attr->send_cq;
1223b42b63cfSSean Hefty 	qp->xrcd    = NULL;
1224b42b63cfSSean Hefty 
12251da177e4SLinus Torvalds 	atomic_inc(&pd->usecnt);
1226a9017e23SYishai Hadas 	if (qp_init_attr->send_cq)
12271da177e4SLinus Torvalds 		atomic_inc(&qp_init_attr->send_cq->usecnt);
1228a9017e23SYishai Hadas 	if (qp_init_attr->rwq_ind_tbl)
1229a9017e23SYishai Hadas 		atomic_inc(&qp->rwq_ind_tbl->usecnt);
1230a060b562SChristoph Hellwig 
1231a060b562SChristoph Hellwig 	if (qp_init_attr->cap.max_rdma_ctxs) {
1232a060b562SChristoph Hellwig 		ret = rdma_rw_init_mrs(qp, qp_init_attr);
1233535005caSYuval Avnery 		if (ret)
1234535005caSYuval Avnery 			goto err;
1235a060b562SChristoph Hellwig 	}
1236a060b562SChristoph Hellwig 
1237632bc3f6SBart Van Assche 	/*
1238632bc3f6SBart Van Assche 	 * Note: all hw drivers guarantee that max_send_sge is lower than
1239632bc3f6SBart Van Assche 	 * the device RDMA WRITE SGE limit, but not all hw drivers ensure that
1240632bc3f6SBart Van Assche 	 * max_send_sge <= max_sge_rd.
1241632bc3f6SBart Van Assche 	 */
1242632bc3f6SBart Van Assche 	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1243632bc3f6SBart Van Assche 	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1244632bc3f6SBart Van Assche 				 device->attrs.max_sge_rd);
1245185eddc4SMax Gurtovoy 	if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1246185eddc4SMax Gurtovoy 		qp->integrity_en = true;
1247632bc3f6SBart Van Assche 
12481da177e4SLinus Torvalds 	return qp;
1249535005caSYuval Avnery 
1250535005caSYuval Avnery err:
1251535005caSYuval Avnery 	ib_destroy_qp(qp);
1252535005caSYuval Avnery 	return ERR_PTR(ret);
1253535005caSYuval Avnery 
12541da177e4SLinus Torvalds }
1255c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_create_qp_user);
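
/*
 * Editor's note: a hedged sketch of kernel QP creation, not part of the
 * original file. It assumes valid "pd" and "cq" objects; all sizes are
 * illustrative. Kernel callers normally use the ib_create_qp() wrapper,
 * which reaches ib_create_qp_user() with a NULL udata. Setting
 * cap.max_rdma_ctxs would additionally exercise the rdma_rw_init_qp()/
 * rdma_rw_init_mrs() paths above.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq = cq,
		.recv_cq = cq,		/* one CQ may serve both queues */
		.cap = {
			.max_send_wr  = 64,	/* illustrative */
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &init_attr);
}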
12561da177e4SLinus Torvalds 
12578a51866fSRoland Dreier static const struct {
12588a51866fSRoland Dreier 	int			valid;
1259b42b63cfSSean Hefty 	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
1260b42b63cfSSean Hefty 	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
12618a51866fSRoland Dreier } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
12628a51866fSRoland Dreier 	[IB_QPS_RESET] = {
12638a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
12648a51866fSRoland Dreier 		[IB_QPS_INIT]  = {
12658a51866fSRoland Dreier 			.valid = 1,
12668a51866fSRoland Dreier 			.req_param = {
12678a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
12688a51866fSRoland Dreier 						IB_QP_PORT			|
12698a51866fSRoland Dreier 						IB_QP_QKEY),
1270c938a616SOr Gerlitz 				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
12718a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
12728a51866fSRoland Dreier 						IB_QP_PORT			|
12738a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
12748a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
12758a51866fSRoland Dreier 						IB_QP_PORT			|
12768a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
1277b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1278b42b63cfSSean Hefty 						IB_QP_PORT			|
1279b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS),
1280b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1281b42b63cfSSean Hefty 						IB_QP_PORT			|
1282b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS),
12838a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
12848a51866fSRoland Dreier 						IB_QP_QKEY),
12858a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
12868a51866fSRoland Dreier 						IB_QP_QKEY),
12878a51866fSRoland Dreier 			}
12888a51866fSRoland Dreier 		},
12898a51866fSRoland Dreier 	},
12908a51866fSRoland Dreier 	[IB_QPS_INIT]  = {
12918a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
12928a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
12938a51866fSRoland Dreier 		[IB_QPS_INIT]  = {
12948a51866fSRoland Dreier 			.valid = 1,
12958a51866fSRoland Dreier 			.opt_param = {
12968a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
12978a51866fSRoland Dreier 						IB_QP_PORT			|
12988a51866fSRoland Dreier 						IB_QP_QKEY),
12998a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
13008a51866fSRoland Dreier 						IB_QP_PORT			|
13018a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
13028a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
13038a51866fSRoland Dreier 						IB_QP_PORT			|
13048a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
1305b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1306b42b63cfSSean Hefty 						IB_QP_PORT			|
1307b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS),
1308b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1309b42b63cfSSean Hefty 						IB_QP_PORT			|
1310b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS),
13118a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
13128a51866fSRoland Dreier 						IB_QP_QKEY),
13138a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
13148a51866fSRoland Dreier 						IB_QP_QKEY),
13158a51866fSRoland Dreier 			}
13168a51866fSRoland Dreier 		},
13178a51866fSRoland Dreier 		[IB_QPS_RTR]   = {
13188a51866fSRoland Dreier 			.valid = 1,
13198a51866fSRoland Dreier 			.req_param = {
13208a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_AV			|
13218a51866fSRoland Dreier 						IB_QP_PATH_MTU			|
13228a51866fSRoland Dreier 						IB_QP_DEST_QPN			|
13238a51866fSRoland Dreier 						IB_QP_RQ_PSN),
13248a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_AV			|
13258a51866fSRoland Dreier 						IB_QP_PATH_MTU			|
13268a51866fSRoland Dreier 						IB_QP_DEST_QPN			|
13278a51866fSRoland Dreier 						IB_QP_RQ_PSN			|
13288a51866fSRoland Dreier 						IB_QP_MAX_DEST_RD_ATOMIC	|
13298a51866fSRoland Dreier 						IB_QP_MIN_RNR_TIMER),
1330b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_AV			|
1331b42b63cfSSean Hefty 						IB_QP_PATH_MTU			|
1332b42b63cfSSean Hefty 						IB_QP_DEST_QPN			|
1333b42b63cfSSean Hefty 						IB_QP_RQ_PSN),
1334b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
1335b42b63cfSSean Hefty 						IB_QP_PATH_MTU			|
1336b42b63cfSSean Hefty 						IB_QP_DEST_QPN			|
1337b42b63cfSSean Hefty 						IB_QP_RQ_PSN			|
1338b42b63cfSSean Hefty 						IB_QP_MAX_DEST_RD_ATOMIC	|
1339b42b63cfSSean Hefty 						IB_QP_MIN_RNR_TIMER),
13408a51866fSRoland Dreier 			},
13418a51866fSRoland Dreier 			.opt_param = {
13428a51866fSRoland Dreier 				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
13438a51866fSRoland Dreier 						 IB_QP_QKEY),
13448a51866fSRoland Dreier 				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
13458a51866fSRoland Dreier 						 IB_QP_ACCESS_FLAGS		|
13468a51866fSRoland Dreier 						 IB_QP_PKEY_INDEX),
13478a51866fSRoland Dreier 				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
13488a51866fSRoland Dreier 						 IB_QP_ACCESS_FLAGS		|
13498a51866fSRoland Dreier 						 IB_QP_PKEY_INDEX),
1350b42b63cfSSean Hefty 				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
1351b42b63cfSSean Hefty 						 IB_QP_ACCESS_FLAGS		|
1352b42b63cfSSean Hefty 						 IB_QP_PKEY_INDEX),
1353b42b63cfSSean Hefty 				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
1354b42b63cfSSean Hefty 						 IB_QP_ACCESS_FLAGS		|
1355b42b63cfSSean Hefty 						 IB_QP_PKEY_INDEX),
13568a51866fSRoland Dreier 				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
13578a51866fSRoland Dreier 						 IB_QP_QKEY),
13588a51866fSRoland Dreier 				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
13598a51866fSRoland Dreier 						 IB_QP_QKEY),
1360dd5f03beSMatan Barak 			 },
1361dbf727deSMatan Barak 		},
13628a51866fSRoland Dreier 	},
13638a51866fSRoland Dreier 	[IB_QPS_RTR]   = {
13648a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
13658a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
13668a51866fSRoland Dreier 		[IB_QPS_RTS]   = {
13678a51866fSRoland Dreier 			.valid = 1,
13688a51866fSRoland Dreier 			.req_param = {
13698a51866fSRoland Dreier 				[IB_QPT_UD]  = IB_QP_SQ_PSN,
13708a51866fSRoland Dreier 				[IB_QPT_UC]  = IB_QP_SQ_PSN,
13718a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
13728a51866fSRoland Dreier 						IB_QP_RETRY_CNT			|
13738a51866fSRoland Dreier 						IB_QP_RNR_RETRY			|
13748a51866fSRoland Dreier 						IB_QP_SQ_PSN			|
13758a51866fSRoland Dreier 						IB_QP_MAX_QP_RD_ATOMIC),
1376b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
1377b42b63cfSSean Hefty 						IB_QP_RETRY_CNT			|
1378b42b63cfSSean Hefty 						IB_QP_RNR_RETRY			|
1379b42b63cfSSean Hefty 						IB_QP_SQ_PSN			|
1380b42b63cfSSean Hefty 						IB_QP_MAX_QP_RD_ATOMIC),
1381b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
1382b42b63cfSSean Hefty 						IB_QP_SQ_PSN),
13838a51866fSRoland Dreier 				[IB_QPT_SMI] = IB_QP_SQ_PSN,
13848a51866fSRoland Dreier 				[IB_QPT_GSI] = IB_QP_SQ_PSN,
13858a51866fSRoland Dreier 			},
13868a51866fSRoland Dreier 			.opt_param = {
13878a51866fSRoland Dreier 				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
13888a51866fSRoland Dreier 						 IB_QP_QKEY),
13898a51866fSRoland Dreier 				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
13908a51866fSRoland Dreier 						 IB_QP_ALT_PATH			|
13918a51866fSRoland Dreier 						 IB_QP_ACCESS_FLAGS		|
13928a51866fSRoland Dreier 						 IB_QP_PATH_MIG_STATE),
13938a51866fSRoland Dreier 				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
13948a51866fSRoland Dreier 						 IB_QP_ALT_PATH			|
13958a51866fSRoland Dreier 						 IB_QP_ACCESS_FLAGS		|
13968a51866fSRoland Dreier 						 IB_QP_MIN_RNR_TIMER		|
13978a51866fSRoland Dreier 						 IB_QP_PATH_MIG_STATE),
1398b42b63cfSSean Hefty 				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1399b42b63cfSSean Hefty 						 IB_QP_ALT_PATH			|
1400b42b63cfSSean Hefty 						 IB_QP_ACCESS_FLAGS		|
1401b42b63cfSSean Hefty 						 IB_QP_PATH_MIG_STATE),
1402b42b63cfSSean Hefty 				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1403b42b63cfSSean Hefty 						 IB_QP_ALT_PATH			|
1404b42b63cfSSean Hefty 						 IB_QP_ACCESS_FLAGS		|
1405b42b63cfSSean Hefty 						 IB_QP_MIN_RNR_TIMER		|
1406b42b63cfSSean Hefty 						 IB_QP_PATH_MIG_STATE),
14078a51866fSRoland Dreier 				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
14088a51866fSRoland Dreier 						 IB_QP_QKEY),
14098a51866fSRoland Dreier 				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
14108a51866fSRoland Dreier 						 IB_QP_QKEY),
1411528e5a1bSBodong Wang 				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
14128a51866fSRoland Dreier 			 }
14138a51866fSRoland Dreier 		}
14148a51866fSRoland Dreier 	},
14158a51866fSRoland Dreier 	[IB_QPS_RTS]   = {
14168a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
14178a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
14188a51866fSRoland Dreier 		[IB_QPS_RTS]   = {
14198a51866fSRoland Dreier 			.valid = 1,
14208a51866fSRoland Dreier 			.opt_param = {
14218a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
14228a51866fSRoland Dreier 						IB_QP_QKEY),
14234546d31dSDotan Barak 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
14244546d31dSDotan Barak 						IB_QP_ACCESS_FLAGS		|
14258a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14268a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
14274546d31dSDotan Barak 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
14284546d31dSDotan Barak 						IB_QP_ACCESS_FLAGS		|
14298a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14308a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE		|
14318a51866fSRoland Dreier 						IB_QP_MIN_RNR_TIMER),
1432b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1433b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1434b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1435b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
1436b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1437b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1438b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1439b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE		|
1440b42b63cfSSean Hefty 						IB_QP_MIN_RNR_TIMER),
14418a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
14428a51866fSRoland Dreier 						IB_QP_QKEY),
14438a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
14448a51866fSRoland Dreier 						IB_QP_QKEY),
1445528e5a1bSBodong Wang 				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
14468a51866fSRoland Dreier 			}
14478a51866fSRoland Dreier 		},
14488a51866fSRoland Dreier 		[IB_QPS_SQD]   = {
14498a51866fSRoland Dreier 			.valid = 1,
14508a51866fSRoland Dreier 			.opt_param = {
14518a51866fSRoland Dreier 				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
14528a51866fSRoland Dreier 				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
14538a51866fSRoland Dreier 				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1454b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1455b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
14568a51866fSRoland Dreier 				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
14578a51866fSRoland Dreier 				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
14588a51866fSRoland Dreier 			}
14598a51866fSRoland Dreier 		},
14608a51866fSRoland Dreier 	},
14618a51866fSRoland Dreier 	[IB_QPS_SQD]   = {
14628a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
14638a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
14648a51866fSRoland Dreier 		[IB_QPS_RTS]   = {
14658a51866fSRoland Dreier 			.valid = 1,
14668a51866fSRoland Dreier 			.opt_param = {
14678a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
14688a51866fSRoland Dreier 						IB_QP_QKEY),
14698a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
14708a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14718a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS		|
14728a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
14738a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
14748a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
14758a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS		|
14768a51866fSRoland Dreier 						IB_QP_MIN_RNR_TIMER		|
14778a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
1478b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1479b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1480b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1481b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
1482b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1483b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1484b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1485b42b63cfSSean Hefty 						IB_QP_MIN_RNR_TIMER		|
1486b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
14878a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
14888a51866fSRoland Dreier 						IB_QP_QKEY),
14898a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
14908a51866fSRoland Dreier 						IB_QP_QKEY),
14918a51866fSRoland Dreier 			}
14928a51866fSRoland Dreier 		},
14938a51866fSRoland Dreier 		[IB_QPS_SQD]   = {
14948a51866fSRoland Dreier 			.valid = 1,
14958a51866fSRoland Dreier 			.opt_param = {
14968a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
14978a51866fSRoland Dreier 						IB_QP_QKEY),
14988a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_AV			|
14998a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
15008a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS		|
15018a51866fSRoland Dreier 						IB_QP_PKEY_INDEX		|
15028a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
15038a51866fSRoland Dreier 				[IB_QPT_RC]  = (IB_QP_PORT			|
15048a51866fSRoland Dreier 						IB_QP_AV			|
15058a51866fSRoland Dreier 						IB_QP_TIMEOUT			|
15068a51866fSRoland Dreier 						IB_QP_RETRY_CNT			|
15078a51866fSRoland Dreier 						IB_QP_RNR_RETRY			|
15088a51866fSRoland Dreier 						IB_QP_MAX_QP_RD_ATOMIC		|
15098a51866fSRoland Dreier 						IB_QP_MAX_DEST_RD_ATOMIC	|
15108a51866fSRoland Dreier 						IB_QP_ALT_PATH			|
15118a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS		|
15128a51866fSRoland Dreier 						IB_QP_PKEY_INDEX		|
15138a51866fSRoland Dreier 						IB_QP_MIN_RNR_TIMER		|
15148a51866fSRoland Dreier 						IB_QP_PATH_MIG_STATE),
1515b42b63cfSSean Hefty 				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
1516b42b63cfSSean Hefty 						IB_QP_AV			|
1517b42b63cfSSean Hefty 						IB_QP_TIMEOUT			|
1518b42b63cfSSean Hefty 						IB_QP_RETRY_CNT			|
1519b42b63cfSSean Hefty 						IB_QP_RNR_RETRY			|
1520b42b63cfSSean Hefty 						IB_QP_MAX_QP_RD_ATOMIC		|
1521b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1522b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1523b42b63cfSSean Hefty 						IB_QP_PKEY_INDEX		|
1524b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
1525b42b63cfSSean Hefty 				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
1526b42b63cfSSean Hefty 						IB_QP_AV			|
1527b42b63cfSSean Hefty 						IB_QP_TIMEOUT			|
1528b42b63cfSSean Hefty 						IB_QP_MAX_DEST_RD_ATOMIC	|
1529b42b63cfSSean Hefty 						IB_QP_ALT_PATH			|
1530b42b63cfSSean Hefty 						IB_QP_ACCESS_FLAGS		|
1531b42b63cfSSean Hefty 						IB_QP_PKEY_INDEX		|
1532b42b63cfSSean Hefty 						IB_QP_MIN_RNR_TIMER		|
1533b42b63cfSSean Hefty 						IB_QP_PATH_MIG_STATE),
15348a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
15358a51866fSRoland Dreier 						IB_QP_QKEY),
15368a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
15378a51866fSRoland Dreier 						IB_QP_QKEY),
15388a51866fSRoland Dreier 			}
15398a51866fSRoland Dreier 		}
15408a51866fSRoland Dreier 	},
15418a51866fSRoland Dreier 	[IB_QPS_SQE]   = {
15428a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
15438a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 },
15448a51866fSRoland Dreier 		[IB_QPS_RTS]   = {
15458a51866fSRoland Dreier 			.valid = 1,
15468a51866fSRoland Dreier 			.opt_param = {
15478a51866fSRoland Dreier 				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
15488a51866fSRoland Dreier 						IB_QP_QKEY),
15498a51866fSRoland Dreier 				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
15508a51866fSRoland Dreier 						IB_QP_ACCESS_FLAGS),
15518a51866fSRoland Dreier 				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
15528a51866fSRoland Dreier 						IB_QP_QKEY),
15538a51866fSRoland Dreier 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
15548a51866fSRoland Dreier 						IB_QP_QKEY),
15558a51866fSRoland Dreier 			}
15568a51866fSRoland Dreier 		}
15578a51866fSRoland Dreier 	},
15588a51866fSRoland Dreier 	[IB_QPS_ERR] = {
15598a51866fSRoland Dreier 		[IB_QPS_RESET] = { .valid = 1 },
15608a51866fSRoland Dreier 		[IB_QPS_ERR] =   { .valid = 1 }
15618a51866fSRoland Dreier 	}
15628a51866fSRoland Dreier };
15638a51866fSRoland Dreier 
156419b1f540SLeon Romanovsky bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1565d31131bbSKamal Heib 			enum ib_qp_type type, enum ib_qp_attr_mask mask)
15668a51866fSRoland Dreier {
15678a51866fSRoland Dreier 	enum ib_qp_attr_mask req_param, opt_param;
15688a51866fSRoland Dreier 
15698a51866fSRoland Dreier 	if (mask & IB_QP_CUR_STATE  &&
15708a51866fSRoland Dreier 	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
15718a51866fSRoland Dreier 	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
157219b1f540SLeon Romanovsky 		return false;
15738a51866fSRoland Dreier 
15748a51866fSRoland Dreier 	if (!qp_state_table[cur_state][next_state].valid)
157519b1f540SLeon Romanovsky 		return false;
15768a51866fSRoland Dreier 
15778a51866fSRoland Dreier 	req_param = qp_state_table[cur_state][next_state].req_param[type];
15788a51866fSRoland Dreier 	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
15798a51866fSRoland Dreier 
15808a51866fSRoland Dreier 	if ((mask & req_param) != req_param)
158119b1f540SLeon Romanovsky 		return false;
15828a51866fSRoland Dreier 
15838a51866fSRoland Dreier 	if (mask & ~(req_param | opt_param | IB_QP_STATE))
158419b1f540SLeon Romanovsky 		return false;
15858a51866fSRoland Dreier 
158619b1f540SLeon Romanovsky 	return true;
15878a51866fSRoland Dreier }
15888a51866fSRoland Dreier EXPORT_SYMBOL(ib_modify_qp_is_ok);
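
/*
 * Editor's note: a hedged illustration of the table lookup above, not
 * part of the original file. For an RC QP moving INIT -> RTR, the
 * req_param entry demands AV, PATH_MTU, DEST_QPN, RQ_PSN,
 * MAX_DEST_RD_ATOMIC and MIN_RNR_TIMER, so a mask carrying only
 * IB_QP_STATE | IB_QP_AV fails the (mask & req_param) != req_param check.
 */
static bool example_check_init_to_rtr(void)
{
	enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_AV;

	return ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC, mask);
	/* returns false: the required RTR attributes are missing */
}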
15898a51866fSRoland Dreier 
1590947c99ecSParav Pandit /**
1591947c99ecSParav Pandit  * ib_resolve_eth_dmac - Resolve destination MAC address
1592947c99ecSParav Pandit  * @device:		Device to consider
1593947c99ecSParav Pandit  * @ah_attr:		address handle attribute which describes the
1594947c99ecSParav Pandit  *			source and destination parameters
1595947c99ecSParav Pandit  * ib_resolve_eth_dmac() resolves the destination MAC address and the L3 hop
1596947c99ecSParav Pandit  * limit. It returns 0 on success or an appropriate error code. It initializes
1597947c99ecSParav Pandit  * the necessary ah_attr fields when the call is successful.
1598947c99ecSParav Pandit  */
1599c0348eb0SParav Pandit static int ib_resolve_eth_dmac(struct ib_device *device,
160090898850SDasaratharaman Chandramouli 			       struct rdma_ah_attr *ah_attr)
1601ed4c54e5SOr Gerlitz {
1602ed4c54e5SOr Gerlitz 	int ret = 0;
1603d8966fcdSDasaratharaman Chandramouli 
16049636a56fSNoa Osherovich 	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
16059636a56fSNoa Osherovich 		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
16069636a56fSNoa Osherovich 			__be32 addr = 0;
16079636a56fSNoa Osherovich 
16089636a56fSNoa Osherovich 			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
16099636a56fSNoa Osherovich 			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
16109636a56fSNoa Osherovich 		} else {
16119636a56fSNoa Osherovich 			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
16129636a56fSNoa Osherovich 					(char *)ah_attr->roce.dmac);
16139636a56fSNoa Osherovich 		}
1614ed4c54e5SOr Gerlitz 	} else {
16151060f865SParav Pandit 		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1616ed4c54e5SOr Gerlitz 	}
1617ed4c54e5SOr Gerlitz 	return ret;
1618ed4c54e5SOr Gerlitz }
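
/*
 * Editor's note: a worked example of the multicast mapping above, not
 * part of the original file. For an IPv4-mapped multicast GID such as
 * ::ffff:239.1.1.1, ip_eth_mc_map() folds the low 23 bits of the group
 * address under the 01:00:5e IANA prefix, yielding the Ethernet dmac
 * 01:00:5e:01:01:01; ipv6_eth_mc_map() similarly builds a
 * 33:33:xx:xx:xx:xx dmac from the low 32 bits of an IPv6 group address.
 * Unicast GIDs instead take the ib_resolve_unicast_gid_dmac() path, which
 * may trigger a neighbour lookup.
 */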
1619ed4c54e5SOr Gerlitz 
16208d9ec9adSJason Gunthorpe static bool is_qp_type_connected(const struct ib_qp *qp)
16218d9ec9adSJason Gunthorpe {
16228d9ec9adSJason Gunthorpe 	return (qp->qp_type == IB_QPT_UC ||
16238d9ec9adSJason Gunthorpe 		qp->qp_type == IB_QPT_RC ||
16248d9ec9adSJason Gunthorpe 		qp->qp_type == IB_QPT_XRC_INI ||
16258d9ec9adSJason Gunthorpe 		qp->qp_type == IB_QPT_XRC_TGT);
16268d9ec9adSJason Gunthorpe }
16278d9ec9adSJason Gunthorpe 
1628a512c2fbSParav Pandit /*
1629b96ac05aSParav Pandit  * IB core internal function to perform QP attribute modification.
1630a512c2fbSParav Pandit  */
1631b96ac05aSParav Pandit static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1632a512c2fbSParav Pandit 			 int attr_mask, struct ib_udata *udata)
1633a512c2fbSParav Pandit {
1634727b7e9aSMajd Dibbiny 	u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
16358d9ec9adSJason Gunthorpe 	const struct ib_gid_attr *old_sgid_attr_av;
16368d9ec9adSJason Gunthorpe 	const struct ib_gid_attr *old_sgid_attr_alt_av;
1637a512c2fbSParav Pandit 	int ret;
1638a512c2fbSParav Pandit 
16398d9ec9adSJason Gunthorpe 	if (attr_mask & IB_QP_AV) {
16408d9ec9adSJason Gunthorpe 		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
16418d9ec9adSJason Gunthorpe 					  &old_sgid_attr_av);
16428d9ec9adSJason Gunthorpe 		if (ret)
16438d9ec9adSJason Gunthorpe 			return ret;
16448d9ec9adSJason Gunthorpe 	}
16458d9ec9adSJason Gunthorpe 	if (attr_mask & IB_QP_ALT_PATH) {
16461a1f460fSJason Gunthorpe 		/*
16471a1f460fSJason Gunthorpe 		 * FIXME: This does not track the migration state, so if the
16481a1f460fSJason Gunthorpe 		 * user loads a new alternate path after the HW has migrated
16491a1f460fSJason Gunthorpe 		 * from primary->alternate we will keep the wrong
16501a1f460fSJason Gunthorpe 		 * references. This is OK for IB because the reference
16511a1f460fSJason Gunthorpe 		 * counting does not serve any functional purpose.
16521a1f460fSJason Gunthorpe 		 */
16538d9ec9adSJason Gunthorpe 		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
16548d9ec9adSJason Gunthorpe 					  &old_sgid_attr_alt_av);
16558d9ec9adSJason Gunthorpe 		if (ret)
16568d9ec9adSJason Gunthorpe 			goto out_av;
16577a5c938bSJason Gunthorpe 
16587a5c938bSJason Gunthorpe 		/*
16597a5c938bSJason Gunthorpe 		 * Today the core code can only handle alternate paths and APM
16607a5c938bSJason Gunthorpe 		 * for IB. Ban them in roce mode.
16617a5c938bSJason Gunthorpe 		 */
16627a5c938bSJason Gunthorpe 		if (!(rdma_protocol_ib(qp->device,
16637a5c938bSJason Gunthorpe 				       attr->alt_ah_attr.port_num) &&
16647a5c938bSJason Gunthorpe 		      rdma_protocol_ib(qp->device, port))) {
16657a5c938bSJason Gunthorpe 			ret = -EINVAL;
16667a5c938bSJason Gunthorpe 			goto out;
16677a5c938bSJason Gunthorpe 		}
16688d9ec9adSJason Gunthorpe 	}
16698d9ec9adSJason Gunthorpe 
16708d9ec9adSJason Gunthorpe 	/*
16718d9ec9adSJason Gunthorpe 	 * If the user provided the qp_attr then we have to resolve it. Kernel
16728d9ec9adSJason Gunthorpe 	 * users have to provide already resolved rdma_ah_attr's
16738d9ec9adSJason Gunthorpe 	 */
16748d9ec9adSJason Gunthorpe 	if (udata && (attr_mask & IB_QP_AV) &&
16758d9ec9adSJason Gunthorpe 	    attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
16768d9ec9adSJason Gunthorpe 	    is_qp_type_connected(qp)) {
16778d9ec9adSJason Gunthorpe 		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
16788d9ec9adSJason Gunthorpe 		if (ret)
16798d9ec9adSJason Gunthorpe 			goto out;
16808d9ec9adSJason Gunthorpe 	}
16818d9ec9adSJason Gunthorpe 
1682727b7e9aSMajd Dibbiny 	if (rdma_ib_or_roce(qp->device, port)) {
1683727b7e9aSMajd Dibbiny 		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
168443c7c851SJason Gunthorpe 			dev_warn(&qp->device->dev,
168543c7c851SJason Gunthorpe 				 "%s rq_psn overflow, masking to 24 bits\n",
168643c7c851SJason Gunthorpe 				 __func__);
1687727b7e9aSMajd Dibbiny 			attr->rq_psn &= 0xffffff;
1688727b7e9aSMajd Dibbiny 		}
1689727b7e9aSMajd Dibbiny 
1690727b7e9aSMajd Dibbiny 		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
169143c7c851SJason Gunthorpe 			dev_warn(&qp->device->dev,
169243c7c851SJason Gunthorpe 				 "%s sq_psn overflow, masking to 24 bits\n",
169343c7c851SJason Gunthorpe 				 __func__);
1694727b7e9aSMajd Dibbiny 			attr->sq_psn &= 0xffffff;
1695727b7e9aSMajd Dibbiny 		}
1696727b7e9aSMajd Dibbiny 	}
1697727b7e9aSMajd Dibbiny 
169899fa331dSMark Zhang 	/*
169999fa331dSMark Zhang 	 * Bind this qp to a counter automatically based on the rdma counter
170099fa331dSMark Zhang 	 * rules. This is only done in RST2INIT when a port is specified.
170199fa331dSMark Zhang 	 */
170299fa331dSMark Zhang 	if (!qp->counter && (attr_mask & IB_QP_PORT) &&
170399fa331dSMark Zhang 	    ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
170499fa331dSMark Zhang 		rdma_counter_bind_qp_auto(qp, attr->port_num);
170599fa331dSMark Zhang 
1706498ca3c8SNoa Osherovich 	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
17071a1f460fSJason Gunthorpe 	if (ret)
17081a1f460fSJason Gunthorpe 		goto out;
17091a1f460fSJason Gunthorpe 
17101a1f460fSJason Gunthorpe 	if (attr_mask & IB_QP_PORT)
1711498ca3c8SNoa Osherovich 		qp->port = attr->port_num;
17121a1f460fSJason Gunthorpe 	if (attr_mask & IB_QP_AV)
17131a1f460fSJason Gunthorpe 		qp->av_sgid_attr =
17141a1f460fSJason Gunthorpe 			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
17151a1f460fSJason Gunthorpe 	if (attr_mask & IB_QP_ALT_PATH)
17161a1f460fSJason Gunthorpe 		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
17171a1f460fSJason Gunthorpe 			&attr->alt_ah_attr, qp->alt_path_sgid_attr);
1718498ca3c8SNoa Osherovich 
17198d9ec9adSJason Gunthorpe out:
17208d9ec9adSJason Gunthorpe 	if (attr_mask & IB_QP_ALT_PATH)
17218d9ec9adSJason Gunthorpe 		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
17228d9ec9adSJason Gunthorpe out_av:
17238d9ec9adSJason Gunthorpe 	if (attr_mask & IB_QP_AV)
17248d9ec9adSJason Gunthorpe 		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1725498ca3c8SNoa Osherovich 	return ret;
1726a512c2fbSParav Pandit }
1727b96ac05aSParav Pandit 
1728b96ac05aSParav Pandit /**
1729b96ac05aSParav Pandit  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1730b96ac05aSParav Pandit  * @ib_qp: The QP to modify.
1731b96ac05aSParav Pandit  * @attr: On input, specifies the QP attributes to modify.  On output,
1732b96ac05aSParav Pandit  *   the current values of selected QP attributes are returned.
1733b96ac05aSParav Pandit  * @attr_mask: A bit-mask used to specify which attributes of the QP
1734b96ac05aSParav Pandit  *   are being modified.
1735b96ac05aSParav Pandit  * @udata: pointer to the user's input/output buffer information
1736b96ac05aSParav Pandit  *
1737b96ac05aSParav Pandit  * It returns 0 on success or an appropriate error code on error.
1738b96ac05aSParav Pandit  */
1739b96ac05aSParav Pandit int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1740b96ac05aSParav Pandit 			    int attr_mask, struct ib_udata *udata)
1741b96ac05aSParav Pandit {
17428d9ec9adSJason Gunthorpe 	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1743b96ac05aSParav Pandit }
1744a512c2fbSParav Pandit EXPORT_SYMBOL(ib_modify_qp_with_udata);
1745a512c2fbSParav Pandit 
1746d4186194SYuval Shaia int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1747d4186194SYuval Shaia {
1748d4186194SYuval Shaia 	int rc;
1749d4186194SYuval Shaia 	u32 netdev_speed;
1750d4186194SYuval Shaia 	struct net_device *netdev;
1751d4186194SYuval Shaia 	struct ethtool_link_ksettings lksettings;
1752d4186194SYuval Shaia 
1753d4186194SYuval Shaia 	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1754d4186194SYuval Shaia 		return -EINVAL;
1755d4186194SYuval Shaia 
1756c2261dd7SJason Gunthorpe 	netdev = ib_device_get_netdev(dev, port_num);
1757d4186194SYuval Shaia 	if (!netdev)
1758d4186194SYuval Shaia 		return -ENODEV;
1759d4186194SYuval Shaia 
1760d4186194SYuval Shaia 	rtnl_lock();
1761d4186194SYuval Shaia 	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1762d4186194SYuval Shaia 	rtnl_unlock();
1763d4186194SYuval Shaia 
1764d4186194SYuval Shaia 	dev_put(netdev);
1765d4186194SYuval Shaia 
1766d4186194SYuval Shaia 	if (!rc) {
1767d4186194SYuval Shaia 		netdev_speed = lksettings.base.speed;
1768d4186194SYuval Shaia 	} else {
1769d4186194SYuval Shaia 		netdev_speed = SPEED_1000;
1770d4186194SYuval Shaia 		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1771d4186194SYuval Shaia 			netdev_speed);
1772d4186194SYuval Shaia 	}
1773d4186194SYuval Shaia 
1774d4186194SYuval Shaia 	if (netdev_speed <= SPEED_1000) {
1775d4186194SYuval Shaia 		*width = IB_WIDTH_1X;
1776d4186194SYuval Shaia 		*speed = IB_SPEED_SDR;
1777d4186194SYuval Shaia 	} else if (netdev_speed <= SPEED_10000) {
1778d4186194SYuval Shaia 		*width = IB_WIDTH_1X;
1779d4186194SYuval Shaia 		*speed = IB_SPEED_FDR10;
1780d4186194SYuval Shaia 	} else if (netdev_speed <= SPEED_20000) {
1781d4186194SYuval Shaia 		*width = IB_WIDTH_4X;
1782d4186194SYuval Shaia 		*speed = IB_SPEED_DDR;
1783d4186194SYuval Shaia 	} else if (netdev_speed <= SPEED_25000) {
1784d4186194SYuval Shaia 		*width = IB_WIDTH_1X;
1785d4186194SYuval Shaia 		*speed = IB_SPEED_EDR;
1786d4186194SYuval Shaia 	} else if (netdev_speed <= SPEED_40000) {
1787d4186194SYuval Shaia 		*width = IB_WIDTH_4X;
1788d4186194SYuval Shaia 		*speed = IB_SPEED_FDR10;
1789d4186194SYuval Shaia 	} else {
1790d4186194SYuval Shaia 		*width = IB_WIDTH_4X;
1791d4186194SYuval Shaia 		*speed = IB_SPEED_EDR;
1792d4186194SYuval Shaia 	}
1793d4186194SYuval Shaia 
1794d4186194SYuval Shaia 	return 0;
1795d4186194SYuval Shaia }
1796d4186194SYuval Shaia EXPORT_SYMBOL(ib_get_eth_speed);
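
/*
 * Editor's note: a hedged usage sketch, not part of the original file.
 * For a RoCE port whose netdev reports 25000 Mb/s, the ladder above
 * yields IB_WIDTH_1X and IB_SPEED_EDR (1 x ~25 Gb/s); a port with an
 * unreadable link falls back to the SPEED_1000 default. Drivers typically
 * call this from their query_port hook.
 */
static void example_fill_port_speed(struct ib_device *dev, u8 port_num,
				    struct ib_port_attr *attr)
{
	if (ib_get_eth_speed(dev, port_num, &attr->active_speed,
			     &attr->active_width))
		pr_debug("port %d: speed/width left unset\n", port_num);
}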
1797d4186194SYuval Shaia 
17981da177e4SLinus Torvalds int ib_modify_qp(struct ib_qp *qp,
17991da177e4SLinus Torvalds 		 struct ib_qp_attr *qp_attr,
18001da177e4SLinus Torvalds 		 int qp_attr_mask)
18011da177e4SLinus Torvalds {
1802b96ac05aSParav Pandit 	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
18031da177e4SLinus Torvalds }
18041da177e4SLinus Torvalds EXPORT_SYMBOL(ib_modify_qp);
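
/*
 * Editor's note: a hedged sketch of the classic RESET -> INIT -> RTR ->
 * RTS bring-up for an RC QP, not part of the original file. All values
 * (port 1, pkey index 0, the PSNs, dest_qp_num and "ah") are illustrative
 * and would normally come from connection establishment (e.g. the CM).
 * Each mask must satisfy qp_state_table[] above, or ib_modify_qp() fails
 * before reaching the driver.
 */
static int example_rc_qp_to_rts(struct ib_qp *qp, struct rdma_ah_attr *ah,
				u32 dest_qp_num)
{
	struct ib_qp_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state	     = IB_QPS_INIT;
	attr.pkey_index	     = 0;
	attr.port_num	     = 1;
	attr.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	if (ret)
		return ret;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state		= IB_QPS_RTR;
	attr.ah_attr		= *ah;
	attr.path_mtu		= IB_MTU_1024;
	attr.dest_qp_num	= dest_qp_num;
	attr.rq_psn		= 0;
	attr.max_dest_rd_atomic	= 1;
	attr.min_rnr_timer	= 12;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV |
			   IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN |
			   IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	if (ret)
		return ret;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state	   = IB_QPS_RTS;
	attr.timeout	   = 14;
	attr.retry_cnt	   = 7;
	attr.rnr_retry	   = 7;
	attr.sq_psn	   = 0;
	attr.max_rd_atomic = 1;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_TIMEOUT |
			    IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
			    IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC);
}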
18051da177e4SLinus Torvalds 
18061da177e4SLinus Torvalds int ib_query_qp(struct ib_qp *qp,
18071da177e4SLinus Torvalds 		struct ib_qp_attr *qp_attr,
18081da177e4SLinus Torvalds 		int qp_attr_mask,
18091da177e4SLinus Torvalds 		struct ib_qp_init_attr *qp_init_attr)
18101da177e4SLinus Torvalds {
18118d9ec9adSJason Gunthorpe 	qp_attr->ah_attr.grh.sgid_attr = NULL;
18128d9ec9adSJason Gunthorpe 	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
18138d9ec9adSJason Gunthorpe 
18143023a1e9SKamal Heib 	return qp->device->ops.query_qp ?
18153023a1e9SKamal Heib 		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
18163023a1e9SKamal Heib 					 qp_init_attr) : -EOPNOTSUPP;
18171da177e4SLinus Torvalds }
18181da177e4SLinus Torvalds EXPORT_SYMBOL(ib_query_qp);
18191da177e4SLinus Torvalds 
18200e0ec7e0SSean Hefty int ib_close_qp(struct ib_qp *qp)
18210e0ec7e0SSean Hefty {
18220e0ec7e0SSean Hefty 	struct ib_qp *real_qp;
18230e0ec7e0SSean Hefty 	unsigned long flags;
18240e0ec7e0SSean Hefty 
18250e0ec7e0SSean Hefty 	real_qp = qp->real_qp;
18260e0ec7e0SSean Hefty 	if (real_qp == qp)
18270e0ec7e0SSean Hefty 		return -EINVAL;
18280e0ec7e0SSean Hefty 
18290e0ec7e0SSean Hefty 	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
18300e0ec7e0SSean Hefty 	list_del(&qp->open_list);
18310e0ec7e0SSean Hefty 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
18320e0ec7e0SSean Hefty 
18330e0ec7e0SSean Hefty 	atomic_dec(&real_qp->usecnt);
18344a50881bSMoni Shoua 	if (qp->qp_sec)
1835d291f1a6SDaniel Jurgens 		ib_close_shared_qp_security(qp->qp_sec);
18360e0ec7e0SSean Hefty 	kfree(qp);
18370e0ec7e0SSean Hefty 
18380e0ec7e0SSean Hefty 	return 0;
18390e0ec7e0SSean Hefty }
18400e0ec7e0SSean Hefty EXPORT_SYMBOL(ib_close_qp);
18410e0ec7e0SSean Hefty 
18420e0ec7e0SSean Hefty static int __ib_destroy_shared_qp(struct ib_qp *qp)
18430e0ec7e0SSean Hefty {
18440e0ec7e0SSean Hefty 	struct ib_xrcd *xrcd;
18450e0ec7e0SSean Hefty 	struct ib_qp *real_qp;
18460e0ec7e0SSean Hefty 	int ret;
18470e0ec7e0SSean Hefty 
18480e0ec7e0SSean Hefty 	real_qp = qp->real_qp;
18490e0ec7e0SSean Hefty 	xrcd = real_qp->xrcd;
18500e0ec7e0SSean Hefty 
18510e0ec7e0SSean Hefty 	mutex_lock(&xrcd->tgt_qp_mutex);
18520e0ec7e0SSean Hefty 	ib_close_qp(qp);
18530e0ec7e0SSean Hefty 	if (atomic_read(&real_qp->usecnt) == 0)
18540e0ec7e0SSean Hefty 		list_del(&real_qp->xrcd_list);
18550e0ec7e0SSean Hefty 	else
18560e0ec7e0SSean Hefty 		real_qp = NULL;
18570e0ec7e0SSean Hefty 	mutex_unlock(&xrcd->tgt_qp_mutex);
18580e0ec7e0SSean Hefty 
18590e0ec7e0SSean Hefty 	if (real_qp) {
18600e0ec7e0SSean Hefty 		ret = ib_destroy_qp(real_qp);
18610e0ec7e0SSean Hefty 		if (!ret)
18620e0ec7e0SSean Hefty 			atomic_dec(&xrcd->usecnt);
18630e0ec7e0SSean Hefty 		else
18640e0ec7e0SSean Hefty 			__ib_insert_xrcd_qp(xrcd, real_qp);
18650e0ec7e0SSean Hefty 	}
18660e0ec7e0SSean Hefty 
18670e0ec7e0SSean Hefty 	return 0;
18680e0ec7e0SSean Hefty }
18690e0ec7e0SSean Hefty 
1870c4367a26SShamir Rabinovitch int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
18711da177e4SLinus Torvalds {
18721a1f460fSJason Gunthorpe 	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
18731a1f460fSJason Gunthorpe 	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
18741da177e4SLinus Torvalds 	struct ib_pd *pd;
18751da177e4SLinus Torvalds 	struct ib_cq *scq, *rcq;
18761da177e4SLinus Torvalds 	struct ib_srq *srq;
1877a9017e23SYishai Hadas 	struct ib_rwq_ind_table *ind_tbl;
1878d291f1a6SDaniel Jurgens 	struct ib_qp_security *sec;
18791da177e4SLinus Torvalds 	int ret;
18801da177e4SLinus Torvalds 
1881fffb0383SChristoph Hellwig 	WARN_ON_ONCE(qp->mrs_used > 0);
1882fffb0383SChristoph Hellwig 
18830e0ec7e0SSean Hefty 	if (atomic_read(&qp->usecnt))
18840e0ec7e0SSean Hefty 		return -EBUSY;
18850e0ec7e0SSean Hefty 
18860e0ec7e0SSean Hefty 	if (qp->real_qp != qp)
18870e0ec7e0SSean Hefty 		return __ib_destroy_shared_qp(qp);
18880e0ec7e0SSean Hefty 
18891da177e4SLinus Torvalds 	pd   = qp->pd;
18901da177e4SLinus Torvalds 	scq  = qp->send_cq;
18911da177e4SLinus Torvalds 	rcq  = qp->recv_cq;
18921da177e4SLinus Torvalds 	srq  = qp->srq;
1893a9017e23SYishai Hadas 	ind_tbl = qp->rwq_ind_tbl;
1894d291f1a6SDaniel Jurgens 	sec  = qp->qp_sec;
1895d291f1a6SDaniel Jurgens 	if (sec)
1896d291f1a6SDaniel Jurgens 		ib_destroy_qp_security_begin(sec);
18971da177e4SLinus Torvalds 
1898a060b562SChristoph Hellwig 	if (!qp->uobject)
1899a060b562SChristoph Hellwig 		rdma_rw_cleanup_mrs(qp);
1900a060b562SChristoph Hellwig 
190199fa331dSMark Zhang 	rdma_counter_unbind_qp(qp, true);
190278a0cd64SLeon Romanovsky 	rdma_restrack_del(&qp->res);
1903c4367a26SShamir Rabinovitch 	ret = qp->device->ops.destroy_qp(qp, udata);
19041da177e4SLinus Torvalds 	if (!ret) {
19051a1f460fSJason Gunthorpe 		if (alt_path_sgid_attr)
19061a1f460fSJason Gunthorpe 			rdma_put_gid_attr(alt_path_sgid_attr);
19071a1f460fSJason Gunthorpe 		if (av_sgid_attr)
19081a1f460fSJason Gunthorpe 			rdma_put_gid_attr(av_sgid_attr);
1909b42b63cfSSean Hefty 		if (pd)
19101da177e4SLinus Torvalds 			atomic_dec(&pd->usecnt);
1911b42b63cfSSean Hefty 		if (scq)
19121da177e4SLinus Torvalds 			atomic_dec(&scq->usecnt);
1913b42b63cfSSean Hefty 		if (rcq)
19141da177e4SLinus Torvalds 			atomic_dec(&rcq->usecnt);
19151da177e4SLinus Torvalds 		if (srq)
19161da177e4SLinus Torvalds 			atomic_dec(&srq->usecnt);
1917a9017e23SYishai Hadas 		if (ind_tbl)
1918a9017e23SYishai Hadas 			atomic_dec(&ind_tbl->usecnt);
1919d291f1a6SDaniel Jurgens 		if (sec)
1920d291f1a6SDaniel Jurgens 			ib_destroy_qp_security_end(sec);
1921d291f1a6SDaniel Jurgens 	} else {
1922d291f1a6SDaniel Jurgens 		if (sec)
1923d291f1a6SDaniel Jurgens 			ib_destroy_qp_security_abort(sec);
19241da177e4SLinus Torvalds 	}
19251da177e4SLinus Torvalds 
19261da177e4SLinus Torvalds 	return ret;
19271da177e4SLinus Torvalds }
1928c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_destroy_qp_user);
19291da177e4SLinus Torvalds 
19301da177e4SLinus Torvalds /* Completion queues */
19311da177e4SLinus Torvalds 
19327350cdd0SBharat Potnuri struct ib_cq *__ib_create_cq(struct ib_device *device,
19331da177e4SLinus Torvalds 			     ib_comp_handler comp_handler,
19341da177e4SLinus Torvalds 			     void (*event_handler)(struct ib_event *, void *),
19358e37210bSMatan Barak 			     void *cq_context,
19367350cdd0SBharat Potnuri 			     const struct ib_cq_init_attr *cq_attr,
19377350cdd0SBharat Potnuri 			     const char *caller)
19381da177e4SLinus Torvalds {
19391da177e4SLinus Torvalds 	struct ib_cq *cq;
1940e39afe3dSLeon Romanovsky 	int ret;
19411da177e4SLinus Torvalds 
1942e39afe3dSLeon Romanovsky 	cq = rdma_zalloc_drv_obj(device, ib_cq);
1943e39afe3dSLeon Romanovsky 	if (!cq)
1944e39afe3dSLeon Romanovsky 		return ERR_PTR(-ENOMEM);
19451da177e4SLinus Torvalds 
19461da177e4SLinus Torvalds 	cq->device = device;
1947b5e81bf5SRoland Dreier 	cq->uobject = NULL;
19481da177e4SLinus Torvalds 	cq->comp_handler = comp_handler;
19491da177e4SLinus Torvalds 	cq->event_handler = event_handler;
19501da177e4SLinus Torvalds 	cq->cq_context = cq_context;
19511da177e4SLinus Torvalds 	atomic_set(&cq->usecnt, 0);
195208f294a1SLeon Romanovsky 	cq->res.type = RDMA_RESTRACK_CQ;
19532165fc26SLeon Romanovsky 	rdma_restrack_set_task(&cq->res, caller);
1954e39afe3dSLeon Romanovsky 
1955e39afe3dSLeon Romanovsky 	ret = device->ops.create_cq(cq, cq_attr, NULL);
1956e39afe3dSLeon Romanovsky 	if (ret) {
1957e39afe3dSLeon Romanovsky 		kfree(cq);
1958e39afe3dSLeon Romanovsky 		return ERR_PTR(ret);
19591da177e4SLinus Torvalds 	}
19601da177e4SLinus Torvalds 
1961e39afe3dSLeon Romanovsky 	rdma_restrack_kadd(&cq->res);
19621da177e4SLinus Torvalds 	return cq;
19631da177e4SLinus Torvalds }
19647350cdd0SBharat Potnuri EXPORT_SYMBOL(__ib_create_cq);
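
/*
 * Editor's note: a hedged sketch of kernel CQ creation, not part of the
 * original file. Kernel callers use the ib_create_cq() macro, which
 * supplies KBUILD_MODNAME as the restrack "caller" seen above; the "cqe"
 * depth and completion handler are illustrative. Callers wanting the
 * polling helpers would use ib_alloc_cq() from cq.c instead.
 */
static struct ib_cq *example_create_cq(struct ib_device *device,
				       ib_comp_handler handler, void *ctx)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe	     = 256,	/* illustrative queue depth */
		.comp_vector = 0,
	};

	return ib_create_cq(device, handler, NULL, ctx, &cq_attr);
}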
19651da177e4SLinus Torvalds 
19664190b4e9SLeon Romanovsky int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
19672dd57162SEli Cohen {
19683023a1e9SKamal Heib 	return cq->device->ops.modify_cq ?
19693023a1e9SKamal Heib 		cq->device->ops.modify_cq(cq, cq_count,
19703023a1e9SKamal Heib 					  cq_period) : -EOPNOTSUPP;
19712dd57162SEli Cohen }
19724190b4e9SLeon Romanovsky EXPORT_SYMBOL(rdma_set_cq_moderation);
19732dd57162SEli Cohen 
1974c4367a26SShamir Rabinovitch int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
19751da177e4SLinus Torvalds {
19761da177e4SLinus Torvalds 	if (atomic_read(&cq->usecnt))
19771da177e4SLinus Torvalds 		return -EBUSY;
19781da177e4SLinus Torvalds 
197908f294a1SLeon Romanovsky 	rdma_restrack_del(&cq->res);
1980a52c8e24SLeon Romanovsky 	cq->device->ops.destroy_cq(cq, udata);
1981e39afe3dSLeon Romanovsky 	kfree(cq);
1982a52c8e24SLeon Romanovsky 	return 0;
19831da177e4SLinus Torvalds }
1984c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_destroy_cq_user);
19851da177e4SLinus Torvalds 
1986a74cd4afSRoland Dreier int ib_resize_cq(struct ib_cq *cq, int cqe)
19871da177e4SLinus Torvalds {
19883023a1e9SKamal Heib 	return cq->device->ops.resize_cq ?
19893023a1e9SKamal Heib 		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
19901da177e4SLinus Torvalds }
19911da177e4SLinus Torvalds EXPORT_SYMBOL(ib_resize_cq);
19921da177e4SLinus Torvalds 
19931da177e4SLinus Torvalds /* Memory regions */
19941da177e4SLinus Torvalds 
1995c4367a26SShamir Rabinovitch int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
19961da177e4SLinus Torvalds {
1997ab67ed8dSChristoph Hellwig 	struct ib_pd *pd = mr->pd;
1998be934ccaSAriel Levkovich 	struct ib_dm *dm = mr->dm;
19997c717d3aSMax Gurtovoy 	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
20001da177e4SLinus Torvalds 	int ret;
20011da177e4SLinus Torvalds 
2002fccec5b8SSteve Wise 	rdma_restrack_del(&mr->res);
2003c4367a26SShamir Rabinovitch 	ret = mr->device->ops.dereg_mr(mr, udata);
2004be934ccaSAriel Levkovich 	if (!ret) {
20051da177e4SLinus Torvalds 		atomic_dec(&pd->usecnt);
2006be934ccaSAriel Levkovich 		if (dm)
2007be934ccaSAriel Levkovich 			atomic_dec(&dm->usecnt);
20087c717d3aSMax Gurtovoy 		kfree(sig_attrs);
2009be934ccaSAriel Levkovich 	}
20101da177e4SLinus Torvalds 
20111da177e4SLinus Torvalds 	return ret;
20121da177e4SLinus Torvalds }
2013c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_dereg_mr_user);
20141da177e4SLinus Torvalds 
20159bee178bSSagi Grimberg /**
201691f57129SIsrael Rukshin  * ib_alloc_mr_user() - Allocates a memory region
20179bee178bSSagi Grimberg  * @pd:            protection domain associated with the region
20189bee178bSSagi Grimberg  * @mr_type:       memory region type
20199bee178bSSagi Grimberg  * @max_num_sg:    maximum sg entries available for registration.
2020c4367a26SShamir Rabinovitch  * @udata:	   user data or NULL for kernel objects
20219bee178bSSagi Grimberg  *
20229bee178bSSagi Grimberg  * Notes:
20239bee178bSSagi Grimberg  * Memory registration page/sg lists must not exceed max_num_sg.
20249bee178bSSagi Grimberg  * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
20259bee178bSSagi Grimberg  * max_num_sg * used_page_size.
20269bee178bSSagi Grimberg  *
20279bee178bSSagi Grimberg  */
2028c4367a26SShamir Rabinovitch struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
2029c4367a26SShamir Rabinovitch 			       u32 max_num_sg, struct ib_udata *udata)
203017cd3a2dSSagi Grimberg {
203117cd3a2dSSagi Grimberg 	struct ib_mr *mr;
203217cd3a2dSSagi Grimberg 
20333023a1e9SKamal Heib 	if (!pd->device->ops.alloc_mr)
203487915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
203517cd3a2dSSagi Grimberg 
203626bc7eaeSIsrael Rukshin 	if (WARN_ON_ONCE(mr_type == IB_MR_TYPE_INTEGRITY))
203726bc7eaeSIsrael Rukshin 		return ERR_PTR(-EINVAL);
203826bc7eaeSIsrael Rukshin 
2039c4367a26SShamir Rabinovitch 	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata);
204017cd3a2dSSagi Grimberg 	if (!IS_ERR(mr)) {
204117cd3a2dSSagi Grimberg 		mr->device  = pd->device;
204217cd3a2dSSagi Grimberg 		mr->pd      = pd;
204354e7e48bSAriel Levkovich 		mr->dm      = NULL;
204417cd3a2dSSagi Grimberg 		mr->uobject = NULL;
204517cd3a2dSSagi Grimberg 		atomic_inc(&pd->usecnt);
2046d4a85c30SSteve Wise 		mr->need_inval = false;
2047fccec5b8SSteve Wise 		mr->res.type = RDMA_RESTRACK_MR;
2048af8d7037SShamir Rabinovitch 		rdma_restrack_kadd(&mr->res);
2049a0bc099aSMax Gurtovoy 		mr->type = mr_type;
20507c717d3aSMax Gurtovoy 		mr->sig_attrs = NULL;
205117cd3a2dSSagi Grimberg 	}
205217cd3a2dSSagi Grimberg 
205317cd3a2dSSagi Grimberg 	return mr;
205417cd3a2dSSagi Grimberg }
2055c4367a26SShamir Rabinovitch EXPORT_SYMBOL(ib_alloc_mr_user);
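
/*
 * Editor's note: a hedged sketch of fast-registration MR setup, not part
 * of the original file. Kernel users call the ib_alloc_mr() wrapper
 * (NULL udata) and then map a scatterlist with ib_map_mr_sg(); posting
 * the IB_WR_REG_MR work request that activates the mapping is omitted
 * here. "sg"/"sg_nents" are assumed to describe DMA-mapped memory.
 */
static struct ib_mr *example_reg_mr(struct ib_pd *pd, struct scatterlist *sg,
				    int sg_nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return mr;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n != sg_nents) {
		ib_dereg_mr(mr);
		return ERR_PTR(n < 0 ? n : -EINVAL);
	}
	return mr;
}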
205600f7ec36SSteve Wise 
205726bc7eaeSIsrael Rukshin /**
205826bc7eaeSIsrael Rukshin  * ib_alloc_mr_integrity() - Allocates an integrity memory region
205926bc7eaeSIsrael Rukshin  * @pd:                      protection domain associated with the region
206026bc7eaeSIsrael Rukshin  * @max_num_data_sg:         maximum data sg entries available for registration
206126bc7eaeSIsrael Rukshin  * @max_num_meta_sg:         maximum metadata sg entries available for
206226bc7eaeSIsrael Rukshin  *                           registration
206326bc7eaeSIsrael Rukshin  *
206426bc7eaeSIsrael Rukshin  * Notes:
206526bc7eaeSIsrael Rukshin  * Memory registration page/sg lists must not exceed max_num_data_sg;
206626bc7eaeSIsrael Rukshin  * likewise, the integrity page/sg lists must not exceed max_num_meta_sg.
206726bc7eaeSIsrael Rukshin  *
206826bc7eaeSIsrael Rukshin  */
206926bc7eaeSIsrael Rukshin struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
207026bc7eaeSIsrael Rukshin 				    u32 max_num_data_sg,
207126bc7eaeSIsrael Rukshin 				    u32 max_num_meta_sg)
207226bc7eaeSIsrael Rukshin {
207326bc7eaeSIsrael Rukshin 	struct ib_mr *mr;
20747c717d3aSMax Gurtovoy 	struct ib_sig_attrs *sig_attrs;
207526bc7eaeSIsrael Rukshin 
20762cdfcdd8SMax Gurtovoy 	if (!pd->device->ops.alloc_mr_integrity ||
20772cdfcdd8SMax Gurtovoy 	    !pd->device->ops.map_mr_sg_pi)
207826bc7eaeSIsrael Rukshin 		return ERR_PTR(-EOPNOTSUPP);
207926bc7eaeSIsrael Rukshin 
208026bc7eaeSIsrael Rukshin 	if (!max_num_meta_sg)
208126bc7eaeSIsrael Rukshin 		return ERR_PTR(-EINVAL);
208226bc7eaeSIsrael Rukshin 
20837c717d3aSMax Gurtovoy 	sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
20847c717d3aSMax Gurtovoy 	if (!sig_attrs)
20857c717d3aSMax Gurtovoy 		return ERR_PTR(-ENOMEM);
20867c717d3aSMax Gurtovoy 
208726bc7eaeSIsrael Rukshin 	mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
208826bc7eaeSIsrael Rukshin 						max_num_meta_sg);
20897c717d3aSMax Gurtovoy 	if (IS_ERR(mr)) {
20907c717d3aSMax Gurtovoy 		kfree(sig_attrs);
209126bc7eaeSIsrael Rukshin 		return mr;
20927c717d3aSMax Gurtovoy 	}
209326bc7eaeSIsrael Rukshin 
209426bc7eaeSIsrael Rukshin 	mr->device = pd->device;
209526bc7eaeSIsrael Rukshin 	mr->pd = pd;
209626bc7eaeSIsrael Rukshin 	mr->dm = NULL;
209726bc7eaeSIsrael Rukshin 	mr->uobject = NULL;
209826bc7eaeSIsrael Rukshin 	atomic_inc(&pd->usecnt);
209926bc7eaeSIsrael Rukshin 	mr->need_inval = false;
210026bc7eaeSIsrael Rukshin 	mr->res.type = RDMA_RESTRACK_MR;
210126bc7eaeSIsrael Rukshin 	rdma_restrack_kadd(&mr->res);
210226bc7eaeSIsrael Rukshin 	mr->type = IB_MR_TYPE_INTEGRITY;
21037c717d3aSMax Gurtovoy 	mr->sig_attrs = sig_attrs;
210426bc7eaeSIsrael Rukshin 
210526bc7eaeSIsrael Rukshin 	return mr;
210626bc7eaeSIsrael Rukshin }
210726bc7eaeSIsrael Rukshin EXPORT_SYMBOL(ib_alloc_mr_integrity);
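/*
 * Example (editorial sketch): allocating an integrity MR for a combined
 * data + protection-information registration.  The limits (256 data and
 * 8 metadata SG entries) are hypothetical; max_num_meta_sg must be
 * non-zero, as enforced above.
 */
static struct ib_mr *example_alloc_pi_mr(struct ib_pd *pd)
{
	return ib_alloc_mr_integrity(pd, 256, 8);
}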
210826bc7eaeSIsrael Rukshin 
21091da177e4SLinus Torvalds /* "Fast" memory regions */
21101da177e4SLinus Torvalds 
21111da177e4SLinus Torvalds struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
21121da177e4SLinus Torvalds 			    int mr_access_flags,
21131da177e4SLinus Torvalds 			    struct ib_fmr_attr *fmr_attr)
21141da177e4SLinus Torvalds {
21151da177e4SLinus Torvalds 	struct ib_fmr *fmr;
21161da177e4SLinus Torvalds 
21173023a1e9SKamal Heib 	if (!pd->device->ops.alloc_fmr)
211887915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
21191da177e4SLinus Torvalds 
21203023a1e9SKamal Heib 	fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
21211da177e4SLinus Torvalds 	if (!IS_ERR(fmr)) {
21221da177e4SLinus Torvalds 		fmr->device = pd->device;
21231da177e4SLinus Torvalds 		fmr->pd     = pd;
21241da177e4SLinus Torvalds 		atomic_inc(&pd->usecnt);
21251da177e4SLinus Torvalds 	}
21261da177e4SLinus Torvalds 
21271da177e4SLinus Torvalds 	return fmr;
21281da177e4SLinus Torvalds }
21291da177e4SLinus Torvalds EXPORT_SYMBOL(ib_alloc_fmr);
21301da177e4SLinus Torvalds 
21311da177e4SLinus Torvalds int ib_unmap_fmr(struct list_head *fmr_list)
21321da177e4SLinus Torvalds {
21331da177e4SLinus Torvalds 	struct ib_fmr *fmr;
21341da177e4SLinus Torvalds 
21351da177e4SLinus Torvalds 	if (list_empty(fmr_list))
21361da177e4SLinus Torvalds 		return 0;
21371da177e4SLinus Torvalds 
21381da177e4SLinus Torvalds 	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
21393023a1e9SKamal Heib 	return fmr->device->ops.unmap_fmr(fmr_list);
21401da177e4SLinus Torvalds }
21411da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unmap_fmr);
21421da177e4SLinus Torvalds 
21431da177e4SLinus Torvalds int ib_dealloc_fmr(struct ib_fmr *fmr)
21441da177e4SLinus Torvalds {
21451da177e4SLinus Torvalds 	struct ib_pd *pd;
21461da177e4SLinus Torvalds 	int ret;
21471da177e4SLinus Torvalds 
21481da177e4SLinus Torvalds 	pd = fmr->pd;
21493023a1e9SKamal Heib 	ret = fmr->device->ops.dealloc_fmr(fmr);
21501da177e4SLinus Torvalds 	if (!ret)
21511da177e4SLinus Torvalds 		atomic_dec(&pd->usecnt);
21521da177e4SLinus Torvalds 
21531da177e4SLinus Torvalds 	return ret;
21541da177e4SLinus Torvalds }
21551da177e4SLinus Torvalds EXPORT_SYMBOL(ib_dealloc_fmr);
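/*
 * Example (editorial sketch): one alloc/map/unmap/dealloc cycle over the
 * legacy FMR interface above.  The access flags, page array and map count
 * are hypothetical placeholders.
 */
static int example_fmr_cycle(struct ib_pd *pd, u64 *page_list, int npages,
			     u64 iova)
{
	struct ib_fmr_attr fmr_attr = {
		.max_pages  = npages,
		.max_maps   = 32,
		.page_shift = PAGE_SHIFT,
	};
	struct ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &fmr_attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
	if (!ret) {
		list_add(&fmr->list, &fmr_list);
		ib_unmap_fmr(&fmr_list);
	}
	return ib_dealloc_fmr(fmr);
}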
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds /* Multicast groups */
21581da177e4SLinus Torvalds 
215952363335SNoa Osherovich static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
216052363335SNoa Osherovich {
216152363335SNoa Osherovich 	struct ib_qp_init_attr init_attr = {};
216252363335SNoa Osherovich 	struct ib_qp_attr attr = {};
216352363335SNoa Osherovich 	int num_eth_ports = 0;
216452363335SNoa Osherovich 	int port;
216552363335SNoa Osherovich 
216652363335SNoa Osherovich 	/* If the QP state is >= INIT, it is assigned to a port and we need
216752363335SNoa Osherovich 	 * to check only that port.
216852363335SNoa Osherovich 	 */
216952363335SNoa Osherovich 	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
217052363335SNoa Osherovich 		if (attr.qp_state >= IB_QPS_INIT) {
2171e6f9bc34SAlex Estrin 			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
217252363335SNoa Osherovich 			    IB_LINK_LAYER_INFINIBAND)
217352363335SNoa Osherovich 				return true;
217452363335SNoa Osherovich 			goto lid_check;
217552363335SNoa Osherovich 		}
217652363335SNoa Osherovich 	}
217752363335SNoa Osherovich 
217852363335SNoa Osherovich 	/* Can't get a quick answer, iterate over all ports */
217952363335SNoa Osherovich 	for (port = 0; port < qp->device->phys_port_cnt; port++)
2180e6f9bc34SAlex Estrin 		if (rdma_port_get_link_layer(qp->device, port) !=
218152363335SNoa Osherovich 		    IB_LINK_LAYER_INFINIBAND)
218252363335SNoa Osherovich 			num_eth_ports++;
218352363335SNoa Osherovich 
218452363335SNoa Osherovich 	/* If we have at least one Ethernet port, the RoCE annex declares that
218552363335SNoa Osherovich 	 * the multicast LID should be ignored. We can't tell at this step if
218652363335SNoa Osherovich 	 * the QP belongs to an IB or Ethernet port.
218752363335SNoa Osherovich 	 */
218852363335SNoa Osherovich 	if (num_eth_ports)
218952363335SNoa Osherovich 		return true;
219052363335SNoa Osherovich 
219152363335SNoa Osherovich 	/* If all the ports are IB, we can check according to IB spec. */
219252363335SNoa Osherovich lid_check:
219352363335SNoa Osherovich 	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
219452363335SNoa Osherovich 		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
219552363335SNoa Osherovich }
219652363335SNoa Osherovich 
21971da177e4SLinus Torvalds int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
21981da177e4SLinus Torvalds {
2199c3bccbfbSOr Gerlitz 	int ret;
2200c3bccbfbSOr Gerlitz 
22013023a1e9SKamal Heib 	if (!qp->device->ops.attach_mcast)
220287915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
2203be1d325aSNoa Osherovich 
2204be1d325aSNoa Osherovich 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2205be1d325aSNoa Osherovich 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
22060c33aeedSJack Morgenstein 		return -EINVAL;
22070c33aeedSJack Morgenstein 
22083023a1e9SKamal Heib 	ret = qp->device->ops.attach_mcast(qp, gid, lid);
2209c3bccbfbSOr Gerlitz 	if (!ret)
2210c3bccbfbSOr Gerlitz 		atomic_inc(&qp->usecnt);
2211c3bccbfbSOr Gerlitz 	return ret;
22121da177e4SLinus Torvalds }
22131da177e4SLinus Torvalds EXPORT_SYMBOL(ib_attach_mcast);
22141da177e4SLinus Torvalds 
22151da177e4SLinus Torvalds int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
22161da177e4SLinus Torvalds {
2217c3bccbfbSOr Gerlitz 	int ret;
2218c3bccbfbSOr Gerlitz 
22193023a1e9SKamal Heib 	if (!qp->device->ops.detach_mcast)
222087915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
2221be1d325aSNoa Osherovich 
2222be1d325aSNoa Osherovich 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2223be1d325aSNoa Osherovich 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
22240c33aeedSJack Morgenstein 		return -EINVAL;
22250c33aeedSJack Morgenstein 
22263023a1e9SKamal Heib 	ret = qp->device->ops.detach_mcast(qp, gid, lid);
2227c3bccbfbSOr Gerlitz 	if (!ret)
2228c3bccbfbSOr Gerlitz 		atomic_dec(&qp->usecnt);
2229c3bccbfbSOr Gerlitz 	return ret;
22301da177e4SLinus Torvalds }
22311da177e4SLinus Torvalds EXPORT_SYMBOL(ib_detach_mcast);
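/*
 * Example (editorial sketch): joining and leaving a multicast group on a
 * UD QP.  The MGID and MLID are assumed to come from an SA join, which is
 * outside the scope of this sketch.
 */
static int example_mcast_cycle(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive multicast traffic ... */

	return ib_detach_mcast(qp, mgid, mlid);
}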
223259991f94SSean Hefty 
2233f66c8ba4SLeon Romanovsky struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
223459991f94SSean Hefty {
223559991f94SSean Hefty 	struct ib_xrcd *xrcd;
223659991f94SSean Hefty 
22373023a1e9SKamal Heib 	if (!device->ops.alloc_xrcd)
223887915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
223959991f94SSean Hefty 
2240ff23dfa1SShamir Rabinovitch 	xrcd = device->ops.alloc_xrcd(device, NULL);
224159991f94SSean Hefty 	if (!IS_ERR(xrcd)) {
224259991f94SSean Hefty 		xrcd->device = device;
224353d0bd1eSSean Hefty 		xrcd->inode = NULL;
224459991f94SSean Hefty 		atomic_set(&xrcd->usecnt, 0);
2245d3d72d90SSean Hefty 		mutex_init(&xrcd->tgt_qp_mutex);
2246d3d72d90SSean Hefty 		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
224759991f94SSean Hefty 	}
224859991f94SSean Hefty 
224959991f94SSean Hefty 	return xrcd;
225059991f94SSean Hefty }
2251f66c8ba4SLeon Romanovsky EXPORT_SYMBOL(__ib_alloc_xrcd);
225259991f94SSean Hefty 
2253c4367a26SShamir Rabinovitch int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
225459991f94SSean Hefty {
2255d3d72d90SSean Hefty 	struct ib_qp *qp;
2256d3d72d90SSean Hefty 	int ret;
2257d3d72d90SSean Hefty 
225859991f94SSean Hefty 	if (atomic_read(&xrcd->usecnt))
225959991f94SSean Hefty 		return -EBUSY;
226059991f94SSean Hefty 
2261d3d72d90SSean Hefty 	while (!list_empty(&xrcd->tgt_qp_list)) {
2262d3d72d90SSean Hefty 		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
2263d3d72d90SSean Hefty 		ret = ib_destroy_qp(qp);
2264d3d72d90SSean Hefty 		if (ret)
2265d3d72d90SSean Hefty 			return ret;
2266d3d72d90SSean Hefty 	}
226756594ae1SParav Pandit 	mutex_destroy(&xrcd->tgt_qp_mutex);
2268d3d72d90SSean Hefty 
2269c4367a26SShamir Rabinovitch 	return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
227059991f94SSean Hefty }
227159991f94SSean Hefty EXPORT_SYMBOL(ib_dealloc_xrcd);
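/*
 * Example (editorial sketch): kernel users allocate an XRC domain through
 * the ib_alloc_xrcd() convenience macro and, having no user context,
 * release it with ib_dealloc_xrcd(..., NULL).
 */
static int example_xrcd_cycle(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	xrcd = ib_alloc_xrcd(device);
	if (IS_ERR(xrcd))
		return PTR_ERR(xrcd);

	/* ... create XRC INI/TGT QPs sharing this domain ... */

	return ib_dealloc_xrcd(xrcd, NULL);
}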
2272319a441dSHadar Hen Zion 
22735fd251c8SYishai Hadas /**
22745fd251c8SYishai Hadas  * ib_create_wq - Creates a WQ associated with the specified protection
22755fd251c8SYishai Hadas  * domain.
22765fd251c8SYishai Hadas  * @pd: The protection domain associated with the WQ.
22771f58621eSRandy Dunlap  * @wq_attr: A list of initial attributes required to create the
22785fd251c8SYishai Hadas  * WQ. If WQ creation succeeds, then the attributes are updated to
22795fd251c8SYishai Hadas  * the actual capabilities of the created WQ.
22805fd251c8SYishai Hadas  *
22811f58621eSRandy Dunlap  * wq_attr->max_wr and wq_attr->max_sge determine
22825fd251c8SYishai Hadas  * the requested size of the WQ, and are set to the actual values
22835fd251c8SYishai Hadas  * allocated on return.
22845fd251c8SYishai Hadas  * If ib_create_wq() succeeds, then max_wr and max_sge will always be
22855fd251c8SYishai Hadas  * at least as large as the requested values.
22865fd251c8SYishai Hadas  */
22875fd251c8SYishai Hadas struct ib_wq *ib_create_wq(struct ib_pd *pd,
22885fd251c8SYishai Hadas 			   struct ib_wq_init_attr *wq_attr)
22895fd251c8SYishai Hadas {
22905fd251c8SYishai Hadas 	struct ib_wq *wq;
22915fd251c8SYishai Hadas 
22923023a1e9SKamal Heib 	if (!pd->device->ops.create_wq)
229387915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
22945fd251c8SYishai Hadas 
22953023a1e9SKamal Heib 	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
22965fd251c8SYishai Hadas 	if (!IS_ERR(wq)) {
22975fd251c8SYishai Hadas 		wq->event_handler = wq_attr->event_handler;
22985fd251c8SYishai Hadas 		wq->wq_context = wq_attr->wq_context;
22995fd251c8SYishai Hadas 		wq->wq_type = wq_attr->wq_type;
23005fd251c8SYishai Hadas 		wq->cq = wq_attr->cq;
23015fd251c8SYishai Hadas 		wq->device = pd->device;
23025fd251c8SYishai Hadas 		wq->pd = pd;
23035fd251c8SYishai Hadas 		wq->uobject = NULL;
23045fd251c8SYishai Hadas 		atomic_inc(&pd->usecnt);
23055fd251c8SYishai Hadas 		atomic_inc(&wq_attr->cq->usecnt);
23065fd251c8SYishai Hadas 		atomic_set(&wq->usecnt, 0);
23075fd251c8SYishai Hadas 	}
23085fd251c8SYishai Hadas 	return wq;
23095fd251c8SYishai Hadas }
23105fd251c8SYishai Hadas EXPORT_SYMBOL(ib_create_wq);
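/*
 * Example (editorial sketch): creating a receive WQ, e.g. for use in an RSS
 * indirection table.  The depth (128) and SGE count (1) are hypothetical;
 * on success the attributes are updated to what was actually allocated.
 */
static struct ib_wq *example_create_rq(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr wq_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,
		.max_sge = 1,
		.cq	 = cq,
	};

	return ib_create_wq(pd, &wq_attr);
}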
23115fd251c8SYishai Hadas 
23125fd251c8SYishai Hadas /**
2313c4367a26SShamir Rabinovitch  * ib_destroy_wq - Destroys the specified user WQ.
23145fd251c8SYishai Hadas  * @wq: The WQ to destroy.
2315c4367a26SShamir Rabinovitch  * @udata: Valid user data
23165fd251c8SYishai Hadas  */
2317c4367a26SShamir Rabinovitch int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
23185fd251c8SYishai Hadas {
23195fd251c8SYishai Hadas 	struct ib_cq *cq = wq->cq;
23205fd251c8SYishai Hadas 	struct ib_pd *pd = wq->pd;
23215fd251c8SYishai Hadas 
23225fd251c8SYishai Hadas 	if (atomic_read(&wq->usecnt))
23235fd251c8SYishai Hadas 		return -EBUSY;
23245fd251c8SYishai Hadas 
2325a49b1dc7SLeon Romanovsky 	wq->device->ops.destroy_wq(wq, udata);
23265fd251c8SYishai Hadas 	atomic_dec(&pd->usecnt);
23275fd251c8SYishai Hadas 	atomic_dec(&cq->usecnt);
2328a49b1dc7SLeon Romanovsky 
2329a49b1dc7SLeon Romanovsky 	return 0;
23305fd251c8SYishai Hadas }
23315fd251c8SYishai Hadas EXPORT_SYMBOL(ib_destroy_wq);
23325fd251c8SYishai Hadas 
23335fd251c8SYishai Hadas /**
23345fd251c8SYishai Hadas  * ib_modify_wq - Modifies the specified WQ.
23355fd251c8SYishai Hadas  * @wq: The WQ to modify.
23365fd251c8SYishai Hadas  * @wq_attr: On input, specifies the WQ attributes to modify.
23375fd251c8SYishai Hadas  * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
23385fd251c8SYishai Hadas  *   are being modified.
23395fd251c8SYishai Hadas  * On output, the current values of selected WQ attributes are returned.
23405fd251c8SYishai Hadas  */
23415fd251c8SYishai Hadas int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
23425fd251c8SYishai Hadas 		 u32 wq_attr_mask)
23435fd251c8SYishai Hadas {
23445fd251c8SYishai Hadas 	int err;
23455fd251c8SYishai Hadas 
23463023a1e9SKamal Heib 	if (!wq->device->ops.modify_wq)
234787915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
23485fd251c8SYishai Hadas 
23493023a1e9SKamal Heib 	err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
23505fd251c8SYishai Hadas 	return err;
23515fd251c8SYishai Hadas }
23525fd251c8SYishai Hadas EXPORT_SYMBOL(ib_modify_wq);
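/*
 * Example (editorial sketch): a freshly created WQ starts in the RESET state
 * and must be moved to RDY before it can receive; this is a minimal
 * transition using the attribute mask bits from ib_verbs.h.
 */
static int example_wq_to_ready(struct ib_wq *wq)
{
	struct ib_wq_attr wq_attr = {
		.wq_state      = IB_WQS_RDY,
		.curr_wq_state = IB_WQS_RESET,
	};

	return ib_modify_wq(wq, &wq_attr, IB_WQ_STATE | IB_WQ_CUR_STATE);
}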
23535fd251c8SYishai Hadas 
23546d39786bSYishai Hadas /*
23556d39786bSYishai Hadas  * ib_create_rwq_ind_table - Creates an RQ Indirection Table.
23566d39786bSYishai Hadas  * @device: The device on which to create the rwq indirection table.
23576d39786bSYishai Hadas  * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
23586d39786bSYishai Hadas  * create the Indirection Table.
23596d39786bSYishai Hadas  *
23606d39786bSYishai Hadas  * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be
23616d39786bSYishai Hadas  *	shorter than that of the created ib_rwq_ind_table object; the caller
23626d39786bSYishai Hadas  *	is responsible for its memory allocation/free.
23636d39786bSYishai Hadas  */
23646d39786bSYishai Hadas struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
23656d39786bSYishai Hadas 						 struct ib_rwq_ind_table_init_attr *init_attr)
23666d39786bSYishai Hadas {
23676d39786bSYishai Hadas 	struct ib_rwq_ind_table *rwq_ind_table;
23686d39786bSYishai Hadas 	int i;
23696d39786bSYishai Hadas 	u32 table_size;
23706d39786bSYishai Hadas 
23713023a1e9SKamal Heib 	if (!device->ops.create_rwq_ind_table)
237287915bf8SLeon Romanovsky 		return ERR_PTR(-EOPNOTSUPP);
23736d39786bSYishai Hadas 
23746d39786bSYishai Hadas 	table_size = (1 << init_attr->log_ind_tbl_size);
23753023a1e9SKamal Heib 	rwq_ind_table = device->ops.create_rwq_ind_table(device,
23766d39786bSYishai Hadas 							 init_attr, NULL);
23776d39786bSYishai Hadas 	if (IS_ERR(rwq_ind_table))
23786d39786bSYishai Hadas 		return rwq_ind_table;
23796d39786bSYishai Hadas 
23806d39786bSYishai Hadas 	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
23816d39786bSYishai Hadas 	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
23826d39786bSYishai Hadas 	rwq_ind_table->device = device;
23836d39786bSYishai Hadas 	rwq_ind_table->uobject = NULL;
23846d39786bSYishai Hadas 	atomic_set(&rwq_ind_table->usecnt, 0);
23856d39786bSYishai Hadas 
23866d39786bSYishai Hadas 	for (i = 0; i < table_size; i++)
23876d39786bSYishai Hadas 		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
23886d39786bSYishai Hadas 
23896d39786bSYishai Hadas 	return rwq_ind_table;
23906d39786bSYishai Hadas }
23916d39786bSYishai Hadas EXPORT_SYMBOL(ib_create_rwq_ind_table);
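/*
 * Example (editorial sketch): building a four-entry RSS indirection table
 * from an array of WQs created as above.  Per the note above, the caller
 * owns the wqs array for the lifetime of the table.
 */
static struct ib_rwq_ind_table *
example_create_rss_table(struct ib_device *device, struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size = 2,	/* 1 << 2 == 4 WQs */
		.ind_tbl	  = wqs,
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}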
23926d39786bSYishai Hadas 
23936d39786bSYishai Hadas /*
23946d39786bSYishai Hadas  * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
23956d39786bSYishai Hadas  * @rwq_ind_table: The Indirection Table to destroy.
23966d39786bSYishai Hadas  */
23976d39786bSYishai Hadas int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
23986d39786bSYishai Hadas {
23996d39786bSYishai Hadas 	int err, i;
24006d39786bSYishai Hadas 	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
24016d39786bSYishai Hadas 	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
24026d39786bSYishai Hadas 
24036d39786bSYishai Hadas 	if (atomic_read(&rwq_ind_table->usecnt))
24046d39786bSYishai Hadas 		return -EBUSY;
24056d39786bSYishai Hadas 
24063023a1e9SKamal Heib 	err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
24076d39786bSYishai Hadas 	if (!err) {
24086d39786bSYishai Hadas 		for (i = 0; i < table_size; i++)
24096d39786bSYishai Hadas 			atomic_dec(&ind_tbl[i]->usecnt);
24106d39786bSYishai Hadas 	}
24116d39786bSYishai Hadas 
24126d39786bSYishai Hadas 	return err;
24136d39786bSYishai Hadas }
24146d39786bSYishai Hadas EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
24156d39786bSYishai Hadas 
24161b01d335SSagi Grimberg int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
24171b01d335SSagi Grimberg 		       struct ib_mr_status *mr_status)
24181b01d335SSagi Grimberg {
24193023a1e9SKamal Heib 	if (!mr->device->ops.check_mr_status)
24203023a1e9SKamal Heib 		return -EOPNOTSUPP;
24213023a1e9SKamal Heib 
24223023a1e9SKamal Heib 	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
24231b01d335SSagi Grimberg }
24241b01d335SSagi Grimberg EXPORT_SYMBOL(ib_check_mr_status);
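/*
 * Example (editorial sketch): after an integrity operation completes, check
 * the MR for signature errors.  The pr_err() reporting is illustrative.
 */
static int example_check_sig(struct ib_mr *mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error %d at offset %llu\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.sig_err_offset);
	return 0;
}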
24254c67e2bfSSagi Grimberg 
242650174a7fSEli Cohen int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
242750174a7fSEli Cohen 			 int state)
242850174a7fSEli Cohen {
24293023a1e9SKamal Heib 	if (!device->ops.set_vf_link_state)
243087915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
243150174a7fSEli Cohen 
24323023a1e9SKamal Heib 	return device->ops.set_vf_link_state(device, vf, port, state);
243350174a7fSEli Cohen }
243450174a7fSEli Cohen EXPORT_SYMBOL(ib_set_vf_link_state);
243550174a7fSEli Cohen 
243650174a7fSEli Cohen int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
243750174a7fSEli Cohen 		     struct ifla_vf_info *info)
243850174a7fSEli Cohen {
24393023a1e9SKamal Heib 	if (!device->ops.get_vf_config)
244087915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
244150174a7fSEli Cohen 
24423023a1e9SKamal Heib 	return device->ops.get_vf_config(device, vf, port, info);
244350174a7fSEli Cohen }
244450174a7fSEli Cohen EXPORT_SYMBOL(ib_get_vf_config);
244550174a7fSEli Cohen 
244650174a7fSEli Cohen int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
244750174a7fSEli Cohen 		    struct ifla_vf_stats *stats)
244850174a7fSEli Cohen {
24493023a1e9SKamal Heib 	if (!device->ops.get_vf_stats)
245087915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
245150174a7fSEli Cohen 
24523023a1e9SKamal Heib 	return device->ops.get_vf_stats(device, vf, port, stats);
245350174a7fSEli Cohen }
245450174a7fSEli Cohen EXPORT_SYMBOL(ib_get_vf_stats);
245550174a7fSEli Cohen 
245650174a7fSEli Cohen int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
245750174a7fSEli Cohen 		   int type)
245850174a7fSEli Cohen {
24593023a1e9SKamal Heib 	if (!device->ops.set_vf_guid)
246087915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
246150174a7fSEli Cohen 
24623023a1e9SKamal Heib 	return device->ops.set_vf_guid(device, vf, port, guid, type);
246350174a7fSEli Cohen }
246450174a7fSEli Cohen EXPORT_SYMBOL(ib_set_vf_guid);
246550174a7fSEli Cohen 
2466bfcb3c5dSDanit Goldberg int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
2467bfcb3c5dSDanit Goldberg 		   struct ifla_vf_guid *node_guid,
2468bfcb3c5dSDanit Goldberg 		   struct ifla_vf_guid *port_guid)
2469bfcb3c5dSDanit Goldberg {
2470bfcb3c5dSDanit Goldberg 	if (!device->ops.get_vf_guid)
2471bfcb3c5dSDanit Goldberg 		return -EOPNOTSUPP;
2472bfcb3c5dSDanit Goldberg 
2473bfcb3c5dSDanit Goldberg 	return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2474bfcb3c5dSDanit Goldberg }
2475bfcb3c5dSDanit Goldberg EXPORT_SYMBOL(ib_get_vf_guid);
24764c67e2bfSSagi Grimberg /**
24772cdfcdd8SMax Gurtovoy  * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
24782cdfcdd8SMax Gurtovoy  *     information) and set an appropriate memory region for registration.
24792cdfcdd8SMax Gurtovoy  * @mr:             memory region
24802cdfcdd8SMax Gurtovoy  * @data_sg:        dma mapped scatterlist for data
24812cdfcdd8SMax Gurtovoy  * @data_sg_nents:  number of entries in data_sg
24822cdfcdd8SMax Gurtovoy  * @data_sg_offset: offset in bytes into data_sg
24832cdfcdd8SMax Gurtovoy  * @meta_sg:        dma mapped scatterlist for metadata
24842cdfcdd8SMax Gurtovoy  * @meta_sg_nents:  number of entries in meta_sg
24852cdfcdd8SMax Gurtovoy  * @meta_sg_offset: offset in bytes into meta_sg
24862cdfcdd8SMax Gurtovoy  * @page_size:      page vector desired page size
24872cdfcdd8SMax Gurtovoy  *
24882cdfcdd8SMax Gurtovoy  * Constraints:
24892cdfcdd8SMax Gurtovoy  * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
24902cdfcdd8SMax Gurtovoy  *
24912cdfcdd8SMax Gurtovoy  * Return: 0 on success.
24922cdfcdd8SMax Gurtovoy  *
24932cdfcdd8SMax Gurtovoy  * After this completes successfully, the memory region
24942cdfcdd8SMax Gurtovoy  * is ready for registration.
24952cdfcdd8SMax Gurtovoy  */
24962cdfcdd8SMax Gurtovoy int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
24972cdfcdd8SMax Gurtovoy 		    int data_sg_nents, unsigned int *data_sg_offset,
24982cdfcdd8SMax Gurtovoy 		    struct scatterlist *meta_sg, int meta_sg_nents,
24992cdfcdd8SMax Gurtovoy 		    unsigned int *meta_sg_offset, unsigned int page_size)
25002cdfcdd8SMax Gurtovoy {
25012cdfcdd8SMax Gurtovoy 	if (unlikely(!mr->device->ops.map_mr_sg_pi ||
25022cdfcdd8SMax Gurtovoy 		     WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
25032cdfcdd8SMax Gurtovoy 		return -EOPNOTSUPP;
25042cdfcdd8SMax Gurtovoy 
25052cdfcdd8SMax Gurtovoy 	mr->page_size = page_size;
25062cdfcdd8SMax Gurtovoy 
25072cdfcdd8SMax Gurtovoy 	return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
25082cdfcdd8SMax Gurtovoy 					    data_sg_offset, meta_sg,
25092cdfcdd8SMax Gurtovoy 					    meta_sg_nents, meta_sg_offset);
25102cdfcdd8SMax Gurtovoy }
25112cdfcdd8SMax Gurtovoy EXPORT_SYMBOL(ib_map_mr_sg_pi);
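/*
 * Example (editorial sketch): mapping separate data and metadata SG lists
 * onto an IB_MR_TYPE_INTEGRITY MR.  The NULL sg offsets and PAGE_SIZE are
 * hypothetical choices; per the kernel-doc above, 0 means success.
 */
static int example_map_pi(struct ib_mr *mr,
			  struct scatterlist *data_sg, int data_nents,
			  struct scatterlist *meta_sg, int meta_nents)
{
	return ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
			       meta_sg, meta_nents, NULL, PAGE_SIZE);
}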
25122cdfcdd8SMax Gurtovoy 
25132cdfcdd8SMax Gurtovoy /**
25144c67e2bfSSagi Grimberg  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
25154c67e2bfSSagi Grimberg  *     and set it as the memory region.
25164c67e2bfSSagi Grimberg  * @mr:            memory region
25174c67e2bfSSagi Grimberg  * @sg:            dma mapped scatterlist
25184c67e2bfSSagi Grimberg  * @sg_nents:      number of entries in sg
2519ff2ba993SChristoph Hellwig  * @sg_offset:     offset in bytes into sg
25204c67e2bfSSagi Grimberg  * @page_size:     page vector desired page size
25214c67e2bfSSagi Grimberg  *
25224c67e2bfSSagi Grimberg  * Constraints:
25234c67e2bfSSagi Grimberg  * - The first sg element is allowed to have an offset.
252452746129SBart Van Assche  * - Each sg element must either be aligned to page_size or virtually
252552746129SBart Van Assche  *   contiguous to the previous element. In case an sg element has a
252652746129SBart Van Assche  *   non-contiguous offset, the mapping prefix will not include it.
25274c67e2bfSSagi Grimberg  * - The last sg element is allowed to have length less than page_size.
25284c67e2bfSSagi Grimberg  * - If the sg_nents total byte length exceeds the MR's max_num_sg *
25294c67e2bfSSagi Grimberg  *   page_size, then only max_num_sg entries will be mapped.
253052746129SBart Van Assche  * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2531f5aa9159SSagi Grimberg  *   constraints holds and the page_size argument is ignored.
25324c67e2bfSSagi Grimberg  *
25334c67e2bfSSagi Grimberg  * Returns the number of sg elements that were mapped to the memory region.
25344c67e2bfSSagi Grimberg  *
25354c67e2bfSSagi Grimberg  * After this completes successfully, the memory region
25364c67e2bfSSagi Grimberg  * is ready for registration.
25374c67e2bfSSagi Grimberg  */
2538ff2ba993SChristoph Hellwig int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
25399aa8b321SBart Van Assche 		 unsigned int *sg_offset, unsigned int page_size)
25404c67e2bfSSagi Grimberg {
25413023a1e9SKamal Heib 	if (unlikely(!mr->device->ops.map_mr_sg))
254287915bf8SLeon Romanovsky 		return -EOPNOTSUPP;
25434c67e2bfSSagi Grimberg 
25444c67e2bfSSagi Grimberg 	mr->page_size = page_size;
25454c67e2bfSSagi Grimberg 
25463023a1e9SKamal Heib 	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
25474c67e2bfSSagi Grimberg }
25484c67e2bfSSagi Grimberg EXPORT_SYMBOL(ib_map_mr_sg);
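/*
 * Example (editorial sketch): the usual fast-registration sequence - map
 * the SG list, refresh the key, then post an IB_WR_REG_MR work request.
 * The access flags are hypothetical.
 */
static int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
			    struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr	 = mr;
	reg_wr.key	 = mr->rkey;
	reg_wr.access	 = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}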
25494c67e2bfSSagi Grimberg 
25504c67e2bfSSagi Grimberg /**
25514c67e2bfSSagi Grimberg  * ib_sg_to_pages() - Convert the largest prefix of a sg list
25524c67e2bfSSagi Grimberg  *     to a page vector
25534c67e2bfSSagi Grimberg  * @mr:            memory region
25544c67e2bfSSagi Grimberg  * @sgl:           dma mapped scatterlist
25554c67e2bfSSagi Grimberg  * @sg_nents:      number of entries in sgl
25569aa8b321SBart Van Assche  * @sg_offset_p:   IN:  start offset in bytes into sg
25579aa8b321SBart Van Assche  *                 OUT: offset in bytes for element n of the sg of the first
25589aa8b321SBart Van Assche  *                      byte that has not been processed where n is the return
25599aa8b321SBart Van Assche  *                      value of this function.
25604c67e2bfSSagi Grimberg  * @set_page:      driver page assignment function pointer
25614c67e2bfSSagi Grimberg  *
25628f5ba10eSBart Van Assche  * Core service helper for drivers to convert the largest
25634c67e2bfSSagi Grimberg  * prefix of the given sg list to a page vector. The sg list
25644c67e2bfSSagi Grimberg  * prefix converted is the prefix that meets the requirements
25654c67e2bfSSagi Grimberg  * of ib_map_mr_sg.
25664c67e2bfSSagi Grimberg  *
25674c67e2bfSSagi Grimberg  * Returns the number of sg elements that were assigned to
25684c67e2bfSSagi Grimberg  * a page vector.
25694c67e2bfSSagi Grimberg  */
2570ff2ba993SChristoph Hellwig int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
25719aa8b321SBart Van Assche 		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
25724c67e2bfSSagi Grimberg {
25734c67e2bfSSagi Grimberg 	struct scatterlist *sg;
2574b6aeb980SBart Van Assche 	u64 last_end_dma_addr = 0;
25759aa8b321SBart Van Assche 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
25764c67e2bfSSagi Grimberg 	unsigned int last_page_off = 0;
25774c67e2bfSSagi Grimberg 	u64 page_mask = ~((u64)mr->page_size - 1);
25788f5ba10eSBart Van Assche 	int i, ret;
25794c67e2bfSSagi Grimberg 
25809aa8b321SBart Van Assche 	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
25819aa8b321SBart Van Assche 		return -EINVAL;
25829aa8b321SBart Van Assche 
2583ff2ba993SChristoph Hellwig 	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
25844c67e2bfSSagi Grimberg 	mr->length = 0;
25854c67e2bfSSagi Grimberg 
25864c67e2bfSSagi Grimberg 	for_each_sg(sgl, sg, sg_nents, i) {
2587ff2ba993SChristoph Hellwig 		u64 dma_addr = sg_dma_address(sg) + sg_offset;
25889aa8b321SBart Van Assche 		u64 prev_addr = dma_addr;
2589ff2ba993SChristoph Hellwig 		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
25904c67e2bfSSagi Grimberg 		u64 end_dma_addr = dma_addr + dma_len;
25914c67e2bfSSagi Grimberg 		u64 page_addr = dma_addr & page_mask;
25924c67e2bfSSagi Grimberg 
25938f5ba10eSBart Van Assche 		/*
25948f5ba10eSBart Van Assche 		 * For the second and later elements, check whether either the
25958f5ba10eSBart Van Assche 		 * end of element i-1 or the start of element i is not aligned
25968f5ba10eSBart Van Assche 		 * on a page boundary.
25978f5ba10eSBart Van Assche 		 */
25988f5ba10eSBart Van Assche 		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
25998f5ba10eSBart Van Assche 			/* Stop mapping if there is a gap. */
26008f5ba10eSBart Van Assche 			if (last_end_dma_addr != dma_addr)
26018f5ba10eSBart Van Assche 				break;
26024c67e2bfSSagi Grimberg 
26038f5ba10eSBart Van Assche 			/*
26048f5ba10eSBart Van Assche 			 * Coalesce this element with the last. If it is small
26058f5ba10eSBart Van Assche 			 * enough just update mr->length. Otherwise start
26068f5ba10eSBart Van Assche 			 * mapping from the next page.
26078f5ba10eSBart Van Assche 			 */
26088f5ba10eSBart Van Assche 			goto next_page;
26094c67e2bfSSagi Grimberg 		}
26104c67e2bfSSagi Grimberg 
26114c67e2bfSSagi Grimberg 		do {
26128f5ba10eSBart Van Assche 			ret = set_page(mr, page_addr);
26139aa8b321SBart Van Assche 			if (unlikely(ret < 0)) {
26149aa8b321SBart Van Assche 				sg_offset = prev_addr - sg_dma_address(sg);
26159aa8b321SBart Van Assche 				mr->length += prev_addr - dma_addr;
26169aa8b321SBart Van Assche 				if (sg_offset_p)
26179aa8b321SBart Van Assche 					*sg_offset_p = sg_offset;
26189aa8b321SBart Van Assche 				return i || sg_offset ? i : ret;
26199aa8b321SBart Van Assche 			}
26209aa8b321SBart Van Assche 			prev_addr = page_addr;
26218f5ba10eSBart Van Assche next_page:
26224c67e2bfSSagi Grimberg 			page_addr += mr->page_size;
26234c67e2bfSSagi Grimberg 		} while (page_addr < end_dma_addr);
26244c67e2bfSSagi Grimberg 
26254c67e2bfSSagi Grimberg 		mr->length += dma_len;
26264c67e2bfSSagi Grimberg 		last_end_dma_addr = end_dma_addr;
26274c67e2bfSSagi Grimberg 		last_page_off = end_dma_addr & ~page_mask;
2628ff2ba993SChristoph Hellwig 
2629ff2ba993SChristoph Hellwig 		sg_offset = 0;
26304c67e2bfSSagi Grimberg 	}
26314c67e2bfSSagi Grimberg 
26329aa8b321SBart Van Assche 	if (sg_offset_p)
26339aa8b321SBart Van Assche 		*sg_offset_p = 0;
26344c67e2bfSSagi Grimberg 	return i;
26354c67e2bfSSagi Grimberg }
26364c67e2bfSSagi Grimberg EXPORT_SYMBOL(ib_sg_to_pages);
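/*
 * Example (editorial sketch): how a driver would plug ib_sg_to_pages() into
 * its map_mr_sg hook.  struct example_mr and its page array are hypothetical
 * driver-private state.
 */
struct example_mr {
	struct ib_mr ibmr;
	u64 *pages;
	int npages;
	int max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	if (emr->npages == emr->max_pages)
		return -ENOMEM;

	emr->pages[emr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	emr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}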
2637765d6774SSteve Wise 
2638765d6774SSteve Wise struct ib_drain_cqe {
2639765d6774SSteve Wise 	struct ib_cqe cqe;
2640765d6774SSteve Wise 	struct completion done;
2641765d6774SSteve Wise };
2642765d6774SSteve Wise 
2643765d6774SSteve Wise static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2644765d6774SSteve Wise {
2645765d6774SSteve Wise 	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2646765d6774SSteve Wise 						cqe);
2647765d6774SSteve Wise 
2648765d6774SSteve Wise 	complete(&cqe->done);
2649765d6774SSteve Wise }
2650765d6774SSteve Wise 
2651765d6774SSteve Wise /*
2652765d6774SSteve Wise  * Post a WR and block until its completion is reaped for the SQ.
2653765d6774SSteve Wise  */
2654765d6774SSteve Wise static void __ib_drain_sq(struct ib_qp *qp)
2655765d6774SSteve Wise {
2656f039f44fSBart Van Assche 	struct ib_cq *cq = qp->send_cq;
2657765d6774SSteve Wise 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2658765d6774SSteve Wise 	struct ib_drain_cqe sdrain;
2659a1ae7d03SBart Van Assche 	struct ib_rdma_wr swr = {
2660a1ae7d03SBart Van Assche 		.wr = {
26616ee68773SAndrew Morton 			.next = NULL,
26626ee68773SAndrew Morton 			{ .wr_cqe	= &sdrain.cqe, },
2663a1ae7d03SBart Van Assche 			.opcode	= IB_WR_RDMA_WRITE,
2664a1ae7d03SBart Van Assche 		},
2665a1ae7d03SBart Van Assche 	};
2666765d6774SSteve Wise 	int ret;
2667765d6774SSteve Wise 
2668765d6774SSteve Wise 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2669765d6774SSteve Wise 	if (ret) {
2670765d6774SSteve Wise 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2671765d6774SSteve Wise 		return;
2672765d6774SSteve Wise 	}
2673765d6774SSteve Wise 
2674aaebd377SMax Gurtovoy 	sdrain.cqe.done = ib_drain_qp_done;
2675aaebd377SMax Gurtovoy 	init_completion(&sdrain.done);
2676aaebd377SMax Gurtovoy 
26771fec77bfSBart Van Assche 	ret = ib_post_send(qp, &swr.wr, NULL);
2678765d6774SSteve Wise 	if (ret) {
2679765d6774SSteve Wise 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2680765d6774SSteve Wise 		return;
2681765d6774SSteve Wise 	}
2682765d6774SSteve Wise 
2683f039f44fSBart Van Assche 	if (cq->poll_ctx == IB_POLL_DIRECT)
2684f039f44fSBart Van Assche 		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2685f039f44fSBart Van Assche 			ib_process_cq_direct(cq, -1);
2686f039f44fSBart Van Assche 	else
2687765d6774SSteve Wise 		wait_for_completion(&sdrain.done);
2688765d6774SSteve Wise }
2689765d6774SSteve Wise 
2690765d6774SSteve Wise /*
2691765d6774SSteve Wise  * Post a WR and block until its completion is reaped for the RQ.
2692765d6774SSteve Wise  */
2693765d6774SSteve Wise static void __ib_drain_rq(struct ib_qp *qp)
2694765d6774SSteve Wise {
2695f039f44fSBart Van Assche 	struct ib_cq *cq = qp->recv_cq;
2696765d6774SSteve Wise 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2697765d6774SSteve Wise 	struct ib_drain_cqe rdrain;
26981fec77bfSBart Van Assche 	struct ib_recv_wr rwr = {};
2699765d6774SSteve Wise 	int ret;
2700765d6774SSteve Wise 
2701765d6774SSteve Wise 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2702765d6774SSteve Wise 	if (ret) {
2703765d6774SSteve Wise 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2704765d6774SSteve Wise 		return;
2705765d6774SSteve Wise 	}
2706765d6774SSteve Wise 
2707aaebd377SMax Gurtovoy 	rwr.wr_cqe = &rdrain.cqe;
2708aaebd377SMax Gurtovoy 	rdrain.cqe.done = ib_drain_qp_done;
2709aaebd377SMax Gurtovoy 	init_completion(&rdrain.done);
2710aaebd377SMax Gurtovoy 
27111fec77bfSBart Van Assche 	ret = ib_post_recv(qp, &rwr, NULL);
2712765d6774SSteve Wise 	if (ret) {
2713765d6774SSteve Wise 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2714765d6774SSteve Wise 		return;
2715765d6774SSteve Wise 	}
2716765d6774SSteve Wise 
2717f039f44fSBart Van Assche 	if (cq->poll_ctx == IB_POLL_DIRECT)
2718f039f44fSBart Van Assche 		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2719f039f44fSBart Van Assche 			ib_process_cq_direct(cq, -1);
2720f039f44fSBart Van Assche 	else
2721765d6774SSteve Wise 		wait_for_completion(&rdrain.done);
2722765d6774SSteve Wise }
2723765d6774SSteve Wise 
2724765d6774SSteve Wise /**
2725765d6774SSteve Wise  * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2726765d6774SSteve Wise  *		   application.
2727765d6774SSteve Wise  * @qp:            queue pair to drain
2728765d6774SSteve Wise  *
2729765d6774SSteve Wise  * If the device has a provider-specific drain function, then
2730765d6774SSteve Wise  * call that.  Otherwise call the generic drain function
2731765d6774SSteve Wise  * __ib_drain_sq().
2732765d6774SSteve Wise  *
2733765d6774SSteve Wise  * The caller must:
2734765d6774SSteve Wise  *
2735765d6774SSteve Wise  * ensure there is room in the CQ and SQ for the drain work request and
2736765d6774SSteve Wise  * completion.
2737765d6774SSteve Wise  *
2738f039f44fSBart Van Assche  * allocate the CQ using ib_alloc_cq().
2739765d6774SSteve Wise  *
2740765d6774SSteve Wise  * ensure that there are no other contexts that are posting WRs concurrently.
2741765d6774SSteve Wise  * Otherwise the drain is not guaranteed.
2742765d6774SSteve Wise  */
2743765d6774SSteve Wise void ib_drain_sq(struct ib_qp *qp)
2744765d6774SSteve Wise {
27453023a1e9SKamal Heib 	if (qp->device->ops.drain_sq)
27463023a1e9SKamal Heib 		qp->device->ops.drain_sq(qp);
2747765d6774SSteve Wise 	else
2748765d6774SSteve Wise 		__ib_drain_sq(qp);
27493e5901cbSChuck Lever 	trace_cq_drain_complete(qp->send_cq);
2750765d6774SSteve Wise }
2751765d6774SSteve Wise EXPORT_SYMBOL(ib_drain_sq);
2752765d6774SSteve Wise 
2753765d6774SSteve Wise /**
2754765d6774SSteve Wise  * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2755765d6774SSteve Wise  *		   application.
2756765d6774SSteve Wise  * @qp:            queue pair to drain
2757765d6774SSteve Wise  *
2758765d6774SSteve Wise  * If the device has a provider-specific drain function, then
2759765d6774SSteve Wise  * call that.  Otherwise call the generic drain function
2760765d6774SSteve Wise  * __ib_drain_rq().
2761765d6774SSteve Wise  *
2762765d6774SSteve Wise  * The caller must:
2763765d6774SSteve Wise  *
2764765d6774SSteve Wise  * ensure there is room in the CQ and RQ for the drain work request and
2765765d6774SSteve Wise  * completion.
2766765d6774SSteve Wise  *
2767f039f44fSBart Van Assche  * allocate the CQ using ib_alloc_cq().
2768765d6774SSteve Wise  *
2769765d6774SSteve Wise  * ensure that there are no other contexts that are posting WRs concurrently.
2770765d6774SSteve Wise  * Otherwise the drain is not guaranteed.
2771765d6774SSteve Wise  */
2772765d6774SSteve Wise void ib_drain_rq(struct ib_qp *qp)
2773765d6774SSteve Wise {
27743023a1e9SKamal Heib 	if (qp->device->ops.drain_rq)
27753023a1e9SKamal Heib 		qp->device->ops.drain_rq(qp);
2776765d6774SSteve Wise 	else
2777765d6774SSteve Wise 		__ib_drain_rq(qp);
27783e5901cbSChuck Lever 	trace_cq_drain_complete(qp->recv_cq);
2779765d6774SSteve Wise }
2780765d6774SSteve Wise EXPORT_SYMBOL(ib_drain_rq);
2781765d6774SSteve Wise 
2782765d6774SSteve Wise /**
2783765d6774SSteve Wise  * ib_drain_qp() - Block until all CQEs have been consumed by the
2784765d6774SSteve Wise  *		   application on both the RQ and SQ.
2785765d6774SSteve Wise  * @qp:            queue pair to drain
2786765d6774SSteve Wise  *
2787765d6774SSteve Wise  * The caller must:
2788765d6774SSteve Wise  *
2789765d6774SSteve Wise  * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2790765d6774SSteve Wise  * and completions.
2791765d6774SSteve Wise  *
2792f039f44fSBart Van Assche  * allocate the CQs using ib_alloc_cq().
2793765d6774SSteve Wise  *
2794765d6774SSteve Wise  * ensure that there are no other contexts that are posting WRs concurrently.
2795765d6774SSteve Wise  * Otherwise the drain is not guaranteed.
2796765d6774SSteve Wise  */
2797765d6774SSteve Wise void ib_drain_qp(struct ib_qp *qp)
2798765d6774SSteve Wise {
2799765d6774SSteve Wise 	ib_drain_sq(qp);
280042235f80SSagi Grimberg 	if (!qp->srq)
2801765d6774SSteve Wise 		ib_drain_rq(qp);
2802765d6774SSteve Wise }
2803765d6774SSteve Wise EXPORT_SYMBOL(ib_drain_qp);
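/*
 * Example (editorial sketch): draining before destroying, so no completions
 * can arrive after the QP and its resources are gone.
 */
static void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}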
2804f6a8a19bSDenis Drozdov 
2805f6a8a19bSDenis Drozdov struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
2806f6a8a19bSDenis Drozdov 				     enum rdma_netdev_t type, const char *name,
2807f6a8a19bSDenis Drozdov 				     unsigned char name_assign_type,
2808f6a8a19bSDenis Drozdov 				     void (*setup)(struct net_device *))
2809f6a8a19bSDenis Drozdov {
2810f6a8a19bSDenis Drozdov 	struct rdma_netdev_alloc_params params;
2811f6a8a19bSDenis Drozdov 	struct net_device *netdev;
2812f6a8a19bSDenis Drozdov 	int rc;
2813f6a8a19bSDenis Drozdov 
28143023a1e9SKamal Heib 	if (!device->ops.rdma_netdev_get_params)
2815f6a8a19bSDenis Drozdov 		return ERR_PTR(-EOPNOTSUPP);
2816f6a8a19bSDenis Drozdov 
28173023a1e9SKamal Heib 	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
28183023a1e9SKamal Heib 						&params);
2819f6a8a19bSDenis Drozdov 	if (rc)
2820f6a8a19bSDenis Drozdov 		return ERR_PTR(rc);
2821f6a8a19bSDenis Drozdov 
2822f6a8a19bSDenis Drozdov 	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2823f6a8a19bSDenis Drozdov 				  setup, params.txqs, params.rxqs);
2824f6a8a19bSDenis Drozdov 	if (!netdev)
2825f6a8a19bSDenis Drozdov 		return ERR_PTR(-ENOMEM);
2826f6a8a19bSDenis Drozdov 
2827f6a8a19bSDenis Drozdov 	return netdev;
2828f6a8a19bSDenis Drozdov }
2829f6a8a19bSDenis Drozdov EXPORT_SYMBOL(rdma_alloc_netdev);
28305d6b0cb3SDenis Drozdov 
28315d6b0cb3SDenis Drozdov int rdma_init_netdev(struct ib_device *device, u8 port_num,
28325d6b0cb3SDenis Drozdov 		     enum rdma_netdev_t type, const char *name,
28335d6b0cb3SDenis Drozdov 		     unsigned char name_assign_type,
28345d6b0cb3SDenis Drozdov 		     void (*setup)(struct net_device *),
28355d6b0cb3SDenis Drozdov 		     struct net_device *netdev)
28365d6b0cb3SDenis Drozdov {
28375d6b0cb3SDenis Drozdov 	struct rdma_netdev_alloc_params params;
28385d6b0cb3SDenis Drozdov 	int rc;
28395d6b0cb3SDenis Drozdov 
28403023a1e9SKamal Heib 	if (!device->ops.rdma_netdev_get_params)
28415d6b0cb3SDenis Drozdov 		return -EOPNOTSUPP;
28425d6b0cb3SDenis Drozdov 
28433023a1e9SKamal Heib 	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
28443023a1e9SKamal Heib 						&params);
28455d6b0cb3SDenis Drozdov 	if (rc)
28465d6b0cb3SDenis Drozdov 		return rc;
28475d6b0cb3SDenis Drozdov 
28485d6b0cb3SDenis Drozdov 	return params.initialize_rdma_netdev(device, port_num,
28495d6b0cb3SDenis Drozdov 					     netdev, params.param);
28505d6b0cb3SDenis Drozdov }
28515d6b0cb3SDenis Drozdov EXPORT_SYMBOL(rdma_init_netdev);
2852a808273aSShiraz Saleem 
2853a808273aSShiraz Saleem void __rdma_block_iter_start(struct ib_block_iter *biter,
2854a808273aSShiraz Saleem 			     struct scatterlist *sglist, unsigned int nents,
2855a808273aSShiraz Saleem 			     unsigned long pgsz)
2856a808273aSShiraz Saleem {
2857a808273aSShiraz Saleem 	memset(biter, 0, sizeof(struct ib_block_iter));
2858a808273aSShiraz Saleem 	biter->__sg = sglist;
2859a808273aSShiraz Saleem 	biter->__sg_nents = nents;
2860a808273aSShiraz Saleem 
2861a808273aSShiraz Saleem 	/* Driver provides best block size to use */
2862a808273aSShiraz Saleem 	biter->__pg_bit = __fls(pgsz);
2863a808273aSShiraz Saleem }
2864a808273aSShiraz Saleem EXPORT_SYMBOL(__rdma_block_iter_start);
2865a808273aSShiraz Saleem 
2866a808273aSShiraz Saleem bool __rdma_block_iter_next(struct ib_block_iter *biter)
2867a808273aSShiraz Saleem {
2868a808273aSShiraz Saleem 	unsigned int block_offset;
2869a808273aSShiraz Saleem 
2870a808273aSShiraz Saleem 	if (!biter->__sg_nents || !biter->__sg)
2871a808273aSShiraz Saleem 		return false;
2872a808273aSShiraz Saleem 
2873a808273aSShiraz Saleem 	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
2874a808273aSShiraz Saleem 	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
2875a808273aSShiraz Saleem 	biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
2876a808273aSShiraz Saleem 
2877a808273aSShiraz Saleem 	if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
2878a808273aSShiraz Saleem 		biter->__sg_advance = 0;
2879a808273aSShiraz Saleem 		biter->__sg = sg_next(biter->__sg);
2880a808273aSShiraz Saleem 		biter->__sg_nents--;
2881a808273aSShiraz Saleem 	}
2882a808273aSShiraz Saleem 
2883a808273aSShiraz Saleem 	return true;
2884a808273aSShiraz Saleem }
2885a808273aSShiraz Saleem EXPORT_SYMBOL(__rdma_block_iter_next);
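/*
 * Example (editorial sketch): consuming the block iterator above through the
 * rdma_for_each_block() helper from ib_verbs.h, walking a DMA-mapped SG list
 * in aligned PAGE_SIZE blocks.
 */
static void example_walk_blocks(struct scatterlist *sgl, unsigned int nents)
{
	struct ib_block_iter biter;
	dma_addr_t addr;

	rdma_for_each_block(sgl, &biter, nents, PAGE_SIZE) {
		addr = rdma_block_iter_dma_address(&biter);
		pr_debug("block at %pad\n", &addr);
	}
}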
2886