/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004, 2020 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 */

#ifndef IB_VERBS_H
#define IB_VERBS_H

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
struct rdma_cm_id;
struct ib_port;
struct hw_stats_device_data;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

struct ib_ucq_object;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...)                       \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
do {                                                                    \
	static DEFINE_RATELIMIT_STATE(_rs,                              \
				      DEFAULT_RATELIMIT_INTERVAL,       \
				      DEFAULT_RATELIMIT_BURST);         \
	if (__ratelimit(&_rs))                                          \
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
do {                                                                    \
	static DEFINE_RATELIMIT_STATE(_rs,                              \
				      DEFAULT_RATELIMIT_INTERVAL,       \
				      DEFAULT_RATELIMIT_BURST);         \
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
				    ##__VA_ARGS__);                     \
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif

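/*
 * Example usage of the ibdev logging helpers above (an illustrative
 * sketch, not part of this header): messages are prefixed with the
 * device name.  The helper name and "cqn" argument are hypothetical.
 */
static inline void example_report_cq_overflow(struct ib_device *ibdev, u32 cqn)
{
	/* Rate-limited so a hot error path cannot flood the log */
	ibdev_warn_ratelimited(ibdev, "CQ %u overflowed\n", cqn);
	/* Compiled out unless dynamic debug enables this call site */
	ibdev_dbg(ibdev, "dumping state of CQ %u\n", cqn);
}
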
union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

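/*
 * Example (illustrative sketch, not part of this header): the all-zero
 * GID "zgid" serves as a sentinel for an empty GID table entry; since
 * union ib_gid is plain data, it is compared with memcmp().
 */
static inline bool example_gid_is_zero(const union ib_gid *gid)
{
	return !memcmp(gid, &zgid, sizeof(*gid));
}
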
enum ib_gid_type {
	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	u16			index;
	u32			port_num;
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;
	else if (network_type == RDMA_NETWORK_ROCE_V1)
		return IB_GID_TYPE_ROCE;
	else
		return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		return RDMA_NETWORK_ROCE_V1;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

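/*
 * Example (illustrative sketch, not part of this header): a RoCEv2
 * (IB_GID_TYPE_ROCE_UDP_ENCAP) GID always maps to an IPv4 or IPv6
 * network type, so the helper above can answer "is this RoCEv2?"
 * without inspecting gid_type directly.
 */
static inline bool example_gid_attr_is_roce_v2(const struct ib_gid_attr *attr)
{
	enum rdma_network_type nt = rdma_gid_attr_network_type(attr);

	return nt == RDMA_NETWORK_IPV4 || nt == RDMA_NETWORK_IPV6;
}
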
enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
	IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
	IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
	IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
	IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
	IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
	IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
	IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
	IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
	/* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
	IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
	IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
	IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
	IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
	IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,

	/* Reserved, old SEND_W_INV = 1 << 16,*/
	IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
	IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
	IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
	IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
	IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
	IB_DEVICE_MANAGED_FLOW_STEERING =
		IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING =
		IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
	/* Placement type attributes */
	IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
	IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
	IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
};

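/*
 * Example (illustrative sketch, not part of this header): these flags
 * are tested against the device_cap_flags word a ULP reads from the
 * device's attributes (struct ib_device_attr, defined later in this
 * file).  The helper name is hypothetical.
 */
static inline bool example_supports_fast_reg(u64 device_cap_flags)
{
	return !!(device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS);
}
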
enum ib_kernel_cap_flags {
	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IBK_LOCAL_DMA_LKEY = 1 << 0,
	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
	IBK_INTEGRITY_HANDOVER = 1 << 1,
	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
	IBK_ON_DEMAND_PAGING = 1 << 2,
	/* IB_MR_TYPE_SG_GAPS is supported */
	IBK_SG_GAPS_REG = 1 << 3,
	/* Driver supports RDMA_NLDEV_CMD_DELLINK */
	IBK_ALLOW_USER_UNREG = 1 << 4,

	/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
	/* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
	IBK_UD_TSO = 1 << 6,
	/* ipoib will use the device ops:
	 *   get_vf_config
	 *   get_vf_guid
	 *   get_vf_stats
	 *   set_vf_guid
	 *   set_vf_link_state
	 */
	IBK_VIRTUAL_FUNCTION = 1 << 7,
	/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
	IBK_RDMA_NETDEV_OPA = 1 << 8,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t  rc_odp_caps;
		uint32_t  uc_odp_caps;
		uint32_t  ud_odp_caps;
		uint32_t  xrc_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/*  Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	u32		comp_vector;
	u32		flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16     max_cq_moderation_count;
	u16     max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64		length;
	u64		offset;
	u32		access_flags;
};

struct ib_dm_alloc_attr {
	u64	length;
	u32	alignment;
	u32	flags;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	u64			kernel_cap_flags;
	int			max_send_sge;
	int			max_recv_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	unsigned int		max_pi_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps       cq_caps;
	u64			max_dm_size;
	/* Max entries for sgl for optimized performance per READ */
	u32			max_sgl_rd;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

enum opa_mtu {
	OPA_MTU_8192 = 6,
	OPA_MTU_10240 = 7
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
	}
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
}

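/*
 * Example (illustrative sketch, not part of this header):
 * ib_mtu_int_to_enum() rounds *down* to the nearest valid IB MTU,
 * which is the right direction when clamping a netdev MTU to what
 * the IB wire format can carry.
 */
static inline int example_clamp_mtu_bytes(int netdev_mtu)
{
	/* 9000 -> IB_MTU_4096 -> 4096; 1500 -> IB_MTU_1024 -> 1024 */
	return ib_mtu_enum_to_int(ib_mtu_int_to_enum(netdev_mtu));
}
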
enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_2X	= 16,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	  return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64,
	IB_SPEED_NDR	= 128,
	IB_SPEED_XDR	= 256,
};

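/*
 * Example (illustrative sketch, not part of this header): both width
 * and speed are bit-encoded rather than numeric, so estimating link
 * bandwidth means mapping each through a table first.  The per-lane
 * figures below are nominal IBTA data rates in Mb/s, rounded
 * (e.g. FDR is really ~14062 Mb/s per lane).
 */
static inline int example_link_mbps(enum ib_port_width width,
				    enum ib_port_speed speed)
{
	int lanes = ib_width_enum_to_int(width);
	int per_lane;

	switch (speed) {
	case IB_SPEED_SDR:	per_lane = 2500;   break;
	case IB_SPEED_DDR:	per_lane = 5000;   break;
	case IB_SPEED_QDR:	per_lane = 10000;  break;
	case IB_SPEED_FDR10:	per_lane = 10000;  break;
	case IB_SPEED_FDR:	per_lane = 14000;  break;
	case IB_SPEED_EDR:	per_lane = 25000;  break;
	case IB_SPEED_HDR:	per_lane = 50000;  break;
	case IB_SPEED_NDR:	per_lane = 100000; break;
	case IB_SPEED_XDR:	per_lane = 200000; break;
	default:		return -1;
	}
	return lanes < 0 ? -1 : lanes * per_lane;
}
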
enum ib_stat_flag {
	IB_STAT_FLAG_OPTIONAL = 1 << 0,
};

/**
 * struct rdma_stat_desc
 * @name - The name of the counter
 * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
 * @priv - Driver private information; Core code should not use
 */
struct rdma_stat_desc {
	const char *name;
	unsigned int flags;
	const void *priv;
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *    of counters, which are 64 bits and not guaranteed to be written
 *    atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @descs - Array of pointers to static descriptors used for the counters
 *   in directory.
 * @is_disabled - A bitmap to indicate each counter is currently disabled
 *   or not.
 * @num_counters - How many hardware counters there are.  If descs is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@descs) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const struct rdma_stat_desc *descs;
	unsigned long	*is_disabled;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
	const struct rdma_stat_desc *descs, int num_counters,
	unsigned long lifespan);

void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);

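/*
 * Example (illustrative sketch, not part of this header): a driver
 * keeps a static descriptor table and passes it to
 * rdma_alloc_hw_stats_struct() from its counter-allocation callback.
 * The counter names below are hypothetical.
 */
static const struct rdma_stat_desc example_stat_descs[] = {
	{ .name = "rx_packets" },
	{ .name = "tx_packets" },
	{ .name = "rx_drops", .flags = IB_STAT_FLAG_OPTIONAL },
};

static inline struct rdma_hw_stats *example_alloc_stats(void)
{
	/* Default lifespan: counters refresh at most every 10 ms */
	return rdma_alloc_hw_stats_struct(example_stat_descs,
					  ARRAY_SIZE(example_stat_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
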
/* Define bits for the various pieces of functionality this port needs the
 * core to support.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

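/*
 * Example (illustrative sketch, not part of this header): the
 * RDMA_CORE_PORT_* masks above compose protocol, address-format and
 * management bits, so a single AND against a port's core capability
 * word answers questions like "does this port run a RoCE protocol?".
 */
static inline bool example_core_caps_are_roce(u32 core_cap_flags)
{
	return !!(core_cap_flags & (RDMA_CORE_CAP_PROT_ROCE |
				    RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP));
}
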
struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	u32                     phys_mtu;
	int			gid_tbl_len;
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u16			active_speed;
	u8                      phys_state;
	u16			port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
	IB_PORT_OPA_MASK_CHG		= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u32		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

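/*
 * Example (illustrative sketch, not part of this header): a consumer
 * supplies a handler with the signature from struct ib_event_handler,
 * wires it up with INIT_IB_EVENT_HANDLER(), and registers it with the
 * core (ib_register_event_handler(), declared further down in this
 * header).  ib_event_msg() renders the event code as a string.
 */
static inline void example_async_event(struct ib_event_handler *handler,
				       struct ib_event *event)
{
	ibdev_info(event->device, "async event: %s\n",
		   ib_event_msg(event->event));
}
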
struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS  = 19,
	IB_RATE_50_GBPS  = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
	IB_RATE_800_GBPS = 23,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

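/*
 * Example (illustrative sketch, not part of this header): enum ib_rate
 * values are opaque codes, not ordered by speed (IB_RATE_5_GBPS is 5
 * while the faster IB_RATE_10_GBPS is 3), so rates must be compared
 * after conversion, never by comparing enum values directly.
 */
static inline bool example_rate_is_faster(enum ib_rate a, enum ib_rate b)
{
	return ib_rate_to_mbps(a) > ib_rate_to_mbps(b);
}
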
86217cd3a2dSSagi Grimberg 
86317cd3a2dSSagi Grimberg /**
8649bee178bSSagi Grimberg  * enum ib_mr_type - memory region type
8659bee178bSSagi Grimberg  * @IB_MR_TYPE_MEM_REG:       memory region that is used for
8669bee178bSSagi Grimberg  *                            normal registration
867f5aa9159SSagi Grimberg  * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
868f5aa9159SSagi Grimberg  *                            register any arbitrary sg lists (without
869f5aa9159SSagi Grimberg  *                            the normal mr constraints - see
870f5aa9159SSagi Grimberg  *                            ib_map_mr_sg)
871a0bc099aSMax Gurtovoy  * @IB_MR_TYPE_DM:            memory region that is used for device
872a0bc099aSMax Gurtovoy  *                            memory registration
873a0bc099aSMax Gurtovoy  * @IB_MR_TYPE_USER:          memory region that is used for the user-space
874a0bc099aSMax Gurtovoy  *                            application
875a0bc099aSMax Gurtovoy  * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
876a0bc099aSMax Gurtovoy  *                            without address translations (VA=PA)
87726bc7eaeSIsrael Rukshin  * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
87826bc7eaeSIsrael Rukshin  *                            data integrity operations
87917cd3a2dSSagi Grimberg  */
8809bee178bSSagi Grimberg enum ib_mr_type {
8819bee178bSSagi Grimberg 	IB_MR_TYPE_MEM_REG,
882f5aa9159SSagi Grimberg 	IB_MR_TYPE_SG_GAPS,
883a0bc099aSMax Gurtovoy 	IB_MR_TYPE_DM,
884a0bc099aSMax Gurtovoy 	IB_MR_TYPE_USER,
885a0bc099aSMax Gurtovoy 	IB_MR_TYPE_DMA,
88626bc7eaeSIsrael Rukshin 	IB_MR_TYPE_INTEGRITY,
88717cd3a2dSSagi Grimberg };
88817cd3a2dSSagi Grimberg 
8891b01d335SSagi Grimberg enum ib_mr_status_check {
8901b01d335SSagi Grimberg 	IB_MR_CHECK_SIG_STATUS = 1,
8911b01d335SSagi Grimberg };
8921b01d335SSagi Grimberg 
8931b01d335SSagi Grimberg /**
8941b01d335SSagi Grimberg  * struct ib_mr_status - Memory region status container
8951b01d335SSagi Grimberg  *
8961b01d335SSagi Grimberg  * @fail_status: Bitmask of MR checks status. For each
8971b01d335SSagi Grimberg  *     failed check a corresponding status bit is set.
8981b01d335SSagi Grimberg  * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
8991b01d335SSagi Grimberg  *     failure.
9001b01d335SSagi Grimberg  */
9011b01d335SSagi Grimberg struct ib_mr_status {
9021b01d335SSagi Grimberg 	u32		    fail_status;
9031b01d335SSagi Grimberg 	struct ib_sig_err   sig_err;
9041b01d335SSagi Grimberg };
9051b01d335SSagi Grimberg 
90671eeba16SMarcel Apfelbaum /**
907bf6a9e31SJack Morgenstein  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
908bf6a9e31SJack Morgenstein  * enum.
909bf6a9e31SJack Morgenstein  * @mult: multiple to convert.
910bf6a9e31SJack Morgenstein  */
9118385fd84SRoland Dreier __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
912bf6a9e31SJack Morgenstein 
913fa5d010cSMaor Gottlieb struct rdma_ah_init_attr {
914fa5d010cSMaor Gottlieb 	struct rdma_ah_attr *ah_attr;
915fa5d010cSMaor Gottlieb 	u32 flags;
91651aab126SMaor Gottlieb 	struct net_device *xmit_slave;
917fa5d010cSMaor Gottlieb };
918fa5d010cSMaor Gottlieb 
91944c58487SDasaratharaman Chandramouli enum rdma_ah_attr_type {
92087daac68SDon Hiatt 	RDMA_AH_ATTR_TYPE_UNDEFINED,
92144c58487SDasaratharaman Chandramouli 	RDMA_AH_ATTR_TYPE_IB,
92244c58487SDasaratharaman Chandramouli 	RDMA_AH_ATTR_TYPE_ROCE,
92364b4646eSDasaratharaman Chandramouli 	RDMA_AH_ATTR_TYPE_OPA,
92444c58487SDasaratharaman Chandramouli };
92544c58487SDasaratharaman Chandramouli 
92644c58487SDasaratharaman Chandramouli struct ib_ah_attr {
92744c58487SDasaratharaman Chandramouli 	u16			dlid;
92844c58487SDasaratharaman Chandramouli 	u8			src_path_bits;
92944c58487SDasaratharaman Chandramouli };
93044c58487SDasaratharaman Chandramouli 
93144c58487SDasaratharaman Chandramouli struct roce_ah_attr {
93244c58487SDasaratharaman Chandramouli 	u8			dmac[ETH_ALEN];
93344c58487SDasaratharaman Chandramouli };
93444c58487SDasaratharaman Chandramouli 
93564b4646eSDasaratharaman Chandramouli struct opa_ah_attr {
93664b4646eSDasaratharaman Chandramouli 	u32			dlid;
93764b4646eSDasaratharaman Chandramouli 	u8			src_path_bits;
938d98bb7f7SDon Hiatt 	bool			make_grd;
93964b4646eSDasaratharaman Chandramouli };
94064b4646eSDasaratharaman Chandramouli 
94190898850SDasaratharaman Chandramouli struct rdma_ah_attr {
942a4d61e84SRoland Dreier 	struct ib_global_route	grh;
943a4d61e84SRoland Dreier 	u8			sl;
944a4d61e84SRoland Dreier 	u8			static_rate;
9451fb7f897SMark Bloch 	u32			port_num;
94644c58487SDasaratharaman Chandramouli 	u8			ah_flags;
94744c58487SDasaratharaman Chandramouli 	enum rdma_ah_attr_type type;
94844c58487SDasaratharaman Chandramouli 	union {
94944c58487SDasaratharaman Chandramouli 		struct ib_ah_attr ib;
95044c58487SDasaratharaman Chandramouli 		struct roce_ah_attr roce;
95164b4646eSDasaratharaman Chandramouli 		struct opa_ah_attr opa;
95244c58487SDasaratharaman Chandramouli 	};
953a4d61e84SRoland Dreier };
954a4d61e84SRoland Dreier 
955a4d61e84SRoland Dreier enum ib_wc_status {
956a4d61e84SRoland Dreier 	IB_WC_SUCCESS,
957a4d61e84SRoland Dreier 	IB_WC_LOC_LEN_ERR,
958a4d61e84SRoland Dreier 	IB_WC_LOC_QP_OP_ERR,
959a4d61e84SRoland Dreier 	IB_WC_LOC_EEC_OP_ERR,
960a4d61e84SRoland Dreier 	IB_WC_LOC_PROT_ERR,
961a4d61e84SRoland Dreier 	IB_WC_WR_FLUSH_ERR,
962a4d61e84SRoland Dreier 	IB_WC_MW_BIND_ERR,
963a4d61e84SRoland Dreier 	IB_WC_BAD_RESP_ERR,
964a4d61e84SRoland Dreier 	IB_WC_LOC_ACCESS_ERR,
965a4d61e84SRoland Dreier 	IB_WC_REM_INV_REQ_ERR,
966a4d61e84SRoland Dreier 	IB_WC_REM_ACCESS_ERR,
967a4d61e84SRoland Dreier 	IB_WC_REM_OP_ERR,
968a4d61e84SRoland Dreier 	IB_WC_RETRY_EXC_ERR,
969a4d61e84SRoland Dreier 	IB_WC_RNR_RETRY_EXC_ERR,
970a4d61e84SRoland Dreier 	IB_WC_LOC_RDD_VIOL_ERR,
971a4d61e84SRoland Dreier 	IB_WC_REM_INV_RD_REQ_ERR,
972a4d61e84SRoland Dreier 	IB_WC_REM_ABORT_ERR,
973a4d61e84SRoland Dreier 	IB_WC_INV_EECN_ERR,
974a4d61e84SRoland Dreier 	IB_WC_INV_EEC_STATE_ERR,
975a4d61e84SRoland Dreier 	IB_WC_FATAL_ERR,
976a4d61e84SRoland Dreier 	IB_WC_RESP_TIMEOUT_ERR,
977a4d61e84SRoland Dreier 	IB_WC_GENERAL_ERR
978a4d61e84SRoland Dreier };
979a4d61e84SRoland Dreier 
980db7489e0SBart Van Assche const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
9812b1b5b60SSagi Grimberg 
982a4d61e84SRoland Dreier enum ib_wc_opcode {
983b60b9c02SBob Pearson 	IB_WC_SEND = IB_UVERBS_WC_SEND,
984b60b9c02SBob Pearson 	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
985b60b9c02SBob Pearson 	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
986b60b9c02SBob Pearson 	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
987b60b9c02SBob Pearson 	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
988b60b9c02SBob Pearson 	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
989b60b9c02SBob Pearson 	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
990b60b9c02SBob Pearson 	IB_WC_LSO = IB_UVERBS_WC_TSO,
9913ff81e82SXiao Yang 	IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
9924c67e2bfSSagi Grimberg 	IB_WC_REG_MR,
9935e80ba8fSVladimir Sokolovsky 	IB_WC_MASKED_COMP_SWAP,
9945e80ba8fSVladimir Sokolovsky 	IB_WC_MASKED_FETCH_ADD,
995208e3a13SLi Zhijian 	IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
996a4d61e84SRoland Dreier /*
997a4d61e84SRoland Dreier  * Set value of IB_WC_RECV so consumers can test if a completion is a
998a4d61e84SRoland Dreier  * receive by testing (opcode & IB_WC_RECV).
999a4d61e84SRoland Dreier  */
1000a4d61e84SRoland Dreier 	IB_WC_RECV			= 1 << 7,
1001a4d61e84SRoland Dreier 	IB_WC_RECV_RDMA_WITH_IMM
1002a4d61e84SRoland Dreier };
1003a4d61e84SRoland Dreier 
1004a4d61e84SRoland Dreier enum ib_wc_flags {
1005a4d61e84SRoland Dreier 	IB_WC_GRH		= 1,
100600f7ec36SSteve Wise 	IB_WC_WITH_IMM		= (1<<1),
100700f7ec36SSteve Wise 	IB_WC_WITH_INVALIDATE	= (1<<2),
1008d927d505SOr Gerlitz 	IB_WC_IP_CSUM_OK	= (1<<3),
1009dd5f03beSMatan Barak 	IB_WC_WITH_SMAC		= (1<<4),
1010dd5f03beSMatan Barak 	IB_WC_WITH_VLAN		= (1<<5),
1011c865f246SSomnath Kotur 	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
1012a4d61e84SRoland Dreier };
1013a4d61e84SRoland Dreier 
1014a4d61e84SRoland Dreier struct ib_wc {
101514d3a3b2SChristoph Hellwig 	union {
1016a4d61e84SRoland Dreier 		u64		wr_id;
101714d3a3b2SChristoph Hellwig 		struct ib_cqe	*wr_cqe;
101814d3a3b2SChristoph Hellwig 	};
1019a4d61e84SRoland Dreier 	enum ib_wc_status	status;
1020a4d61e84SRoland Dreier 	enum ib_wc_opcode	opcode;
1021a4d61e84SRoland Dreier 	u32			vendor_err;
1022a4d61e84SRoland Dreier 	u32			byte_len;
1023062dbb69SMichael S. Tsirkin 	struct ib_qp	       *qp;
102400f7ec36SSteve Wise 	union {
1025a4d61e84SRoland Dreier 		__be32		imm_data;
102600f7ec36SSteve Wise 		u32		invalidate_rkey;
102700f7ec36SSteve Wise 	} ex;
1028a4d61e84SRoland Dreier 	u32			src_qp;
1029cd2a6e7dSBodong Wang 	u32			slid;
1030a4d61e84SRoland Dreier 	int			wc_flags;
1031a4d61e84SRoland Dreier 	u16			pkey_index;
1032a4d61e84SRoland Dreier 	u8			sl;
1033a4d61e84SRoland Dreier 	u8			dlid_path_bits;
10341fb7f897SMark Bloch 	u32 port_num; /* valid only for DR SMPs on switches */
1035dd5f03beSMatan Barak 	u8			smac[ETH_ALEN];
1036dd5f03beSMatan Barak 	u16			vlan_id;
1037c865f246SSomnath Kotur 	u8			network_hdr_type;
1038a4d61e84SRoland Dreier };
1039a4d61e84SRoland Dreier 
1040ed23a727SRoland Dreier enum ib_cq_notify_flags {
1041ed23a727SRoland Dreier 	IB_CQ_SOLICITED			= 1 << 0,
1042ed23a727SRoland Dreier 	IB_CQ_NEXT_COMP			= 1 << 1,
1043ed23a727SRoland Dreier 	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1044ed23a727SRoland Dreier 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
1045a4d61e84SRoland Dreier };
1046a4d61e84SRoland Dreier 
104796104edaSSean Hefty enum ib_srq_type {
1048175ba58dSYishai Hadas 	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1049175ba58dSYishai Hadas 	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1050175ba58dSYishai Hadas 	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
105196104edaSSean Hefty };
105296104edaSSean Hefty 
ib_srq_has_cq(enum ib_srq_type srq_type)10531a56ff6dSArtemy Kovalyov static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
10541a56ff6dSArtemy Kovalyov {
10559c2c8496SArtemy Kovalyov 	return srq_type == IB_SRQT_XRC ||
10569c2c8496SArtemy Kovalyov 	       srq_type == IB_SRQT_TM;
10571a56ff6dSArtemy Kovalyov }
10581a56ff6dSArtemy Kovalyov 
1059a4d61e84SRoland Dreier enum ib_srq_attr_mask {
1060a4d61e84SRoland Dreier 	IB_SRQ_MAX_WR	= 1 << 0,
1061a4d61e84SRoland Dreier 	IB_SRQ_LIMIT	= 1 << 1,
1062a4d61e84SRoland Dreier };
1063a4d61e84SRoland Dreier 
1064a4d61e84SRoland Dreier struct ib_srq_attr {
1065a4d61e84SRoland Dreier 	u32	max_wr;
1066a4d61e84SRoland Dreier 	u32	max_sge;
1067a4d61e84SRoland Dreier 	u32	srq_limit;
1068a4d61e84SRoland Dreier };
1069a4d61e84SRoland Dreier 
1070a4d61e84SRoland Dreier struct ib_srq_init_attr {
1071a4d61e84SRoland Dreier 	void		      (*event_handler)(struct ib_event *, void *);
1072a4d61e84SRoland Dreier 	void		       *srq_context;
1073a4d61e84SRoland Dreier 	struct ib_srq_attr	attr;
107496104edaSSean Hefty 	enum ib_srq_type	srq_type;
1075418d5130SSean Hefty 
10761a56ff6dSArtemy Kovalyov 	struct {
10771a56ff6dSArtemy Kovalyov 		struct ib_cq   *cq;
1078418d5130SSean Hefty 		union {
1079418d5130SSean Hefty 			struct {
1080418d5130SSean Hefty 				struct ib_xrcd *xrcd;
1081418d5130SSean Hefty 			} xrc;
10829c2c8496SArtemy Kovalyov 
10839c2c8496SArtemy Kovalyov 			struct {
10849c2c8496SArtemy Kovalyov 				u32		max_num_tags;
10859c2c8496SArtemy Kovalyov 			} tag_matching;
10861a56ff6dSArtemy Kovalyov 		};
1087418d5130SSean Hefty 	} ext;
1088a4d61e84SRoland Dreier };
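/*
 * Setup sketch for a tag-matching SRQ (hedged example; "pd" and "cq"
 * are assumed to exist and error handling is omitted). XRC and TM SRQs
 * both take their completion queue through ext.cq, cf. ib_srq_has_cq()
 * above:
 *
 *	struct ib_srq_init_attr init = { };
 *	struct ib_srq *srq;
 *
 *	init.srq_type = IB_SRQT_TM;
 *	init.attr.max_wr = 256;
 *	init.attr.max_sge = 1;
 *	init.ext.cq = cq;
 *	init.ext.tag_matching.max_num_tags = 64;
 *	srq = ib_create_srq(pd, &init);
 */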
1089a4d61e84SRoland Dreier 
1090a4d61e84SRoland Dreier struct ib_qp_cap {
1091a4d61e84SRoland Dreier 	u32	max_send_wr;
1092a4d61e84SRoland Dreier 	u32	max_recv_wr;
1093a4d61e84SRoland Dreier 	u32	max_send_sge;
1094a4d61e84SRoland Dreier 	u32	max_recv_sge;
1095a4d61e84SRoland Dreier 	u32	max_inline_data;
1096a060b562SChristoph Hellwig 
1097a060b562SChristoph Hellwig 	/*
1098a060b562SChristoph Hellwig 	 * Maximum number of rdma_rw_ctx structures in flight at a time.
1099a060b562SChristoph Hellwig 	 * ib_create_qp() will calculate the right amount of needed WRs
1100a060b562SChristoph Hellwig 	 * and MRs based on this; a sizing sketch follows this struct.
1101a060b562SChristoph Hellwig 	 */
1102a060b562SChristoph Hellwig 	u32	max_rdma_ctxs;
1103a4d61e84SRoland Dreier };
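/*
 * Sizing sketch (hedged; the numbers are arbitrary): a ULP that keeps
 * at most eight rdma_rw_ctx operations in flight might fill the cap as
 *
 *	struct ib_qp_cap cap = { };
 *
 *	cap.max_send_wr   = 128;
 *	cap.max_recv_wr   = 128;
 *	cap.max_send_sge  = 2;
 *	cap.max_recv_sge  = 2;
 *	cap.max_rdma_ctxs = 8;	// extra WRs/MRs are derived from this
 */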
1104a4d61e84SRoland Dreier 
1105a4d61e84SRoland Dreier enum ib_sig_type {
1106a4d61e84SRoland Dreier 	IB_SIGNAL_ALL_WR,
1107a4d61e84SRoland Dreier 	IB_SIGNAL_REQ_WR
1108a4d61e84SRoland Dreier };
1109a4d61e84SRoland Dreier 
1110a4d61e84SRoland Dreier enum ib_qp_type {
1111a4d61e84SRoland Dreier 	/*
1112a4d61e84SRoland Dreier 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1113a4d61e84SRoland Dreier 	 * here (and in that order) since the MAD layer uses them as
1114a4d61e84SRoland Dreier 	 * indices into a 2-entry table.
1115a4d61e84SRoland Dreier 	 */
1116a4d61e84SRoland Dreier 	IB_QPT_SMI,
1117a4d61e84SRoland Dreier 	IB_QPT_GSI,
1118a4d61e84SRoland Dreier 
1119175ba58dSYishai Hadas 	IB_QPT_RC = IB_UVERBS_QPT_RC,
1120175ba58dSYishai Hadas 	IB_QPT_UC = IB_UVERBS_QPT_UC,
1121175ba58dSYishai Hadas 	IB_QPT_UD = IB_UVERBS_QPT_UD,
1122a4d61e84SRoland Dreier 	IB_QPT_RAW_IPV6,
1123b42b63cfSSean Hefty 	IB_QPT_RAW_ETHERTYPE,
1124175ba58dSYishai Hadas 	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1125175ba58dSYishai Hadas 	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1126175ba58dSYishai Hadas 	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
11270134f16bSJack Morgenstein 	IB_QPT_MAX,
1128175ba58dSYishai Hadas 	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
11290134f16bSJack Morgenstein 	/* Reserve a range for qp types internal to the low level driver.
11300134f16bSJack Morgenstein 	 * These qp types will not be visible at the IB core layer, so the
11310134f16bSJack Morgenstein 	 * IB_QPT_MAX usages should not be affected in the core layer
11320134f16bSJack Morgenstein 	 */
11330134f16bSJack Morgenstein 	IB_QPT_RESERVED1 = 0x1000,
11340134f16bSJack Morgenstein 	IB_QPT_RESERVED2,
11350134f16bSJack Morgenstein 	IB_QPT_RESERVED3,
11360134f16bSJack Morgenstein 	IB_QPT_RESERVED4,
11370134f16bSJack Morgenstein 	IB_QPT_RESERVED5,
11380134f16bSJack Morgenstein 	IB_QPT_RESERVED6,
11390134f16bSJack Morgenstein 	IB_QPT_RESERVED7,
11400134f16bSJack Morgenstein 	IB_QPT_RESERVED8,
11410134f16bSJack Morgenstein 	IB_QPT_RESERVED9,
11420134f16bSJack Morgenstein 	IB_QPT_RESERVED10,
1143a4d61e84SRoland Dreier };
1144a4d61e84SRoland Dreier 
1145b846f25aSEli Cohen enum ib_qp_create_flags {
1146b846f25aSEli Cohen 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1147175ba58dSYishai Hadas 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
1148175ba58dSYishai Hadas 		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
11498a06ce59SLeon Romanovsky 	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
11508a06ce59SLeon Romanovsky 	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
11518a06ce59SLeon Romanovsky 	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
115290f1d1b4SMatan Barak 	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1153c0a6cbb9SIsrael Rukshin 	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
11547f90a5a0SGary Leshner 	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
1155175ba58dSYishai Hadas 	IB_QP_CREATE_SCATTER_FCS		=
1156175ba58dSYishai Hadas 		IB_UVERBS_QP_CREATE_SCATTER_FCS,
1157175ba58dSYishai Hadas 	IB_QP_CREATE_CVLAN_STRIPPING		=
1158175ba58dSYishai Hadas 		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
115902984cc7SYishai Hadas 	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
1160175ba58dSYishai Hadas 	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
1161175ba58dSYishai Hadas 		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1162d2b57063SJack Morgenstein 	/* reserve bits 26-31 for low level drivers' internal use */
1163d2b57063SJack Morgenstein 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1164d2b57063SJack Morgenstein 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1165b846f25aSEli Cohen };
1166b846f25aSEli Cohen 
116773c40c61SYishai Hadas /*
116873c40c61SYishai Hadas  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
116973c40c61SYishai Hadas  * callback to destroy the passed in QP.
117073c40c61SYishai Hadas  */
117173c40c61SYishai Hadas 
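/*
 * Deferral sketch (hypothetical consumer code; "struct my_conn" and its
 * teardown_work are assumptions): destroy the QP from process context,
 * never from the event callback itself.
 *
 *	static void my_qp_event(struct ib_event *event, void *context)
 *	{
 *		struct my_conn *conn = context;
 *
 *		// ib_destroy_qp(event->element.qp) is forbidden here;
 *		// hand the teardown off to a workqueue instead.
 *		queue_work(system_wq, &conn->teardown_work);
 *	}
 */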
1172a4d61e84SRoland Dreier struct ib_qp_init_attr {
1173312b8f79SMark Zhang 	/* This callback occurs in workqueue context */
1174a4d61e84SRoland Dreier 	void                  (*event_handler)(struct ib_event *, void *);
1175eb93c82eSChuck Lever 
1176a4d61e84SRoland Dreier 	void		       *qp_context;
1177a4d61e84SRoland Dreier 	struct ib_cq	       *send_cq;
1178a4d61e84SRoland Dreier 	struct ib_cq	       *recv_cq;
1179a4d61e84SRoland Dreier 	struct ib_srq	       *srq;
1180b42b63cfSSean Hefty 	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1181a4d61e84SRoland Dreier 	struct ib_qp_cap	cap;
1182a4d61e84SRoland Dreier 	enum ib_sig_type	sq_sig_type;
1183a4d61e84SRoland Dreier 	enum ib_qp_type		qp_type;
1184b56511c1SNathan Chancellor 	u32			create_flags;
1185a060b562SChristoph Hellwig 
1186a060b562SChristoph Hellwig 	/*
1187a060b562SChristoph Hellwig 	 * Only needed for special QP types, or when using the RW API.
1188a060b562SChristoph Hellwig 	 */
11891fb7f897SMark Bloch 	u32			port_num;
1190a9017e23SYishai Hadas 	struct ib_rwq_ind_table *rwq_ind_tbl;
119102984cc7SYishai Hadas 	u32			source_qpn;
1192a4d61e84SRoland Dreier };
1193a4d61e84SRoland Dreier 
11940e0ec7e0SSean Hefty struct ib_qp_open_attr {
11950e0ec7e0SSean Hefty 	void                  (*event_handler)(struct ib_event *, void *);
11960e0ec7e0SSean Hefty 	void		       *qp_context;
11970e0ec7e0SSean Hefty 	u32			qp_num;
11980e0ec7e0SSean Hefty 	enum ib_qp_type		qp_type;
11990e0ec7e0SSean Hefty };
12000e0ec7e0SSean Hefty 
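/*
 * RNR NAK timer field values: the constant's suffix encodes the delay
 * in milliseconds, with the underscore standing in for the decimal
 * point, e.g. IB_RNR_TIMER_000_16 is 0.16 ms and IB_RNR_TIMER_655_36
 * is 655.36 ms.
 */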
1201a4d61e84SRoland Dreier enum ib_rnr_timeout {
1202a4d61e84SRoland Dreier 	IB_RNR_TIMER_655_36 =  0,
1203a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_01 =  1,
1204a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_02 =  2,
1205a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_03 =  3,
1206a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_04 =  4,
1207a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_06 =  5,
1208a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_08 =  6,
1209a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_12 =  7,
1210a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_16 =  8,
1211a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_24 =  9,
1212a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_32 = 10,
1213a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_48 = 11,
1214a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_64 = 12,
1215a4d61e84SRoland Dreier 	IB_RNR_TIMER_000_96 = 13,
1216a4d61e84SRoland Dreier 	IB_RNR_TIMER_001_28 = 14,
1217a4d61e84SRoland Dreier 	IB_RNR_TIMER_001_92 = 15,
1218a4d61e84SRoland Dreier 	IB_RNR_TIMER_002_56 = 16,
1219a4d61e84SRoland Dreier 	IB_RNR_TIMER_003_84 = 17,
1220a4d61e84SRoland Dreier 	IB_RNR_TIMER_005_12 = 18,
1221a4d61e84SRoland Dreier 	IB_RNR_TIMER_007_68 = 19,
1222a4d61e84SRoland Dreier 	IB_RNR_TIMER_010_24 = 20,
1223a4d61e84SRoland Dreier 	IB_RNR_TIMER_015_36 = 21,
1224a4d61e84SRoland Dreier 	IB_RNR_TIMER_020_48 = 22,
1225a4d61e84SRoland Dreier 	IB_RNR_TIMER_030_72 = 23,
1226a4d61e84SRoland Dreier 	IB_RNR_TIMER_040_96 = 24,
1227a4d61e84SRoland Dreier 	IB_RNR_TIMER_061_44 = 25,
1228a4d61e84SRoland Dreier 	IB_RNR_TIMER_081_92 = 26,
1229a4d61e84SRoland Dreier 	IB_RNR_TIMER_122_88 = 27,
1230a4d61e84SRoland Dreier 	IB_RNR_TIMER_163_84 = 28,
1231a4d61e84SRoland Dreier 	IB_RNR_TIMER_245_76 = 29,
1232a4d61e84SRoland Dreier 	IB_RNR_TIMER_327_68 = 30,
1233a4d61e84SRoland Dreier 	IB_RNR_TIMER_491_52 = 31
1234a4d61e84SRoland Dreier };
1235a4d61e84SRoland Dreier 
1236a4d61e84SRoland Dreier enum ib_qp_attr_mask {
1237a4d61e84SRoland Dreier 	IB_QP_STATE			= 1,
1238a4d61e84SRoland Dreier 	IB_QP_CUR_STATE			= (1<<1),
1239a4d61e84SRoland Dreier 	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1240a4d61e84SRoland Dreier 	IB_QP_ACCESS_FLAGS		= (1<<3),
1241a4d61e84SRoland Dreier 	IB_QP_PKEY_INDEX		= (1<<4),
1242a4d61e84SRoland Dreier 	IB_QP_PORT			= (1<<5),
1243a4d61e84SRoland Dreier 	IB_QP_QKEY			= (1<<6),
1244a4d61e84SRoland Dreier 	IB_QP_AV			= (1<<7),
1245a4d61e84SRoland Dreier 	IB_QP_PATH_MTU			= (1<<8),
1246a4d61e84SRoland Dreier 	IB_QP_TIMEOUT			= (1<<9),
1247a4d61e84SRoland Dreier 	IB_QP_RETRY_CNT			= (1<<10),
1248a4d61e84SRoland Dreier 	IB_QP_RNR_RETRY			= (1<<11),
1249a4d61e84SRoland Dreier 	IB_QP_RQ_PSN			= (1<<12),
1250a4d61e84SRoland Dreier 	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1251a4d61e84SRoland Dreier 	IB_QP_ALT_PATH			= (1<<14),
1252a4d61e84SRoland Dreier 	IB_QP_MIN_RNR_TIMER		= (1<<15),
1253a4d61e84SRoland Dreier 	IB_QP_SQ_PSN			= (1<<16),
1254a4d61e84SRoland Dreier 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1255a4d61e84SRoland Dreier 	IB_QP_PATH_MIG_STATE		= (1<<18),
1256a4d61e84SRoland Dreier 	IB_QP_CAP			= (1<<19),
1257dd5f03beSMatan Barak 	IB_QP_DEST_QPN			= (1<<20),
1258aa744cc0SMatan Barak 	IB_QP_RESERVED1			= (1<<21),
1259aa744cc0SMatan Barak 	IB_QP_RESERVED2			= (1<<22),
1260aa744cc0SMatan Barak 	IB_QP_RESERVED3			= (1<<23),
1261aa744cc0SMatan Barak 	IB_QP_RESERVED4			= (1<<24),
1262528e5a1bSBodong Wang 	IB_QP_RATE_LIMIT		= (1<<25),
126326e990baSJason Gunthorpe 
126426e990baSJason Gunthorpe 	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1265a4d61e84SRoland Dreier };
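/*
 * Usage sketch (hedged consumer example): ib_modify_qp() only reads the
 * attributes named in the mask, e.g. forcing a QP into the error state
 * touches nothing but qp_state:
 *
 *	struct ib_qp_attr attr = { };
 *	int ret;
 *
 *	attr.qp_state = IB_QPS_ERR;
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */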
1266a4d61e84SRoland Dreier 
1267a4d61e84SRoland Dreier enum ib_qp_state {
1268a4d61e84SRoland Dreier 	IB_QPS_RESET,
1269a4d61e84SRoland Dreier 	IB_QPS_INIT,
1270a4d61e84SRoland Dreier 	IB_QPS_RTR,
1271a4d61e84SRoland Dreier 	IB_QPS_RTS,
1272a4d61e84SRoland Dreier 	IB_QPS_SQD,
1273a4d61e84SRoland Dreier 	IB_QPS_SQE,
1274a4d61e84SRoland Dreier 	IB_QPS_ERR
1275a4d61e84SRoland Dreier };
1276a4d61e84SRoland Dreier 
1277a4d61e84SRoland Dreier enum ib_mig_state {
1278a4d61e84SRoland Dreier 	IB_MIG_MIGRATED,
1279a4d61e84SRoland Dreier 	IB_MIG_REARM,
1280a4d61e84SRoland Dreier 	IB_MIG_ARMED
1281a4d61e84SRoland Dreier };
1282a4d61e84SRoland Dreier 
12837083e42eSShani Michaeli enum ib_mw_type {
12847083e42eSShani Michaeli 	IB_MW_TYPE_1 = 1,
12857083e42eSShani Michaeli 	IB_MW_TYPE_2 = 2
12867083e42eSShani Michaeli };
12877083e42eSShani Michaeli 
1288a4d61e84SRoland Dreier struct ib_qp_attr {
1289a4d61e84SRoland Dreier 	enum ib_qp_state	qp_state;
1290a4d61e84SRoland Dreier 	enum ib_qp_state	cur_qp_state;
1291a4d61e84SRoland Dreier 	enum ib_mtu		path_mtu;
1292a4d61e84SRoland Dreier 	enum ib_mig_state	path_mig_state;
1293a4d61e84SRoland Dreier 	u32			qkey;
1294a4d61e84SRoland Dreier 	u32			rq_psn;
1295a4d61e84SRoland Dreier 	u32			sq_psn;
1296a4d61e84SRoland Dreier 	u32			dest_qp_num;
1297a4d61e84SRoland Dreier 	int			qp_access_flags;
1298a4d61e84SRoland Dreier 	struct ib_qp_cap	cap;
129990898850SDasaratharaman Chandramouli 	struct rdma_ah_attr	ah_attr;
130090898850SDasaratharaman Chandramouli 	struct rdma_ah_attr	alt_ah_attr;
1301a4d61e84SRoland Dreier 	u16			pkey_index;
1302a4d61e84SRoland Dreier 	u16			alt_pkey_index;
1303a4d61e84SRoland Dreier 	u8			en_sqd_async_notify;
1304a4d61e84SRoland Dreier 	u8			sq_draining;
1305a4d61e84SRoland Dreier 	u8			max_rd_atomic;
1306a4d61e84SRoland Dreier 	u8			max_dest_rd_atomic;
1307a4d61e84SRoland Dreier 	u8			min_rnr_timer;
13081fb7f897SMark Bloch 	u32			port_num;
1309a4d61e84SRoland Dreier 	u8			timeout;
1310a4d61e84SRoland Dreier 	u8			retry_cnt;
1311a4d61e84SRoland Dreier 	u8			rnr_retry;
13121fb7f897SMark Bloch 	u32			alt_port_num;
1313a4d61e84SRoland Dreier 	u8			alt_timeout;
1314528e5a1bSBodong Wang 	u32			rate_limit;
131551aab126SMaor Gottlieb 	struct net_device	*xmit_slave;
1316a4d61e84SRoland Dreier };
1317a4d61e84SRoland Dreier 
1318a4d61e84SRoland Dreier enum ib_wr_opcode {
13199a59739bSJason Gunthorpe 	/* These are shared with userspace */
13209a59739bSJason Gunthorpe 	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
13219a59739bSJason Gunthorpe 	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
13229a59739bSJason Gunthorpe 	IB_WR_SEND = IB_UVERBS_WR_SEND,
13239a59739bSJason Gunthorpe 	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
13249a59739bSJason Gunthorpe 	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
13259a59739bSJason Gunthorpe 	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
13269a59739bSJason Gunthorpe 	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1327b60b9c02SBob Pearson 	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
13289a59739bSJason Gunthorpe 	IB_WR_LSO = IB_UVERBS_WR_TSO,
13299a59739bSJason Gunthorpe 	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
13309a59739bSJason Gunthorpe 	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
13319a59739bSJason Gunthorpe 	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
13329a59739bSJason Gunthorpe 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
13339a59739bSJason Gunthorpe 		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
13349a59739bSJason Gunthorpe 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
13359a59739bSJason Gunthorpe 		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1336208e3a13SLi Zhijian 	IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
13373ff81e82SXiao Yang 	IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,
13389a59739bSJason Gunthorpe 
13399a59739bSJason Gunthorpe 	/* These are kernel only and can not be issued by userspace */
13409a59739bSJason Gunthorpe 	IB_WR_REG_MR = 0x20,
134138ca87c6SMax Gurtovoy 	IB_WR_REG_MR_INTEGRITY,
13429a59739bSJason Gunthorpe 
13430134f16bSJack Morgenstein 	/* reserve values for low level drivers' internal use.
13440134f16bSJack Morgenstein 	 * These values will not be used at all in the ib core layer.
13450134f16bSJack Morgenstein 	 */
13460134f16bSJack Morgenstein 	IB_WR_RESERVED1 = 0xf0,
13470134f16bSJack Morgenstein 	IB_WR_RESERVED2,
13480134f16bSJack Morgenstein 	IB_WR_RESERVED3,
13490134f16bSJack Morgenstein 	IB_WR_RESERVED4,
13500134f16bSJack Morgenstein 	IB_WR_RESERVED5,
13510134f16bSJack Morgenstein 	IB_WR_RESERVED6,
13520134f16bSJack Morgenstein 	IB_WR_RESERVED7,
13530134f16bSJack Morgenstein 	IB_WR_RESERVED8,
13540134f16bSJack Morgenstein 	IB_WR_RESERVED9,
13550134f16bSJack Morgenstein 	IB_WR_RESERVED10,
1356a4d61e84SRoland Dreier };
1357a4d61e84SRoland Dreier 
1358a4d61e84SRoland Dreier enum ib_send_flags {
1359a4d61e84SRoland Dreier 	IB_SEND_FENCE		= 1,
1360a4d61e84SRoland Dreier 	IB_SEND_SIGNALED	= (1<<1),
1361a4d61e84SRoland Dreier 	IB_SEND_SOLICITED	= (1<<2),
1362e0605d91SEli Cohen 	IB_SEND_INLINE		= (1<<3),
13630134f16bSJack Morgenstein 	IB_SEND_IP_CSUM		= (1<<4),
13640134f16bSJack Morgenstein 
13650134f16bSJack Morgenstein 	/* reserve bits 26-31 for low level drivers' internal use */
13660134f16bSJack Morgenstein 	IB_SEND_RESERVED_START	= (1 << 26),
13670134f16bSJack Morgenstein 	IB_SEND_RESERVED_END	= (1 << 31),
1368a4d61e84SRoland Dreier };
1369a4d61e84SRoland Dreier 
1370a4d61e84SRoland Dreier struct ib_sge {
1371a4d61e84SRoland Dreier 	u64	addr;
1372a4d61e84SRoland Dreier 	u32	length;
1373a4d61e84SRoland Dreier 	u32	lkey;
1374a4d61e84SRoland Dreier };
1375a4d61e84SRoland Dreier 
137614d3a3b2SChristoph Hellwig struct ib_cqe {
137714d3a3b2SChristoph Hellwig 	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
137814d3a3b2SChristoph Hellwig };
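/*
 * Typical wr_cqe pattern (sketch; "struct my_request" is a hypothetical
 * consumer structure): embed the ib_cqe in the request and recover the
 * request in the completion handler via container_of().
 *
 *	struct my_request {
 *		struct ib_cqe	cqe;
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *		// req now identifies the completed work request
 *	}
 *
 * Before posting, set req->cqe.done = my_done and wr.wr_cqe = &req->cqe.
 */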
137914d3a3b2SChristoph Hellwig 
1380a4d61e84SRoland Dreier struct ib_send_wr {
1381a4d61e84SRoland Dreier 	struct ib_send_wr      *next;
138214d3a3b2SChristoph Hellwig 	union {
1383a4d61e84SRoland Dreier 		u64		wr_id;
138414d3a3b2SChristoph Hellwig 		struct ib_cqe	*wr_cqe;
138514d3a3b2SChristoph Hellwig 	};
1386a4d61e84SRoland Dreier 	struct ib_sge	       *sg_list;
1387a4d61e84SRoland Dreier 	int			num_sge;
1388a4d61e84SRoland Dreier 	enum ib_wr_opcode	opcode;
1389a4d61e84SRoland Dreier 	int			send_flags;
13900f39cf3dSRoland Dreier 	union {
1391a4d61e84SRoland Dreier 		__be32		imm_data;
13920f39cf3dSRoland Dreier 		u32		invalidate_rkey;
13930f39cf3dSRoland Dreier 	} ex;
1394e622f2f4SChristoph Hellwig };
1395e622f2f4SChristoph Hellwig 
1396e622f2f4SChristoph Hellwig struct ib_rdma_wr {
1397e622f2f4SChristoph Hellwig 	struct ib_send_wr	wr;
1398a4d61e84SRoland Dreier 	u64			remote_addr;
1399a4d61e84SRoland Dreier 	u32			rkey;
1400e622f2f4SChristoph Hellwig };
1401e622f2f4SChristoph Hellwig 
1402f696bf6dSBart Van Assche static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1403e622f2f4SChristoph Hellwig {
1404e622f2f4SChristoph Hellwig 	return container_of(wr, struct ib_rdma_wr, wr);
1405e622f2f4SChristoph Hellwig }
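/*
 * Build sketch (hedged; "sge", "raddr", "rkey" and "qp" are assumed to
 * be set up by the caller):
 *
 *	struct ib_rdma_wr wr = { };
 *	int ret;
 *
 *	wr.wr.opcode   = IB_WR_RDMA_WRITE;
 *	wr.wr.sg_list  = &sge;
 *	wr.wr.num_sge  = 1;
 *	wr.remote_addr = raddr;
 *	wr.rkey        = rkey;
 *	ret = ib_post_send(qp, &wr.wr, NULL);
 *
 * A provider's post_send handler recovers the container with rdma_wr().
 */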
1406e622f2f4SChristoph Hellwig 
1407e622f2f4SChristoph Hellwig struct ib_atomic_wr {
1408e622f2f4SChristoph Hellwig 	struct ib_send_wr	wr;
1409a4d61e84SRoland Dreier 	u64			remote_addr;
1410a4d61e84SRoland Dreier 	u64			compare_add;
1411a4d61e84SRoland Dreier 	u64			swap;
14125e80ba8fSVladimir Sokolovsky 	u64			compare_add_mask;
14135e80ba8fSVladimir Sokolovsky 	u64			swap_mask;
1414a4d61e84SRoland Dreier 	u32			rkey;
1415e622f2f4SChristoph Hellwig };
1416e622f2f4SChristoph Hellwig 
1417f696bf6dSBart Van Assche static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1418e622f2f4SChristoph Hellwig {
1419e622f2f4SChristoph Hellwig 	return container_of(wr, struct ib_atomic_wr, wr);
1420e622f2f4SChristoph Hellwig }
1421e622f2f4SChristoph Hellwig 
1422e622f2f4SChristoph Hellwig struct ib_ud_wr {
1423e622f2f4SChristoph Hellwig 	struct ib_send_wr	wr;
1424a4d61e84SRoland Dreier 	struct ib_ah		*ah;
1425c93570f2SEli Cohen 	void			*header;
1426c93570f2SEli Cohen 	int			hlen;
1427c93570f2SEli Cohen 	int			mss;
1428a4d61e84SRoland Dreier 	u32			remote_qpn;
1429a4d61e84SRoland Dreier 	u32			remote_qkey;
1430a4d61e84SRoland Dreier 	u16			pkey_index; /* valid for GSI only */
14311fb7f897SMark Bloch 	u32			port_num; /* valid for DR SMPs on switch only */
1432e622f2f4SChristoph Hellwig };
1433e622f2f4SChristoph Hellwig 
1434f696bf6dSBart Van Assche static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1435e622f2f4SChristoph Hellwig {
1436e622f2f4SChristoph Hellwig 	return container_of(wr, struct ib_ud_wr, wr);
1437e622f2f4SChristoph Hellwig }
1438e622f2f4SChristoph Hellwig 
14394c67e2bfSSagi Grimberg struct ib_reg_wr {
14404c67e2bfSSagi Grimberg 	struct ib_send_wr	wr;
14414c67e2bfSSagi Grimberg 	struct ib_mr		*mr;
14424c67e2bfSSagi Grimberg 	u32			key;
14434c67e2bfSSagi Grimberg 	int			access;
14444c67e2bfSSagi Grimberg };
14454c67e2bfSSagi Grimberg 
1446f696bf6dSBart Van Assche static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
14474c67e2bfSSagi Grimberg {
14484c67e2bfSSagi Grimberg 	return container_of(wr, struct ib_reg_wr, wr);
14494c67e2bfSSagi Grimberg }
14504c67e2bfSSagi Grimberg 
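/*
 * Registration sketch (hedged; "mr" is assumed to come from
 * ib_alloc_mr() followed by ib_map_mr_sg()):
 *
 *	struct ib_reg_wr reg = { };
 *	int ret;
 *
 *	reg.wr.opcode = IB_WR_REG_MR;
 *	reg.mr	      = mr;
 *	reg.key	      = mr->rkey;
 *	reg.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ret = ib_post_send(qp, &reg.wr, NULL);
 */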
1451a4d61e84SRoland Dreier struct ib_recv_wr {
1452a4d61e84SRoland Dreier 	struct ib_recv_wr      *next;
145314d3a3b2SChristoph Hellwig 	union {
1454a4d61e84SRoland Dreier 		u64		wr_id;
145514d3a3b2SChristoph Hellwig 		struct ib_cqe	*wr_cqe;
145614d3a3b2SChristoph Hellwig 	};
1457a4d61e84SRoland Dreier 	struct ib_sge	       *sg_list;
1458a4d61e84SRoland Dreier 	int			num_sge;
1459a4d61e84SRoland Dreier };
1460a4d61e84SRoland Dreier 
1461a4d61e84SRoland Dreier enum ib_access_flags {
14624fca0377SJason Gunthorpe 	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
14634fca0377SJason Gunthorpe 	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
14644fca0377SJason Gunthorpe 	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
14654fca0377SJason Gunthorpe 	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
14664fca0377SJason Gunthorpe 	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
14674fca0377SJason Gunthorpe 	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
14684fca0377SJason Gunthorpe 	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
14694fca0377SJason Gunthorpe 	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
14702233c660SMichael Guralnik 	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1471208e3a13SLi Zhijian 	IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
1472208e3a13SLi Zhijian 	IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,
14734fca0377SJason Gunthorpe 
147468d384b9SMichael Guralnik 	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
147568d384b9SMichael Guralnik 	IB_ACCESS_SUPPORTED =
1476208e3a13SLi Zhijian 		((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
1477a4d61e84SRoland Dreier };
1478a4d61e84SRoland Dreier 
1479b7d3e0a9SChristoph Hellwig /*
1480b7d3e0a9SChristoph Hellwig  * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1481b7d3e0a9SChristoph Hellwig  * are hidden here instead of a uapi header!
1482b7d3e0a9SChristoph Hellwig  */
1483a4d61e84SRoland Dreier enum ib_mr_rereg_flags {
1484a4d61e84SRoland Dreier 	IB_MR_REREG_TRANS	= 1,
1485a4d61e84SRoland Dreier 	IB_MR_REREG_PD		= (1<<1),
14867e6edb9bSMatan Barak 	IB_MR_REREG_ACCESS	= (1<<2),
14877e6edb9bSMatan Barak 	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1488a4d61e84SRoland Dreier };
1489a4d61e84SRoland Dreier 
1490882214e2SHaggai Eran struct ib_umem;
1491882214e2SHaggai Eran 
149238321256SMatan Barak enum rdma_remove_reason {
14931c77483eSYishai Hadas 	/*
14941c77483eSYishai Hadas 	 * Userspace requested uobject deletion or an initial attempt
14951c77483eSYishai Hadas 	 * to remove the uobject via cleanup. The call may fail.
14961c77483eSYishai Hadas 	 */
149738321256SMatan Barak 	RDMA_REMOVE_DESTROY,
149838321256SMatan Barak 	/* Context deletion. This call should delete the actual object itself */
149938321256SMatan Barak 	RDMA_REMOVE_CLOSE,
150038321256SMatan Barak 	/* Driver is being hot-unplugged. This call should delete the actual object itself */
150138321256SMatan Barak 	RDMA_REMOVE_DRIVER_REMOVE,
150287ad80abSJason Gunthorpe 	/* uobj is being cleaned-up before being committed */
150387ad80abSJason Gunthorpe 	RDMA_REMOVE_ABORT,
1504efa968eeSLeon Romanovsky 	/* The driver failed to destroy the uobject and is being disconnected */
1505efa968eeSLeon Romanovsky 	RDMA_REMOVE_DRIVER_FAILURE,
150638321256SMatan Barak };
150738321256SMatan Barak 
150843579b5fSParav Pandit struct ib_rdmacg_object {
150943579b5fSParav Pandit #ifdef CONFIG_CGROUP_RDMA
151043579b5fSParav Pandit 	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
151143579b5fSParav Pandit #endif
151243579b5fSParav Pandit };
151343579b5fSParav Pandit 
1514a4d61e84SRoland Dreier struct ib_ucontext {
1515a4d61e84SRoland Dreier 	struct ib_device       *device;
1516771addf6SMatan Barak 	struct ib_uverbs_file  *ufile;
15178ada2c1cSShachar Raindel 
151843579b5fSParav Pandit 	struct ib_rdmacg_object	cg_obj;
151960615210SLeon Romanovsky 	/*
152060615210SLeon Romanovsky 	 * Implementation details of the RDMA core, don't use in drivers:
152160615210SLeon Romanovsky 	 */
152260615210SLeon Romanovsky 	struct rdma_restrack_entry res;
15233411f9f0SMichal Kalderon 	struct xarray mmap_xa;
1524a4d61e84SRoland Dreier };
1525a4d61e84SRoland Dreier 
1526a4d61e84SRoland Dreier struct ib_uobject {
1527a4d61e84SRoland Dreier 	u64			user_handle;	/* handle given to us by userspace */
15286a5e9c88SJason Gunthorpe 	/* ufile & ucontext owning this object */
15296a5e9c88SJason Gunthorpe 	struct ib_uverbs_file  *ufile;
15306a5e9c88SJason Gunthorpe 	/* FIXME, save memory: ufile->context == context */
1531a4d61e84SRoland Dreier 	struct ib_ucontext     *context;	/* associated user context */
15329ead190bSRoland Dreier 	void		       *object;		/* containing object */
1533a4d61e84SRoland Dreier 	struct list_head	list;		/* link to context's list */
153443579b5fSParav Pandit 	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
1535b3d636b0SRoland Dreier 	int			id;		/* index into kernel idr */
15369ead190bSRoland Dreier 	struct kref		ref;
153738321256SMatan Barak 	atomic_t		usecnt;		/* protects exclusive access */
1538d144da8cSMike Marciniszyn 	struct rcu_head		rcu;		/* kfree_rcu() overhead */
153938321256SMatan Barak 
15406b0d08f4SJason Gunthorpe 	const struct uverbs_api_object *uapi_object;
1541a4d61e84SRoland Dreier };
1542a4d61e84SRoland Dreier 
1543a4d61e84SRoland Dreier struct ib_udata {
1544309243ecSYann Droneaud 	const void __user *inbuf;
1545a4d61e84SRoland Dreier 	void __user *outbuf;
1546a4d61e84SRoland Dreier 	size_t       inlen;
1547a4d61e84SRoland Dreier 	size_t       outlen;
1548a4d61e84SRoland Dreier };
1549a4d61e84SRoland Dreier 
1550a4d61e84SRoland Dreier struct ib_pd {
155196249d70SJason Gunthorpe 	u32			local_dma_lkey;
1552ed082d36SChristoph Hellwig 	u32			flags;
1553a4d61e84SRoland Dreier 	struct ib_device       *device;
1554a4d61e84SRoland Dreier 	struct ib_uobject      *uobject;
1555a4d61e84SRoland Dreier 	atomic_t          	usecnt; /* count all resources */
155650d46335SChristoph Hellwig 
1557ed082d36SChristoph Hellwig 	u32			unsafe_global_rkey;
1558ed082d36SChristoph Hellwig 
155950d46335SChristoph Hellwig 	/*
156050d46335SChristoph Hellwig 	 * Implementation details of the RDMA core, don't use in drivers:
156150d46335SChristoph Hellwig 	 */
156250d46335SChristoph Hellwig 	struct ib_mr	       *__internal_mr;
156302d8883fSLeon Romanovsky 	struct rdma_restrack_entry res;
1564a4d61e84SRoland Dreier };
1565a4d61e84SRoland Dreier 
156659991f94SSean Hefty struct ib_xrcd {
156759991f94SSean Hefty 	struct ib_device       *device;
1568d3d72d90SSean Hefty 	atomic_t		usecnt; /* count all exposed resources */
156953d0bd1eSSean Hefty 	struct inode	       *inode;
15706f3ca6f4SMaor Gottlieb 	struct rw_semaphore	tgt_qps_rwsem;
15716f3ca6f4SMaor Gottlieb 	struct xarray		tgt_qps;
157259991f94SSean Hefty };
157359991f94SSean Hefty 
1574a4d61e84SRoland Dreier struct ib_ah {
1575a4d61e84SRoland Dreier 	struct ib_device	*device;
1576a4d61e84SRoland Dreier 	struct ib_pd		*pd;
1577a4d61e84SRoland Dreier 	struct ib_uobject	*uobject;
15781a1f460fSJason Gunthorpe 	const struct ib_gid_attr *sgid_attr;
157944c58487SDasaratharaman Chandramouli 	enum rdma_ah_attr_type	type;
1580a4d61e84SRoland Dreier };
1581a4d61e84SRoland Dreier 
1582a4d61e84SRoland Dreier typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1583a4d61e84SRoland Dreier 
158414d3a3b2SChristoph Hellwig enum ib_poll_context {
158514d3a3b2SChristoph Hellwig 	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
158614d3a3b2SChristoph Hellwig 	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
1587f794809aSJack Morgenstein 	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1588c7ff819aSYamin Friedman 	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1589c7ff819aSYamin Friedman 
1590c7ff819aSYamin Friedman 	IB_POLL_DIRECT,		   /* caller context, no hw completions */
159114d3a3b2SChristoph Hellwig };
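/*
 * The poll context is fixed when the CQ is allocated, e.g. (sketch):
 *
 *	cq = ib_alloc_cq(device, NULL, 128, 0, IB_POLL_SOFTIRQ);
 *
 * IB_POLL_DIRECT CQs receive no automatic processing; their owner must
 * call ib_process_cq_direct() itself.
 */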
159214d3a3b2SChristoph Hellwig 
1593a4d61e84SRoland Dreier struct ib_cq {
1594a4d61e84SRoland Dreier 	struct ib_device       *device;
15955bd48c18SJason Gunthorpe 	struct ib_ucq_object   *uobject;
1596a4d61e84SRoland Dreier 	ib_comp_handler   	comp_handler;
1597a4d61e84SRoland Dreier 	void                  (*event_handler)(struct ib_event *, void *);
1598a4d61e84SRoland Dreier 	void                   *cq_context;
1599a4d61e84SRoland Dreier 	int               	cqe;
1600c7ff819aSYamin Friedman 	unsigned int		cqe_used;
1601a4d61e84SRoland Dreier 	atomic_t          	usecnt; /* count number of work queues */
160214d3a3b2SChristoph Hellwig 	enum ib_poll_context	poll_ctx;
160314d3a3b2SChristoph Hellwig 	struct ib_wc		*wc;
1604c7ff819aSYamin Friedman 	struct list_head        pool_entry;
160514d3a3b2SChristoph Hellwig 	union {
160614d3a3b2SChristoph Hellwig 		struct irq_poll		iop;
160714d3a3b2SChristoph Hellwig 		struct work_struct	work;
160814d3a3b2SChristoph Hellwig 	};
1609f794809aSJack Morgenstein 	struct workqueue_struct *comp_wq;
1610da662979SYamin Friedman 	struct dim *dim;
16113e5901cbSChuck Lever 
16123e5901cbSChuck Lever 	/* updated only by trace points */
16133e5901cbSChuck Lever 	ktime_t timestamp;
16143446cbd2SYamin Friedman 	u8 interrupt:1;
16153446cbd2SYamin Friedman 	u8 shared:1;
1616c7ff819aSYamin Friedman 	unsigned int comp_vector;
16173e5901cbSChuck Lever 
161802d8883fSLeon Romanovsky 	/*
161902d8883fSLeon Romanovsky 	 * Implementation details of the RDMA core, don't use in drivers:
162002d8883fSLeon Romanovsky 	 */
162102d8883fSLeon Romanovsky 	struct rdma_restrack_entry res;
1622a4d61e84SRoland Dreier };
1623a4d61e84SRoland Dreier 
1624a4d61e84SRoland Dreier struct ib_srq {
1625a4d61e84SRoland Dreier 	struct ib_device       *device;
1626a4d61e84SRoland Dreier 	struct ib_pd	       *pd;
16279fbe334cSJason Gunthorpe 	struct ib_usrq_object  *uobject;
1628a4d61e84SRoland Dreier 	void		      (*event_handler)(struct ib_event *, void *);
1629a4d61e84SRoland Dreier 	void		       *srq_context;
163096104edaSSean Hefty 	enum ib_srq_type	srq_type;
1631a4d61e84SRoland Dreier 	atomic_t		usecnt;
1632418d5130SSean Hefty 
16331a56ff6dSArtemy Kovalyov 	struct {
16341a56ff6dSArtemy Kovalyov 		struct ib_cq   *cq;
1635418d5130SSean Hefty 		union {
1636418d5130SSean Hefty 			struct {
1637418d5130SSean Hefty 				struct ib_xrcd *xrcd;
1638418d5130SSean Hefty 				u32		srq_num;
1639418d5130SSean Hefty 			} xrc;
16401a56ff6dSArtemy Kovalyov 		};
1641418d5130SSean Hefty 	} ext;
164248f8a70eSNeta Ostrovsky 
164348f8a70eSNeta Ostrovsky 	/*
164448f8a70eSNeta Ostrovsky 	 * Implementation details of the RDMA core, don't use in drivers:
164548f8a70eSNeta Ostrovsky 	 */
164648f8a70eSNeta Ostrovsky 	struct rdma_restrack_entry res;
1647a4d61e84SRoland Dreier };
1648a4d61e84SRoland Dreier 
1649ebaaee25SNoa Osherovich enum ib_raw_packet_caps {
165030ad63e7SXiao Yang 	/*
165130ad63e7SXiao Yang 	 * Stripping the cvlan from an incoming packet and reporting it
1652ebaaee25SNoa Osherovich 	 * in the matching work completion is supported.
1653ebaaee25SNoa Osherovich 	 */
165430ad63e7SXiao Yang 	IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
165530ad63e7SXiao Yang 		IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
165630ad63e7SXiao Yang 	/*
165730ad63e7SXiao Yang 	 * Scattering the FCS field of an incoming packet to host memory is supported.
1658ebaaee25SNoa Osherovich 	 */
165930ad63e7SXiao Yang 	IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
1660ebaaee25SNoa Osherovich 	/* Checksum offloads are supported (for both send and receive). */
166130ad63e7SXiao Yang 	IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
166230ad63e7SXiao Yang 	/*
166330ad63e7SXiao Yang 	 * When a packet is received for an RQ with no receive WQEs, the
16647d9336d8SMaor Gottlieb 	 * packet processing is delayed.
16657d9336d8SMaor Gottlieb 	 */
166630ad63e7SXiao Yang 	IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
1667ebaaee25SNoa Osherovich };
1668ebaaee25SNoa Osherovich 
16695fd251c8SYishai Hadas enum ib_wq_type {
1670175ba58dSYishai Hadas 	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
16715fd251c8SYishai Hadas };
16725fd251c8SYishai Hadas 
16735fd251c8SYishai Hadas enum ib_wq_state {
16745fd251c8SYishai Hadas 	IB_WQS_RESET,
16755fd251c8SYishai Hadas 	IB_WQS_RDY,
16765fd251c8SYishai Hadas 	IB_WQS_ERR
16775fd251c8SYishai Hadas };
16785fd251c8SYishai Hadas 
16795fd251c8SYishai Hadas struct ib_wq {
16805fd251c8SYishai Hadas 	struct ib_device       *device;
1681e04dd131SJason Gunthorpe 	struct ib_uwq_object   *uobject;
16825fd251c8SYishai Hadas 	void		    *wq_context;
16835fd251c8SYishai Hadas 	void		    (*event_handler)(struct ib_event *, void *);
16845fd251c8SYishai Hadas 	struct ib_pd	       *pd;
16855fd251c8SYishai Hadas 	struct ib_cq	       *cq;
16865fd251c8SYishai Hadas 	u32		wq_num;
16875fd251c8SYishai Hadas 	enum ib_wq_state       state;
16885fd251c8SYishai Hadas 	enum ib_wq_type	wq_type;
16895fd251c8SYishai Hadas 	atomic_t		usecnt;
16905fd251c8SYishai Hadas };
16915fd251c8SYishai Hadas 
169210bac72bSNoa Osherovich enum ib_wq_flags {
1693175ba58dSYishai Hadas 	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1694175ba58dSYishai Hadas 	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1695175ba58dSYishai Hadas 	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1696175ba58dSYishai Hadas 	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1697175ba58dSYishai Hadas 				IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
169810bac72bSNoa Osherovich };
169910bac72bSNoa Osherovich 
17005fd251c8SYishai Hadas struct ib_wq_init_attr {
17015fd251c8SYishai Hadas 	void		       *wq_context;
17025fd251c8SYishai Hadas 	enum ib_wq_type	wq_type;
17035fd251c8SYishai Hadas 	u32		max_wr;
17045fd251c8SYishai Hadas 	u32		max_sge;
17055fd251c8SYishai Hadas 	struct	ib_cq	       *cq;
17065fd251c8SYishai Hadas 	void		    (*event_handler)(struct ib_event *, void *);
170710bac72bSNoa Osherovich 	u32		create_flags; /* Use enum ib_wq_flags */
17085fd251c8SYishai Hadas };
17095fd251c8SYishai Hadas 
17105fd251c8SYishai Hadas enum ib_wq_attr_mask {
17115fd251c8SYishai Hadas 	IB_WQ_STATE		= 1 << 0,
17125fd251c8SYishai Hadas 	IB_WQ_CUR_STATE		= 1 << 1,
171310bac72bSNoa Osherovich 	IB_WQ_FLAGS		= 1 << 2,
17145fd251c8SYishai Hadas };
17155fd251c8SYishai Hadas 
17165fd251c8SYishai Hadas struct ib_wq_attr {
17175fd251c8SYishai Hadas 	enum	ib_wq_state	wq_state;
17185fd251c8SYishai Hadas 	enum	ib_wq_state	curr_wq_state;
171910bac72bSNoa Osherovich 	u32			flags; /* Use enum ib_wq_flags */
172010bac72bSNoa Osherovich 	u32			flags_mask; /* Use enum ib_wq_flags */
17215fd251c8SYishai Hadas };
17225fd251c8SYishai Hadas 
17236d39786bSYishai Hadas struct ib_rwq_ind_table {
17246d39786bSYishai Hadas 	struct ib_device	*device;
17256d39786bSYishai Hadas 	struct ib_uobject      *uobject;
17266d39786bSYishai Hadas 	atomic_t		usecnt;
17276d39786bSYishai Hadas 	u32		ind_tbl_num;
17286d39786bSYishai Hadas 	u32		log_ind_tbl_size;
17296d39786bSYishai Hadas 	struct ib_wq	**ind_tbl;
17306d39786bSYishai Hadas };
17316d39786bSYishai Hadas 
17326d39786bSYishai Hadas struct ib_rwq_ind_table_init_attr {
17336d39786bSYishai Hadas 	u32		log_ind_tbl_size;
17346d39786bSYishai Hadas 	/* Each entry is a pointer to Receive Work Queue */
17356d39786bSYishai Hadas 	struct ib_wq	**ind_tbl;
17366d39786bSYishai Hadas };
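/*
 * Sizing sketch: the indirection table holds 1 << log_ind_tbl_size
 * entries, so an eight-WQ table (WQs created earlier by the caller)
 * would use
 *
 *	struct ib_wq *wqs[8];
 *	struct ib_rwq_ind_table_init_attr init = { };
 *
 *	init.log_ind_tbl_size = 3;	// 2^3 == ARRAY_SIZE(wqs)
 *	init.ind_tbl = wqs;
 */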
17376d39786bSYishai Hadas 
1738d291f1a6SDaniel Jurgens enum port_pkey_state {
1739d291f1a6SDaniel Jurgens 	IB_PORT_PKEY_NOT_VALID = 0,
1740d291f1a6SDaniel Jurgens 	IB_PORT_PKEY_VALID = 1,
1741d291f1a6SDaniel Jurgens 	IB_PORT_PKEY_LISTED = 2,
1742d291f1a6SDaniel Jurgens };
1743d291f1a6SDaniel Jurgens 
1744d291f1a6SDaniel Jurgens struct ib_qp_security;
1745d291f1a6SDaniel Jurgens 
1746d291f1a6SDaniel Jurgens struct ib_port_pkey {
1747d291f1a6SDaniel Jurgens 	enum port_pkey_state	state;
1748d291f1a6SDaniel Jurgens 	u16			pkey_index;
17491fb7f897SMark Bloch 	u32			port_num;
1750d291f1a6SDaniel Jurgens 	struct list_head	qp_list;
1751d291f1a6SDaniel Jurgens 	struct list_head	to_error_list;
1752d291f1a6SDaniel Jurgens 	struct ib_qp_security  *sec;
1753d291f1a6SDaniel Jurgens };
1754d291f1a6SDaniel Jurgens 
1755d291f1a6SDaniel Jurgens struct ib_ports_pkeys {
1756d291f1a6SDaniel Jurgens 	struct ib_port_pkey	main;
1757d291f1a6SDaniel Jurgens 	struct ib_port_pkey	alt;
1758d291f1a6SDaniel Jurgens };
1759d291f1a6SDaniel Jurgens 
1760d291f1a6SDaniel Jurgens struct ib_qp_security {
1761d291f1a6SDaniel Jurgens 	struct ib_qp	       *qp;
1762d291f1a6SDaniel Jurgens 	struct ib_device       *dev;
1763d291f1a6SDaniel Jurgens 	/* Hold this mutex when changing port and pkey settings. */
1764d291f1a6SDaniel Jurgens 	struct mutex		mutex;
1765d291f1a6SDaniel Jurgens 	struct ib_ports_pkeys  *ports_pkeys;
1766d291f1a6SDaniel Jurgens 	/* A list of all open shared QP handles.  Required to enforce security
1767d291f1a6SDaniel Jurgens 	 * properly for all users of a shared QP.
1768d291f1a6SDaniel Jurgens 	 */
1769d291f1a6SDaniel Jurgens 	struct list_head        shared_qp_list;
1770d291f1a6SDaniel Jurgens 	void                   *security;
1771d291f1a6SDaniel Jurgens 	bool			destroying;
1772d291f1a6SDaniel Jurgens 	atomic_t		error_list_count;
1773d291f1a6SDaniel Jurgens 	struct completion	error_complete;
1774d291f1a6SDaniel Jurgens 	int			error_comps_pending;
1775d291f1a6SDaniel Jurgens };
1776d291f1a6SDaniel Jurgens 
1777632bc3f6SBart Van Assche /*
1778632bc3f6SBart Van Assche  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1779632bc3f6SBart Van Assche  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1780632bc3f6SBart Van Assche  */
1781a4d61e84SRoland Dreier struct ib_qp {
1782a4d61e84SRoland Dreier 	struct ib_device       *device;
1783a4d61e84SRoland Dreier 	struct ib_pd	       *pd;
1784a4d61e84SRoland Dreier 	struct ib_cq	       *send_cq;
1785a4d61e84SRoland Dreier 	struct ib_cq	       *recv_cq;
1786fffb0383SChristoph Hellwig 	spinlock_t		mr_lock;
1787fffb0383SChristoph Hellwig 	int			mrs_used;
1788a060b562SChristoph Hellwig 	struct list_head	rdma_mrs;
17890e353e34SChristoph Hellwig 	struct list_head	sig_mrs;
1790a4d61e84SRoland Dreier 	struct ib_srq	       *srq;
1791b42b63cfSSean Hefty 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1792d3d72d90SSean Hefty 	struct list_head	xrcd_list;
1793fffb0383SChristoph Hellwig 
1794319a441dSHadar Hen Zion 	/* count times opened, mcast attaches, flow attaches */
1795319a441dSHadar Hen Zion 	atomic_t		usecnt;
17960e0ec7e0SSean Hefty 	struct list_head	open_list;
17970e0ec7e0SSean Hefty 	struct ib_qp           *real_qp;
1798620d3f81SJason Gunthorpe 	struct ib_uqp_object   *uobject;
1799a4d61e84SRoland Dreier 	void                  (*event_handler)(struct ib_event *, void *);
1800a4d61e84SRoland Dreier 	void		       *qp_context;
18011a1f460fSJason Gunthorpe 	/* sgid_attrs associated with the AV's */
18021a1f460fSJason Gunthorpe 	const struct ib_gid_attr *av_sgid_attr;
18031a1f460fSJason Gunthorpe 	const struct ib_gid_attr *alt_path_sgid_attr;
1804a4d61e84SRoland Dreier 	u32			qp_num;
1805632bc3f6SBart Van Assche 	u32			max_write_sge;
1806632bc3f6SBart Van Assche 	u32			max_read_sge;
1807a4d61e84SRoland Dreier 	enum ib_qp_type		qp_type;
1808a9017e23SYishai Hadas 	struct ib_rwq_ind_table *rwq_ind_tbl;
1809d291f1a6SDaniel Jurgens 	struct ib_qp_security  *qp_sec;
18101fb7f897SMark Bloch 	u32			port;
181102d8883fSLeon Romanovsky 
1812185eddc4SMax Gurtovoy 	bool			integrity_en;
181302d8883fSLeon Romanovsky 	/*
181402d8883fSLeon Romanovsky 	 * Implementation details of the RDMA core, don't use in drivers:
181502d8883fSLeon Romanovsky 	 */
181602d8883fSLeon Romanovsky 	struct rdma_restrack_entry     res;
181799fa331dSMark Zhang 
181899fa331dSMark Zhang 	/* The counter the qp is bind to */
181999fa331dSMark Zhang 	struct rdma_counter    *counter;
1820a4d61e84SRoland Dreier };
1821a4d61e84SRoland Dreier 
1822bee76d7aSAriel Levkovich struct ib_dm {
1823bee76d7aSAriel Levkovich 	struct ib_device  *device;
1824bee76d7aSAriel Levkovich 	u32		   length;
1825bee76d7aSAriel Levkovich 	u32		   flags;
1826bee76d7aSAriel Levkovich 	struct ib_uobject *uobject;
1827bee76d7aSAriel Levkovich 	atomic_t	   usecnt;
1828bee76d7aSAriel Levkovich };
1829bee76d7aSAriel Levkovich 
1830a4d61e84SRoland Dreier struct ib_mr {
1831a4d61e84SRoland Dreier 	struct ib_device  *device;
1832a4d61e84SRoland Dreier 	struct ib_pd	  *pd;
1833a4d61e84SRoland Dreier 	u32		   lkey;
1834a4d61e84SRoland Dreier 	u32		   rkey;
18354c67e2bfSSagi Grimberg 	u64		   iova;
1836edd31551SParav Pandit 	u64		   length;
18374c67e2bfSSagi Grimberg 	unsigned int	   page_size;
1838a0bc099aSMax Gurtovoy 	enum ib_mr_type	   type;
1839d4a85c30SSteve Wise 	bool		   need_inval;
1840fffb0383SChristoph Hellwig 	union {
1841fffb0383SChristoph Hellwig 		struct ib_uobject	*uobject;	/* user */
1842fffb0383SChristoph Hellwig 		struct list_head	qp_entry;	/* FR */
1843fffb0383SChristoph Hellwig 	};
1844fccec5b8SSteve Wise 
1845be934ccaSAriel Levkovich 	struct ib_dm      *dm;
18467c717d3aSMax Gurtovoy 	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1847fccec5b8SSteve Wise 	/*
1848fccec5b8SSteve Wise 	 * Implementation details of the RDMA core, don't use in drivers:
1849fccec5b8SSteve Wise 	 */
1850fccec5b8SSteve Wise 	struct rdma_restrack_entry res;
1851a4d61e84SRoland Dreier };
1852a4d61e84SRoland Dreier 
1853a4d61e84SRoland Dreier struct ib_mw {
1854a4d61e84SRoland Dreier 	struct ib_device	*device;
1855a4d61e84SRoland Dreier 	struct ib_pd		*pd;
1856a4d61e84SRoland Dreier 	struct ib_uobject	*uobject;
1857a4d61e84SRoland Dreier 	u32			rkey;
18587083e42eSShani Michaeli 	enum ib_mw_type         type;
1859a4d61e84SRoland Dreier };
1860a4d61e84SRoland Dreier 
1861319a441dSHadar Hen Zion /* Supported steering options */
1862319a441dSHadar Hen Zion enum ib_flow_attr_type {
1863319a441dSHadar Hen Zion 	/* steering according to rule specifications */
1864319a441dSHadar Hen Zion 	IB_FLOW_ATTR_NORMAL		= 0x0,
1865319a441dSHadar Hen Zion 	/* default unicast and multicast rule -
1866319a441dSHadar Hen Zion 	 * receive all Eth traffic which isn't steered to any QP
1867319a441dSHadar Hen Zion 	 */
1868319a441dSHadar Hen Zion 	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1869319a441dSHadar Hen Zion 	/* default multicast rule -
1870319a441dSHadar Hen Zion 	 * receive all Eth multicast traffic which isn't steered to any QP
1871319a441dSHadar Hen Zion 	 */
1872319a441dSHadar Hen Zion 	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1873319a441dSHadar Hen Zion 	/* sniffer rule - receive all port traffic */
1874319a441dSHadar Hen Zion 	IB_FLOW_ATTR_SNIFFER		= 0x3
1875319a441dSHadar Hen Zion };
1876319a441dSHadar Hen Zion 
1877319a441dSHadar Hen Zion /* Supported steering header types */
1878319a441dSHadar Hen Zion enum ib_flow_spec_type {
1879319a441dSHadar Hen Zion 	/* L2 headers*/
1880319a441dSHadar Hen Zion 	IB_FLOW_SPEC_ETH		= 0x20,
1881240ae00eSMatan Barak 	IB_FLOW_SPEC_IB			= 0x22,
1882319a441dSHadar Hen Zion 	/* L3 header*/
1883319a441dSHadar Hen Zion 	IB_FLOW_SPEC_IPV4		= 0x30,
18844c2aae71SMaor Gottlieb 	IB_FLOW_SPEC_IPV6		= 0x31,
188556ab0b38SMatan Barak 	IB_FLOW_SPEC_ESP                = 0x34,
1886319a441dSHadar Hen Zion 	/* L4 headers*/
1887319a441dSHadar Hen Zion 	IB_FLOW_SPEC_TCP		= 0x40,
18880dbf3332SMoses Reuben 	IB_FLOW_SPEC_UDP		= 0x41,
18890dbf3332SMoses Reuben 	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1890d90e5e50SAriel Levkovich 	IB_FLOW_SPEC_GRE		= 0x51,
1891b04f0f03SAriel Levkovich 	IB_FLOW_SPEC_MPLS		= 0x60,
1892fbf46860SMoses Reuben 	IB_FLOW_SPEC_INNER		= 0x100,
1893460d0198SMoses Reuben 	/* Actions */
1894460d0198SMoses Reuben 	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1895483a3966SSlava Shwartsman 	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
18969b828441SMatan Barak 	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
18977eea23a5SRaed Salem 	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1898319a441dSHadar Hen Zion };
1899240ae00eSMatan Barak #define IB_FLOW_SPEC_LAYER_MASK	0xF0
19007eea23a5SRaed Salem #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
190122878dbcSMatan Barak 
1902a3100a78SMarina Varshaver enum ib_flow_flags {
1903a3100a78SMarina Varshaver 	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
190421e82d3eSBoris Pismenny 	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
190521e82d3eSBoris Pismenny 	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1906a3100a78SMarina Varshaver };
1907a3100a78SMarina Varshaver 
1908319a441dSHadar Hen Zion struct ib_flow_eth_filter {
1909319a441dSHadar Hen Zion 	u8	dst_mac[6];
1910319a441dSHadar Hen Zion 	u8	src_mac[6];
1911319a441dSHadar Hen Zion 	__be16	ether_type;
1912319a441dSHadar Hen Zion 	__be16	vlan_tag;
191315dfbd6bSMaor Gottlieb 	/* Must be last */
19145b361328SGustavo A. R. Silva 	u8	real_sz[];
1915319a441dSHadar Hen Zion };
1916319a441dSHadar Hen Zion 
1917319a441dSHadar Hen Zion struct ib_flow_spec_eth {
1918fbf46860SMoses Reuben 	u32			  type;
1919319a441dSHadar Hen Zion 	u16			  size;
1920319a441dSHadar Hen Zion 	struct ib_flow_eth_filter val;
1921319a441dSHadar Hen Zion 	struct ib_flow_eth_filter mask;
1922319a441dSHadar Hen Zion };
1923319a441dSHadar Hen Zion 
1924240ae00eSMatan Barak struct ib_flow_ib_filter {
1925240ae00eSMatan Barak 	__be16 dlid;
1926240ae00eSMatan Barak 	__u8   sl;
192715dfbd6bSMaor Gottlieb 	/* Must be last */
19285b361328SGustavo A. R. Silva 	u8	real_sz[];
1929240ae00eSMatan Barak };
1930240ae00eSMatan Barak 
1931240ae00eSMatan Barak struct ib_flow_spec_ib {
1932fbf46860SMoses Reuben 	u32			 type;
1933240ae00eSMatan Barak 	u16			 size;
1934240ae00eSMatan Barak 	struct ib_flow_ib_filter val;
1935240ae00eSMatan Barak 	struct ib_flow_ib_filter mask;
1936240ae00eSMatan Barak };
1937240ae00eSMatan Barak 
1938989a3a8fSMaor Gottlieb /* IPv4 header flags */
1939989a3a8fSMaor Gottlieb enum ib_ipv4_flags {
1940989a3a8fSMaor Gottlieb 	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1941989a3a8fSMaor Gottlieb 	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1942989a3a8fSMaor Gottlieb 				    last one have this flag set */
1943989a3a8fSMaor Gottlieb };
1944989a3a8fSMaor Gottlieb 
1945319a441dSHadar Hen Zion struct ib_flow_ipv4_filter {
1946319a441dSHadar Hen Zion 	__be32	src_ip;
1947319a441dSHadar Hen Zion 	__be32	dst_ip;
1948989a3a8fSMaor Gottlieb 	u8	proto;
1949989a3a8fSMaor Gottlieb 	u8	tos;
1950989a3a8fSMaor Gottlieb 	u8	ttl;
1951989a3a8fSMaor Gottlieb 	u8	flags;
195215dfbd6bSMaor Gottlieb 	/* Must be last */
19535b361328SGustavo A. R. Silva 	u8	real_sz[];
1954319a441dSHadar Hen Zion };
1955319a441dSHadar Hen Zion 
1956319a441dSHadar Hen Zion struct ib_flow_spec_ipv4 {
1957fbf46860SMoses Reuben 	u32			   type;
1958319a441dSHadar Hen Zion 	u16			   size;
1959319a441dSHadar Hen Zion 	struct ib_flow_ipv4_filter val;
1960319a441dSHadar Hen Zion 	struct ib_flow_ipv4_filter mask;
1961319a441dSHadar Hen Zion };
1962319a441dSHadar Hen Zion 
19634c2aae71SMaor Gottlieb struct ib_flow_ipv6_filter {
19644c2aae71SMaor Gottlieb 	u8	src_ip[16];
19654c2aae71SMaor Gottlieb 	u8	dst_ip[16];
1966a72c6a2bSMaor Gottlieb 	__be32	flow_label;
1967a72c6a2bSMaor Gottlieb 	u8	next_hdr;
1968a72c6a2bSMaor Gottlieb 	u8	traffic_class;
1969a72c6a2bSMaor Gottlieb 	u8	hop_limit;
197015dfbd6bSMaor Gottlieb 	/* Must be last */
19715b361328SGustavo A. R. Silva 	u8	real_sz[];
19724c2aae71SMaor Gottlieb };
19734c2aae71SMaor Gottlieb 
19744c2aae71SMaor Gottlieb struct ib_flow_spec_ipv6 {
1975fbf46860SMoses Reuben 	u32			   type;
19764c2aae71SMaor Gottlieb 	u16			   size;
19774c2aae71SMaor Gottlieb 	struct ib_flow_ipv6_filter val;
19784c2aae71SMaor Gottlieb 	struct ib_flow_ipv6_filter mask;
19794c2aae71SMaor Gottlieb };
19804c2aae71SMaor Gottlieb 
1981319a441dSHadar Hen Zion struct ib_flow_tcp_udp_filter {
1982319a441dSHadar Hen Zion 	__be16	dst_port;
1983319a441dSHadar Hen Zion 	__be16	src_port;
198415dfbd6bSMaor Gottlieb 	/* Must be last */
19855b361328SGustavo A. R. Silva 	u8	real_sz[];
1986319a441dSHadar Hen Zion };
1987319a441dSHadar Hen Zion 
1988319a441dSHadar Hen Zion struct ib_flow_spec_tcp_udp {
1989fbf46860SMoses Reuben 	u32			      type;
1990319a441dSHadar Hen Zion 	u16			      size;
1991319a441dSHadar Hen Zion 	struct ib_flow_tcp_udp_filter val;
1992319a441dSHadar Hen Zion 	struct ib_flow_tcp_udp_filter mask;
1993319a441dSHadar Hen Zion };
1994319a441dSHadar Hen Zion 
19950dbf3332SMoses Reuben struct ib_flow_tunnel_filter {
19960dbf3332SMoses Reuben 	__be32	tunnel_id;
19975b361328SGustavo A. R. Silva 	u8	real_sz[];
19980dbf3332SMoses Reuben };
19990dbf3332SMoses Reuben 
20000dbf3332SMoses Reuben /* ib_flow_spec_tunnel describes a VXLAN tunnel;
20010dbf3332SMoses Reuben  * the tunnel_id field of val holds the VNI value.
20020dbf3332SMoses Reuben  */
20030dbf3332SMoses Reuben struct ib_flow_spec_tunnel {
2004fbf46860SMoses Reuben 	u32			      type;
20050dbf3332SMoses Reuben 	u16			      size;
20060dbf3332SMoses Reuben 	struct ib_flow_tunnel_filter  val;
20070dbf3332SMoses Reuben 	struct ib_flow_tunnel_filter  mask;
20080dbf3332SMoses Reuben };
20090dbf3332SMoses Reuben 
201056ab0b38SMatan Barak struct ib_flow_esp_filter {
201156ab0b38SMatan Barak 	__be32	spi;
201256ab0b38SMatan Barak 	__be32  seq;
201356ab0b38SMatan Barak 	/* Must be last */
20145b361328SGustavo A. R. Silva 	u8	real_sz[];
201556ab0b38SMatan Barak };
201656ab0b38SMatan Barak 
201756ab0b38SMatan Barak struct ib_flow_spec_esp {
201856ab0b38SMatan Barak 	u32                           type;
201956ab0b38SMatan Barak 	u16			      size;
202056ab0b38SMatan Barak 	struct ib_flow_esp_filter     val;
202156ab0b38SMatan Barak 	struct ib_flow_esp_filter     mask;
202256ab0b38SMatan Barak };
202356ab0b38SMatan Barak 
2024d90e5e50SAriel Levkovich struct ib_flow_gre_filter {
2025d90e5e50SAriel Levkovich 	__be16 c_ks_res0_ver;
2026d90e5e50SAriel Levkovich 	__be16 protocol;
2027d90e5e50SAriel Levkovich 	__be32 key;
2028d90e5e50SAriel Levkovich 	/* Must be last */
20295b361328SGustavo A. R. Silva 	u8	real_sz[];
2030d90e5e50SAriel Levkovich };
2031d90e5e50SAriel Levkovich 
2032d90e5e50SAriel Levkovich struct ib_flow_spec_gre {
2033d90e5e50SAriel Levkovich 	u32                           type;
2034d90e5e50SAriel Levkovich 	u16			      size;
2035d90e5e50SAriel Levkovich 	struct ib_flow_gre_filter     val;
2036d90e5e50SAriel Levkovich 	struct ib_flow_gre_filter     mask;
2037d90e5e50SAriel Levkovich };
2038d90e5e50SAriel Levkovich 
2039b04f0f03SAriel Levkovich struct ib_flow_mpls_filter {
2040b04f0f03SAriel Levkovich 	__be32 tag;
2041b04f0f03SAriel Levkovich 	/* Must be last */
20425b361328SGustavo A. R. Silva 	u8	real_sz[];
2043b04f0f03SAriel Levkovich };
2044b04f0f03SAriel Levkovich 
2045b04f0f03SAriel Levkovich struct ib_flow_spec_mpls {
2046b04f0f03SAriel Levkovich 	u32                           type;
2047b04f0f03SAriel Levkovich 	u16			      size;
2048b04f0f03SAriel Levkovich 	struct ib_flow_mpls_filter     val;
2049b04f0f03SAriel Levkovich 	struct ib_flow_mpls_filter     mask;
2050b04f0f03SAriel Levkovich };
2051b04f0f03SAriel Levkovich 
2052460d0198SMoses Reuben struct ib_flow_spec_action_tag {
2053460d0198SMoses Reuben 	enum ib_flow_spec_type	      type;
2054460d0198SMoses Reuben 	u16			      size;
2055460d0198SMoses Reuben 	u32                           tag_id;
2056460d0198SMoses Reuben };
2057460d0198SMoses Reuben 
2058483a3966SSlava Shwartsman struct ib_flow_spec_action_drop {
2059483a3966SSlava Shwartsman 	enum ib_flow_spec_type	      type;
2060483a3966SSlava Shwartsman 	u16			      size;
2061483a3966SSlava Shwartsman };
2062483a3966SSlava Shwartsman 
20639b828441SMatan Barak struct ib_flow_spec_action_handle {
20649b828441SMatan Barak 	enum ib_flow_spec_type	      type;
20659b828441SMatan Barak 	u16			      size;
20669b828441SMatan Barak 	struct ib_flow_action	     *act;
20679b828441SMatan Barak };
20689b828441SMatan Barak 
20697eea23a5SRaed Salem enum ib_counters_description {
20707eea23a5SRaed Salem 	IB_COUNTER_PACKETS,
20717eea23a5SRaed Salem 	IB_COUNTER_BYTES,
20727eea23a5SRaed Salem };
20737eea23a5SRaed Salem 
20747eea23a5SRaed Salem struct ib_flow_spec_action_count {
20757eea23a5SRaed Salem 	enum ib_flow_spec_type type;
20767eea23a5SRaed Salem 	u16 size;
20777eea23a5SRaed Salem 	struct ib_counters *counters;
20787eea23a5SRaed Salem };
20797eea23a5SRaed Salem 
2080319a441dSHadar Hen Zion union ib_flow_spec {
2081319a441dSHadar Hen Zion 	struct {
2082fbf46860SMoses Reuben 		u32			type;
2083319a441dSHadar Hen Zion 		u16			size;
2084319a441dSHadar Hen Zion 	};
2085319a441dSHadar Hen Zion 	struct ib_flow_spec_eth		eth;
2086240ae00eSMatan Barak 	struct ib_flow_spec_ib		ib;
2087319a441dSHadar Hen Zion 	struct ib_flow_spec_ipv4        ipv4;
2088319a441dSHadar Hen Zion 	struct ib_flow_spec_tcp_udp	tcp_udp;
20894c2aae71SMaor Gottlieb 	struct ib_flow_spec_ipv6        ipv6;
20900dbf3332SMoses Reuben 	struct ib_flow_spec_tunnel      tunnel;
209156ab0b38SMatan Barak 	struct ib_flow_spec_esp		esp;
2092d90e5e50SAriel Levkovich 	struct ib_flow_spec_gre		gre;
2093b04f0f03SAriel Levkovich 	struct ib_flow_spec_mpls	mpls;
2094460d0198SMoses Reuben 	struct ib_flow_spec_action_tag  flow_tag;
2095483a3966SSlava Shwartsman 	struct ib_flow_spec_action_drop drop;
20969b828441SMatan Barak 	struct ib_flow_spec_action_handle action;
20977eea23a5SRaed Salem 	struct ib_flow_spec_action_count flow_count;
2098319a441dSHadar Hen Zion };
2099319a441dSHadar Hen Zion 
2100319a441dSHadar Hen Zion struct ib_flow_attr {
2101319a441dSHadar Hen Zion 	enum ib_flow_attr_type type;
2102319a441dSHadar Hen Zion 	u16	     size;
2103319a441dSHadar Hen Zion 	u16	     priority;
2104319a441dSHadar Hen Zion 	u32	     flags;
2105319a441dSHadar Hen Zion 	u8	     num_of_specs;
21061fb7f897SMark Bloch 	u32	     port;
21077654cb1bSMatthew Wilcox 	union ib_flow_spec flows[];
2108319a441dSHadar Hen Zion };
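/*
 * Allocation sketch (error handling omitted): the specs live in the
 * trailing flexible array, so the attribute must be sized to include
 * them, e.g. for a single L2 spec:
 *
 *	struct ib_flow_attr *attr;
 *	struct ib_flow_spec_eth *eth;
 *
 *	attr = kzalloc(sizeof(*attr) + sizeof(*eth), GFP_KERNEL);
 *	attr->type = IB_FLOW_ATTR_NORMAL;
 *	attr->num_of_specs = 1;
 *	eth = (struct ib_flow_spec_eth *)attr->flows;
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 */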
2109319a441dSHadar Hen Zion 
2110319a441dSHadar Hen Zion struct ib_flow {
2111319a441dSHadar Hen Zion 	struct ib_qp		*qp;
21126cd080a6SYishai Hadas 	struct ib_device	*device;
2113319a441dSHadar Hen Zion 	struct ib_uobject	*uobject;
2114319a441dSHadar Hen Zion };
2115319a441dSHadar Hen Zion 
21162eb9beaeSMatan Barak enum ib_flow_action_type {
21172eb9beaeSMatan Barak 	IB_FLOW_ACTION_UNSPECIFIED,
21182eb9beaeSMatan Barak 	IB_FLOW_ACTION_ESP = 1,
21192eb9beaeSMatan Barak };
21202eb9beaeSMatan Barak 
21212eb9beaeSMatan Barak struct ib_flow_action_attrs_esp_keymats {
21222eb9beaeSMatan Barak 	enum ib_uverbs_flow_action_esp_keymat			protocol;
21232eb9beaeSMatan Barak 	union {
21242eb9beaeSMatan Barak 		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
21252eb9beaeSMatan Barak 	} keymat;
21262eb9beaeSMatan Barak };
21272eb9beaeSMatan Barak 
21282eb9beaeSMatan Barak struct ib_flow_action_attrs_esp_replays {
21292eb9beaeSMatan Barak 	enum ib_uverbs_flow_action_esp_replay			protocol;
21302eb9beaeSMatan Barak 	union {
21312eb9beaeSMatan Barak 		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
21322eb9beaeSMatan Barak 	} replay;
21332eb9beaeSMatan Barak };
21342eb9beaeSMatan Barak 
21352eb9beaeSMatan Barak enum ib_flow_action_attrs_esp_flags {
21362eb9beaeSMatan Barak 	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
21372eb9beaeSMatan Barak 	 * This is done in order to share the same flags between user-space and
21382eb9beaeSMatan Barak 	 * kernel and avoid an unnecessary translation.
21392eb9beaeSMatan Barak 	 */
21402eb9beaeSMatan Barak 
21412eb9beaeSMatan Barak 	/* Kernel flags */
21422eb9beaeSMatan Barak 	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
21437d12f8d5SMatan Barak 	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
21442eb9beaeSMatan Barak };
21452eb9beaeSMatan Barak 
21462eb9beaeSMatan Barak struct ib_flow_spec_list {
21472eb9beaeSMatan Barak 	struct ib_flow_spec_list	*next;
21482eb9beaeSMatan Barak 	union ib_flow_spec		spec;
21492eb9beaeSMatan Barak };
21502eb9beaeSMatan Barak 
21512eb9beaeSMatan Barak struct ib_flow_action_attrs_esp {
21522eb9beaeSMatan Barak 	struct ib_flow_action_attrs_esp_keymats		*keymat;
21532eb9beaeSMatan Barak 	struct ib_flow_action_attrs_esp_replays		*replay;
21542eb9beaeSMatan Barak 	struct ib_flow_spec_list			*encap;
21552eb9beaeSMatan Barak 	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
21562eb9beaeSMatan Barak 	 * A value of 0 is valid.
21572eb9beaeSMatan Barak 	 */
21582eb9beaeSMatan Barak 	u32						esn;
21592eb9beaeSMatan Barak 	u32						spi;
21602eb9beaeSMatan Barak 	u32						seq;
21612eb9beaeSMatan Barak 	u32						tfc_pad;
21622eb9beaeSMatan Barak 	/* Use enum ib_flow_action_attrs_esp_flags */
21632eb9beaeSMatan Barak 	u64						flags;
21642eb9beaeSMatan Barak 	u64						hard_limit_pkts;
21652eb9beaeSMatan Barak };
21662eb9beaeSMatan Barak 
21672eb9beaeSMatan Barak struct ib_flow_action {
21682eb9beaeSMatan Barak 	struct ib_device		*device;
21692eb9beaeSMatan Barak 	struct ib_uobject		*uobject;
21702eb9beaeSMatan Barak 	enum ib_flow_action_type	type;
21712eb9beaeSMatan Barak 	atomic_t			usecnt;
21722eb9beaeSMatan Barak };
21732eb9beaeSMatan Barak 
2174e26e7b88SLeon Romanovsky struct ib_mad;
2175a4d61e84SRoland Dreier 
2176a4d61e84SRoland Dreier enum ib_process_mad_flags {
2177a4d61e84SRoland Dreier 	IB_MAD_IGNORE_MKEY	= 1,
2178a4d61e84SRoland Dreier 	IB_MAD_IGNORE_BKEY	= 2,
2179a4d61e84SRoland Dreier 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2180a4d61e84SRoland Dreier };
2181a4d61e84SRoland Dreier 
2182a4d61e84SRoland Dreier enum ib_mad_result {
2183a4d61e84SRoland Dreier 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2184a4d61e84SRoland Dreier 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2185a4d61e84SRoland Dreier 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2186a4d61e84SRoland Dreier 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2187a4d61e84SRoland Dreier };
2188a4d61e84SRoland Dreier 
218921d6454aSJack Wang struct ib_port_cache {
2190883c71feSDaniel Jurgens 	u64		      subnet_prefix;
219121d6454aSJack Wang 	struct ib_pkey_cache  *pkey;
219221d6454aSJack Wang 	struct ib_gid_table   *gid;
219321d6454aSJack Wang 	u8                     lmc;
219421d6454aSJack Wang 	enum ib_port_state     port_state;
219521d6454aSJack Wang };
219621d6454aSJack Wang 
21977738613eSIra Weiny struct ib_port_immutable {
21987738613eSIra Weiny 	int                           pkey_tbl_len;
21997738613eSIra Weiny 	int                           gid_tbl_len;
2200f9b22e35SIra Weiny 	u32                           core_cap_flags;
2201337877a4SIra Weiny 	u32                           max_mad_size;
22027738613eSIra Weiny };
22037738613eSIra Weiny 
22048ceb1357SJason Gunthorpe struct ib_port_data {
2205324e227eSJason Gunthorpe 	struct ib_device *ib_dev;
2206324e227eSJason Gunthorpe 
22078ceb1357SJason Gunthorpe 	struct ib_port_immutable immutable;
22088ceb1357SJason Gunthorpe 
22098ceb1357SJason Gunthorpe 	spinlock_t pkey_list_lock;
221084dcd8c7SAnand Khoje 
221184dcd8c7SAnand Khoje 	spinlock_t netdev_lock;
221284dcd8c7SAnand Khoje 
22138ceb1357SJason Gunthorpe 	struct list_head pkey_list;
22148faea9fdSJason Gunthorpe 
22158faea9fdSJason Gunthorpe 	struct ib_port_cache cache;
2216c2261dd7SJason Gunthorpe 
2217324e227eSJason Gunthorpe 	struct net_device __rcu *netdev;
221809f530f0SJason Gunthorpe 	netdevice_tracker netdev_tracker;
2219324e227eSJason Gunthorpe 	struct hlist_node ndev_hash_link;
2220413d3347SMark Zhang 	struct rdma_port_counter port_counter;
2221d8a58838SJason Gunthorpe 	struct ib_port *sysfs;
22228ceb1357SJason Gunthorpe };
22238ceb1357SJason Gunthorpe 
22242fc77572SVishwanathapura, Niranjana /* rdma netdev type - specifies protocol type */
22252fc77572SVishwanathapura, Niranjana enum rdma_netdev_t {
2226f0ad83acSNiranjana Vishwanathapura 	RDMA_NETDEV_OPA_VNIC,
2227f0ad83acSNiranjana Vishwanathapura 	RDMA_NETDEV_IPOIB,
22282fc77572SVishwanathapura, Niranjana };
22292fc77572SVishwanathapura, Niranjana 
22302fc77572SVishwanathapura, Niranjana /**
22312fc77572SVishwanathapura, Niranjana  * struct rdma_netdev - rdma netdev
22322fc77572SVishwanathapura, Niranjana  * For cases where netstack interfacing is required.
22332fc77572SVishwanathapura, Niranjana  */
22342fc77572SVishwanathapura, Niranjana struct rdma_netdev {
22352fc77572SVishwanathapura, Niranjana 	void              *clnt_priv;
22362fc77572SVishwanathapura, Niranjana 	struct ib_device  *hca;
22371fb7f897SMark Bloch 	u32		   port_num;
2238d99dc602SGary Leshner 	int                mtu;
22392fc77572SVishwanathapura, Niranjana 
22409f49a5b5SJason Gunthorpe 	/*
22419f49a5b5SJason Gunthorpe 	 * cleanup function must be specified.
22429f49a5b5SJason Gunthorpe 	 * FIXME: This is only used for OPA_VNIC and that usage should be
22439f49a5b5SJason Gunthorpe 	 * removed too.
22449f49a5b5SJason Gunthorpe 	 */
22458e959601SNiranjana Vishwanathapura 	void (*free_rdma_netdev)(struct net_device *netdev);
22468e959601SNiranjana Vishwanathapura 
22472fc77572SVishwanathapura, Niranjana 	/* control functions */
22482fc77572SVishwanathapura, Niranjana 	void (*set_id)(struct net_device *netdev, int id);
2249f0ad83acSNiranjana Vishwanathapura 	/* send packet */
2250f0ad83acSNiranjana Vishwanathapura 	int (*send)(struct net_device *dev, struct sk_buff *skb,
2251f0ad83acSNiranjana Vishwanathapura 		    struct ib_ah *address, u32 dqpn);
2252f0ad83acSNiranjana Vishwanathapura 	/* multicast */
2253f0ad83acSNiranjana Vishwanathapura 	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2254f0ad83acSNiranjana Vishwanathapura 			    union ib_gid *gid, u16 mlid,
2255f0ad83acSNiranjana Vishwanathapura 			    int set_qkey, u32 qkey);
2256f0ad83acSNiranjana Vishwanathapura 	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2257f0ad83acSNiranjana Vishwanathapura 			    union ib_gid *gid, u16 mlid);
2258042a00f9SMike Marciniszyn 	/* timeout */
2259042a00f9SMike Marciniszyn 	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
22602fc77572SVishwanathapura, Niranjana };
22612fc77572SVishwanathapura, Niranjana 
2262f6a8a19bSDenis Drozdov struct rdma_netdev_alloc_params {
2263f6a8a19bSDenis Drozdov 	size_t sizeof_priv;
2264f6a8a19bSDenis Drozdov 	unsigned int txqs;
2265f6a8a19bSDenis Drozdov 	unsigned int rxqs;
2266f6a8a19bSDenis Drozdov 	void *param;
2267f6a8a19bSDenis Drozdov 
22681fb7f897SMark Bloch 	int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2269f6a8a19bSDenis Drozdov 				      struct net_device *netdev, void *param);
2270f6a8a19bSDenis Drozdov };
2271f6a8a19bSDenis Drozdov 
2272a3de94e3SErez Alfasi struct ib_odp_counters {
2273a3de94e3SErez Alfasi 	atomic64_t faults;
2274a3de94e3SErez Alfasi 	atomic64_t invalidations;
2275d473f4dcSMaor Gottlieb 	atomic64_t prefetch;
2276a3de94e3SErez Alfasi };
2277a3de94e3SErez Alfasi 
2278fa9b1802SRaed Salem struct ib_counters {
2279fa9b1802SRaed Salem 	struct ib_device	*device;
2280fa9b1802SRaed Salem 	struct ib_uobject	*uobject;
2281fa9b1802SRaed Salem 	/* num of objects attached */
2282fa9b1802SRaed Salem 	atomic_t	usecnt;
2283fa9b1802SRaed Salem };
2284fa9b1802SRaed Salem 
228551d7a538SRaed Salem struct ib_counters_read_attr {
228651d7a538SRaed Salem 	u64	*counters_buff;
228751d7a538SRaed Salem 	u32	ncounters;
228851d7a538SRaed Salem 	u32	flags; /* use enum ib_read_counters_flags */
228951d7a538SRaed Salem };
229051d7a538SRaed Salem 
22912eb9beaeSMatan Barak struct uverbs_attr_bundle;
2292dd05cb82SKamal Heib struct iw_cm_id;
2293dd05cb82SKamal Heib struct iw_cm_conn_param;
22942eb9beaeSMatan Barak 
229530471d4bSLeon Romanovsky #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
229630471d4bSLeon Romanovsky 	.size_##ib_struct =                                                    \
229730471d4bSLeon Romanovsky 		(sizeof(struct drv_struct) +                                   \
229830471d4bSLeon Romanovsky 		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
229930471d4bSLeon Romanovsky 		 BUILD_BUG_ON_ZERO(                                            \
230030471d4bSLeon Romanovsky 			 !__same_type(((struct drv_struct *)NULL)->member,     \
230130471d4bSLeon Romanovsky 				      struct ib_struct)))
230230471d4bSLeon Romanovsky 
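/*
 * Example (editor's sketch, not part of the original header): a driver with
 * a private PD wrapper embedding struct ib_pd declares its size so the core
 * can allocate it. "my_drv_pd" and "ibpd" are hypothetical names; the macro
 * build-checks that the member sits at offset 0 and has the right type.
 *
 *	struct my_drv_pd {
 *		struct ib_pd ibpd;	// must be first (offset 0)
 *		u32 pdn;
 *	};
 *
 *	static const struct ib_device_ops my_drv_dev_ops = {
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_drv_pd, ibpd),
 *	};
 */
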
2303f6316032SLeon Romanovsky #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
2304514aee66SLeon Romanovsky 	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2305514aee66SLeon Romanovsky 					   gfp, false))
2306514aee66SLeon Romanovsky 
2307514aee66SLeon Romanovsky #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type)                              \
2308514aee66SLeon Romanovsky 	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2309514aee66SLeon Romanovsky 					   GFP_KERNEL, true))
2310f6316032SLeon Romanovsky 
231130471d4bSLeon Romanovsky #define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2312f6316032SLeon Romanovsky 	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
231330471d4bSLeon Romanovsky 
231430471d4bSLeon Romanovsky #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
231530471d4bSLeon Romanovsky 
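/*
 * Example (editor's sketch): with the size declared via INIT_RDMA_OBJ_SIZE
 * above, the core can allocate the driver's wrapper in one step. The
 * returned pointer is the ib_pd embedded at offset 0 of the hypothetical
 * my_drv_pd.
 *
 *	struct ib_pd *pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
 *	if (!pd)
 *		return -ENOMEM;
 */
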
23163411f9f0SMichal Kalderon struct rdma_user_mmap_entry {
23173411f9f0SMichal Kalderon 	struct kref ref;
23183411f9f0SMichal Kalderon 	struct ib_ucontext *ucontext;
23193411f9f0SMichal Kalderon 	unsigned long start_pgoff;
23203411f9f0SMichal Kalderon 	size_t npages;
23213411f9f0SMichal Kalderon 	bool driver_removed;
23223411f9f0SMichal Kalderon };
23233411f9f0SMichal Kalderon 
23243411f9f0SMichal Kalderon /* Return the offset (in bytes) the user should pass to libc's mmap() */
23253411f9f0SMichal Kalderon static inline u64
23263411f9f0SMichal Kalderon rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
23273411f9f0SMichal Kalderon {
23283411f9f0SMichal Kalderon 	return (u64)entry->start_pgoff << PAGE_SHIFT;
23293411f9f0SMichal Kalderon }
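
/*
 * Example (editor's sketch): a driver typically copies this offset back to
 * userspace in a uverbs response, and the user passes it verbatim to
 * mmap(2). "resp" is a hypothetical driver response struct.
 *
 *	resp.mmap_offset = rdma_user_mmap_get_offset(entry);
 *	...
 *	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   uverbs_fd, resp.mmap_offset);
 */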
23303411f9f0SMichal Kalderon 
2331521ed0d9SKamal Heib /**
2332521ed0d9SKamal Heib  * struct ib_device_ops - InfiniBand device operations
2333521ed0d9SKamal Heib  * This structure defines all the InfiniBand device operations, providers will
2334521ed0d9SKamal Heib  * need to define the supported operations, otherwise they will be set to null.
2335521ed0d9SKamal Heib  */
2336521ed0d9SKamal Heib struct ib_device_ops {
23377a154142SJason Gunthorpe 	struct module *owner;
2338b9560a41SJason Gunthorpe 	enum rdma_driver_id driver_id;
233972c6ec18SJason Gunthorpe 	u32 uverbs_abi_ver;
23408f71bb00SJason Gunthorpe 	unsigned int uverbs_no_driver_id_binding:1;
2341b9560a41SJason Gunthorpe 
2342915e4af5SJason Gunthorpe 	/*
2343915e4af5SJason Gunthorpe 	 * NOTE: New drivers should not make use of device_group; instead new
2344915e4af5SJason Gunthorpe 	 * device parameter should be exposed via netlink command. This
2345915e4af5SJason Gunthorpe 	 * mechanism exists only for existing drivers.
2346915e4af5SJason Gunthorpe 	 */
2347915e4af5SJason Gunthorpe 	const struct attribute_group *device_group;
2348d7407d16SJason Gunthorpe 	const struct attribute_group **port_groups;
2349d7407d16SJason Gunthorpe 
2350521ed0d9SKamal Heib 	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2351521ed0d9SKamal Heib 			 const struct ib_send_wr **bad_send_wr);
2352521ed0d9SKamal Heib 	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2353521ed0d9SKamal Heib 			 const struct ib_recv_wr **bad_recv_wr);
2354521ed0d9SKamal Heib 	void (*drain_rq)(struct ib_qp *qp);
2355521ed0d9SKamal Heib 	void (*drain_sq)(struct ib_qp *qp);
2356521ed0d9SKamal Heib 	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2357521ed0d9SKamal Heib 	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2358521ed0d9SKamal Heib 	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2359521ed0d9SKamal Heib 	int (*post_srq_recv)(struct ib_srq *srq,
2360521ed0d9SKamal Heib 			     const struct ib_recv_wr *recv_wr,
2361521ed0d9SKamal Heib 			     const struct ib_recv_wr **bad_recv_wr);
2362521ed0d9SKamal Heib 	int (*process_mad)(struct ib_device *device, int process_mad_flags,
23631fb7f897SMark Bloch 			   u32 port_num, const struct ib_wc *in_wc,
2364521ed0d9SKamal Heib 			   const struct ib_grh *in_grh,
2365e26e7b88SLeon Romanovsky 			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
2366e26e7b88SLeon Romanovsky 			   size_t *out_mad_size, u16 *out_mad_pkey_index);
2367521ed0d9SKamal Heib 	int (*query_device)(struct ib_device *device,
2368521ed0d9SKamal Heib 			    struct ib_device_attr *device_attr,
2369521ed0d9SKamal Heib 			    struct ib_udata *udata);
2370521ed0d9SKamal Heib 	int (*modify_device)(struct ib_device *device, int device_modify_mask,
2371521ed0d9SKamal Heib 			     struct ib_device_modify *device_modify);
2372521ed0d9SKamal Heib 	void (*get_dev_fw_str)(struct ib_device *device, char *str);
2373521ed0d9SKamal Heib 	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2374521ed0d9SKamal Heib 						     int comp_vector);
23751fb7f897SMark Bloch 	int (*query_port)(struct ib_device *device, u32 port_num,
2376521ed0d9SKamal Heib 			  struct ib_port_attr *port_attr);
23771fb7f897SMark Bloch 	int (*modify_port)(struct ib_device *device, u32 port_num,
2378521ed0d9SKamal Heib 			   int port_modify_mask,
2379521ed0d9SKamal Heib 			   struct ib_port_modify *port_modify);
2380521ed0d9SKamal Heib 	/**
2381521ed0d9SKamal Heib 	 * The following mandatory functions are used only at device
2382521ed0d9SKamal Heib 	 * registration.  Keep functions such as these at the end of this
2383521ed0d9SKamal Heib 	 * structure to avoid cache line misses when accessing struct ib_device
2384521ed0d9SKamal Heib 	 * in fast paths.
2385521ed0d9SKamal Heib 	 */
23861fb7f897SMark Bloch 	int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2387521ed0d9SKamal Heib 				  struct ib_port_immutable *immutable);
2388521ed0d9SKamal Heib 	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
23891fb7f897SMark Bloch 					       u32 port_num);
2390521ed0d9SKamal Heib 	/**
2391521ed0d9SKamal Heib 	 * When calling get_netdev, the HW vendor's driver should return the
2392521ed0d9SKamal Heib 	 * net device of device @device at port @port_num or NULL if such
2393521ed0d9SKamal Heib 	 * a net device doesn't exist. The vendor driver should call dev_hold
2394521ed0d9SKamal Heib 	 * on this net device. The HW vendor's device driver must guarantee
2395521ed0d9SKamal Heib 	 * that this function returns NULL before the net device has finished
2396521ed0d9SKamal Heib 	 * NETDEV_UNREGISTER state.
2397521ed0d9SKamal Heib 	 */
23981fb7f897SMark Bloch 	struct net_device *(*get_netdev)(struct ib_device *device,
23991fb7f897SMark Bloch 					 u32 port_num);
2400521ed0d9SKamal Heib 	/**
2401521ed0d9SKamal Heib 	 * rdma netdev operation
2402521ed0d9SKamal Heib 	 *
2403521ed0d9SKamal Heib 	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2404521ed0d9SKamal Heib 	 * must return -EOPNOTSUPP if it doesn't support the specified type.
2405521ed0d9SKamal Heib 	 */
2406521ed0d9SKamal Heib 	struct net_device *(*alloc_rdma_netdev)(
24071fb7f897SMark Bloch 		struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2408521ed0d9SKamal Heib 		const char *name, unsigned char name_assign_type,
2409521ed0d9SKamal Heib 		void (*setup)(struct net_device *));
2410521ed0d9SKamal Heib 
24111fb7f897SMark Bloch 	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2412521ed0d9SKamal Heib 				      enum rdma_netdev_t type,
2413521ed0d9SKamal Heib 				      struct rdma_netdev_alloc_params *params);
2414521ed0d9SKamal Heib 	/**
2415521ed0d9SKamal Heib 	 * query_gid should return the GID value for @device when the
2416521ed0d9SKamal Heib 	 * @port_num link layer is either IB or iWarp. It is a no-op if the
2417521ed0d9SKamal Heib 	 * @port_num port is of the RoCE link layer.
2418521ed0d9SKamal Heib 	 */
24191fb7f897SMark Bloch 	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2420521ed0d9SKamal Heib 			 union ib_gid *gid);
2421521ed0d9SKamal Heib 	/**
2422521ed0d9SKamal Heib 	 * When calling add_gid, the HW vendor's driver should add the gid
2423521ed0d9SKamal Heib 	 * of device of port at gid index available at @attr. Meta-info of
2424521ed0d9SKamal Heib 	 * that gid (for example, the network device related to this gid) is
2425521ed0d9SKamal Heib 	 * available at @attr. @context allows the HW vendor driver to store
2426521ed0d9SKamal Heib 	 * extra information together with a GID entry. The HW vendor driver may
2427521ed0d9SKamal Heib 	 * allocate memory to contain this information and store it in @context
2428521ed0d9SKamal Heib 	 * when a new GID entry is written to. Params are consistent until the
2429521ed0d9SKamal Heib 	 * next call of add_gid or delete_gid. The function should return 0 on
2430521ed0d9SKamal Heib 	 * success or error otherwise. The function could be called
2431521ed0d9SKamal Heib 	 * concurrently for different ports. This function is only called when
2432521ed0d9SKamal Heib 	 * roce_gid_table is used.
2433521ed0d9SKamal Heib 	 */
2434521ed0d9SKamal Heib 	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2435521ed0d9SKamal Heib 	/**
2436521ed0d9SKamal Heib 	 * When calling del_gid, the HW vendor's driver should delete the
2437521ed0d9SKamal Heib 	 * gid of device @device at gid index gid_index of port port_num
2438521ed0d9SKamal Heib 	 * available in @attr.
2439521ed0d9SKamal Heib 	 * Upon the deletion of a GID entry, the HW vendor must free any
2440521ed0d9SKamal Heib 	 * allocated memory. The caller will clear @context afterwards.
2441521ed0d9SKamal Heib 	 * This function is only called when roce_gid_table is used.
2442521ed0d9SKamal Heib 	 */
2443521ed0d9SKamal Heib 	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
24441fb7f897SMark Bloch 	int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2445521ed0d9SKamal Heib 			  u16 *pkey);
2446a2a074efSLeon Romanovsky 	int (*alloc_ucontext)(struct ib_ucontext *context,
2447521ed0d9SKamal Heib 			      struct ib_udata *udata);
2448a2a074efSLeon Romanovsky 	void (*dealloc_ucontext)(struct ib_ucontext *context);
2449521ed0d9SKamal Heib 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
24503411f9f0SMichal Kalderon 	/**
24513411f9f0SMichal Kalderon 	 * This will be called once the refcount of an entry in mmap_xa
24523411f9f0SMichal Kalderon 	 * reaches zero. The type of the memory that was mapped may differ
24533411f9f0SMichal Kalderon 	 * between entries and is opaque to the rdma_user_mmap interface.
24543411f9f0SMichal Kalderon 	 * It therefore needs to be implemented by the driver in mmap_free.
24553411f9f0SMichal Kalderon 	 */
24563411f9f0SMichal Kalderon 	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2457521ed0d9SKamal Heib 	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2458ff23dfa1SShamir Rabinovitch 	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
245991a7c58fSLeon Romanovsky 	int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2460fa5d010cSMaor Gottlieb 	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2461fa5d010cSMaor Gottlieb 			 struct ib_udata *udata);
2462676a80adSJason Gunthorpe 	int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2463676a80adSJason Gunthorpe 			      struct ib_udata *udata);
2464521ed0d9SKamal Heib 	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2465521ed0d9SKamal Heib 	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
24669a9ebf8cSLeon Romanovsky 	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
246768e326deSLeon Romanovsky 	int (*create_srq)(struct ib_srq *srq,
2468521ed0d9SKamal Heib 			  struct ib_srq_init_attr *srq_init_attr,
2469521ed0d9SKamal Heib 			  struct ib_udata *udata);
2470521ed0d9SKamal Heib 	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2471521ed0d9SKamal Heib 			  enum ib_srq_attr_mask srq_attr_mask,
2472521ed0d9SKamal Heib 			  struct ib_udata *udata);
2473521ed0d9SKamal Heib 	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2474119181d1SLeon Romanovsky 	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2475514aee66SLeon Romanovsky 	int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
2476521ed0d9SKamal Heib 			 struct ib_udata *udata);
2477521ed0d9SKamal Heib 	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2478521ed0d9SKamal Heib 			 int qp_attr_mask, struct ib_udata *udata);
2479521ed0d9SKamal Heib 	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2480521ed0d9SKamal Heib 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2481c4367a26SShamir Rabinovitch 	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2482e39afe3dSLeon Romanovsky 	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2483521ed0d9SKamal Heib 			 struct ib_udata *udata);
2484521ed0d9SKamal Heib 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
248543d781b9SLeon Romanovsky 	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2486521ed0d9SKamal Heib 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2487521ed0d9SKamal Heib 	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2488521ed0d9SKamal Heib 	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2489521ed0d9SKamal Heib 				     u64 virt_addr, int mr_access_flags,
2490521ed0d9SKamal Heib 				     struct ib_udata *udata);
24913bc489e8SJianxin Xiong 	struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
24923bc489e8SJianxin Xiong 					    u64 length, u64 virt_addr, int fd,
24933bc489e8SJianxin Xiong 					    int mr_access_flags,
24943bc489e8SJianxin Xiong 					    struct ib_udata *udata);
24956e0954b1SJason Gunthorpe 	struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
24966e0954b1SJason Gunthorpe 				       u64 length, u64 virt_addr,
24976e0954b1SJason Gunthorpe 				       int mr_access_flags, struct ib_pd *pd,
24986e0954b1SJason Gunthorpe 				       struct ib_udata *udata);
2499c4367a26SShamir Rabinovitch 	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2500521ed0d9SKamal Heib 	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
250142a3b153SGal Pressman 				  u32 max_num_sg);
250226bc7eaeSIsrael Rukshin 	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
250326bc7eaeSIsrael Rukshin 					    u32 max_num_data_sg,
250426bc7eaeSIsrael Rukshin 					    u32 max_num_meta_sg);
2505ad8a4496SMoni Shoua 	int (*advise_mr)(struct ib_pd *pd,
2506ad8a4496SMoni Shoua 			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2507ad8a4496SMoni Shoua 			 struct ib_sge *sg_list, u32 num_sge,
2508ad8a4496SMoni Shoua 			 struct uverbs_attr_bundle *attrs);
25091477d44cSAvihai Horon 
25101477d44cSAvihai Horon 	/*
25111477d44cSAvihai Horon 	 * Kernel users should universally support relaxed ordering (RO), as
25121477d44cSAvihai Horon 	 * they are designed to read data only after observing the CQE and use
25131477d44cSAvihai Horon 	 * the DMA API correctly.
25141477d44cSAvihai Horon 	 *
25151477d44cSAvihai Horon 	 * Some drivers implicitly enable RO if platform supports it.
25161477d44cSAvihai Horon 	 */
2517521ed0d9SKamal Heib 	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2518521ed0d9SKamal Heib 			 unsigned int *sg_offset);
2519521ed0d9SKamal Heib 	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2520521ed0d9SKamal Heib 			       struct ib_mr_status *mr_status);
2521d18bb3e1SLeon Romanovsky 	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2522521ed0d9SKamal Heib 	int (*dealloc_mw)(struct ib_mw *mw);
2523521ed0d9SKamal Heib 	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2524521ed0d9SKamal Heib 	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
252528ad5f65SLeon Romanovsky 	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2526d0c45c85SLeon Romanovsky 	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2527521ed0d9SKamal Heib 	struct ib_flow *(*create_flow)(struct ib_qp *qp,
2528521ed0d9SKamal Heib 				       struct ib_flow_attr *flow_attr,
2529d6673746SLeon Romanovsky 				       struct ib_udata *udata);
2530521ed0d9SKamal Heib 	int (*destroy_flow)(struct ib_flow *flow_id);
2531521ed0d9SKamal Heib 	int (*destroy_flow_action)(struct ib_flow_action *action);
25321fb7f897SMark Bloch 	int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2533521ed0d9SKamal Heib 				 int state);
25341fb7f897SMark Bloch 	int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2535521ed0d9SKamal Heib 			     struct ifla_vf_info *ivf);
25361fb7f897SMark Bloch 	int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2537521ed0d9SKamal Heib 			    struct ifla_vf_stats *stats);
25381fb7f897SMark Bloch 	int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2539bfcb3c5dSDanit Goldberg 			    struct ifla_vf_guid *node_guid,
2540bfcb3c5dSDanit Goldberg 			    struct ifla_vf_guid *port_guid);
25411fb7f897SMark Bloch 	int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2542521ed0d9SKamal Heib 			   int type);
2543521ed0d9SKamal Heib 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
2544521ed0d9SKamal Heib 				   struct ib_wq_init_attr *init_attr,
2545521ed0d9SKamal Heib 				   struct ib_udata *udata);
2546add53535SLeon Romanovsky 	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2547521ed0d9SKamal Heib 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2548521ed0d9SKamal Heib 			 u32 wq_attr_mask, struct ib_udata *udata);
2549c0a6b5ecSLeon Romanovsky 	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2550521ed0d9SKamal Heib 				    struct ib_rwq_ind_table_init_attr *init_attr,
2551521ed0d9SKamal Heib 				    struct ib_udata *udata);
2552521ed0d9SKamal Heib 	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2553521ed0d9SKamal Heib 	struct ib_dm *(*alloc_dm)(struct ib_device *device,
2554521ed0d9SKamal Heib 				  struct ib_ucontext *context,
2555521ed0d9SKamal Heib 				  struct ib_dm_alloc_attr *attr,
2556521ed0d9SKamal Heib 				  struct uverbs_attr_bundle *attrs);
2557c4367a26SShamir Rabinovitch 	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2558521ed0d9SKamal Heib 	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2559521ed0d9SKamal Heib 				   struct ib_dm_mr_attr *attr,
2560521ed0d9SKamal Heib 				   struct uverbs_attr_bundle *attrs);
25613b023e1bSLeon Romanovsky 	int (*create_counters)(struct ib_counters *counters,
25623b023e1bSLeon Romanovsky 			       struct uverbs_attr_bundle *attrs);
256371ff3f62SLeon Romanovsky 	int (*destroy_counters)(struct ib_counters *counters);
2564521ed0d9SKamal Heib 	int (*read_counters)(struct ib_counters *counters,
2565521ed0d9SKamal Heib 			     struct ib_counters_read_attr *counters_read_attr,
2566521ed0d9SKamal Heib 			     struct uverbs_attr_bundle *attrs);
25672cdfcdd8SMax Gurtovoy 	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
25682cdfcdd8SMax Gurtovoy 			    int data_sg_nents, unsigned int *data_sg_offset,
25692cdfcdd8SMax Gurtovoy 			    struct scatterlist *meta_sg, int meta_sg_nents,
25702cdfcdd8SMax Gurtovoy 			    unsigned int *meta_sg_offset);
25712cdfcdd8SMax Gurtovoy 
2572521ed0d9SKamal Heib 	/**
25734b5f4d3fSJason Gunthorpe 	 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
25744b5f4d3fSJason Gunthorpe 	 *   fill in the driver initialized data.  The struct is kfree()'ed by
25754b5f4d3fSJason Gunthorpe 	 *   the sysfs core when the device is removed.  A lifespan of -1 in the
25764b5f4d3fSJason Gunthorpe 	 *   return struct tells the core to set a default lifespan.
2577521ed0d9SKamal Heib 	 */
25784b5f4d3fSJason Gunthorpe 	struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
25794b5f4d3fSJason Gunthorpe 	struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
25801fb7f897SMark Bloch 						     u32 port_num);
2581521ed0d9SKamal Heib 	/**
2582521ed0d9SKamal Heib 	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2583521ed0d9SKamal Heib 	 * @index - The index in the value array we wish to have updated, or
2584521ed0d9SKamal Heib 	 *   num_counters if we want all stats updated
2585521ed0d9SKamal Heib 	 * Return codes -
2586521ed0d9SKamal Heib 	 *   < 0 - Error, no counters updated
2587521ed0d9SKamal Heib 	 *   index - Updated the single counter pointed to by index
2588521ed0d9SKamal Heib 	 *   num_counters - Updated all counters (will reset the timestamp
2589521ed0d9SKamal Heib 	 *     and prevent further calls for lifespan milliseconds)
2590521ed0d9SKamal Heib 	 * Drivers are allowed to update all counters in lieu of just the
2591521ed0d9SKamal Heib 	 *   one given in index at their option
2592521ed0d9SKamal Heib 	 */
2593521ed0d9SKamal Heib 	int (*get_hw_stats)(struct ib_device *device,
25941fb7f897SMark Bloch 			    struct rdma_hw_stats *stats, u32 port, int index);
2595d7407d16SJason Gunthorpe 
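	/*
	 * Example (editor's sketch) of the return convention above: a driver
	 * that refreshes all of its counters at once returns num_counters.
	 * "my_fw_read_counters" is a hypothetical helper.
	 *
	 *	static int my_get_hw_stats(struct ib_device *dev,
	 *				   struct rdma_hw_stats *stats,
	 *				   u32 port, int index)
	 *	{
	 *		my_fw_read_counters(dev, port, stats->value);
	 *		return stats->num_counters;
	 *	}
	 */
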
259602da3750SLeon Romanovsky 	/**
25975e2ddd1eSAharon Landau 	 * modify_hw_stat - Modify the counter configuration
25985e2ddd1eSAharon Landau 	 * @enable: true/false when enable/disable a counter
25995e2ddd1eSAharon Landau 	 * Return codes - 0 on success or error code otherwise.
26005e2ddd1eSAharon Landau 	 */
26015e2ddd1eSAharon Landau 	int (*modify_hw_stat)(struct ib_device *device, u32 port,
26025e2ddd1eSAharon Landau 			      unsigned int counter_index, bool enable);
26035e2ddd1eSAharon Landau 	/**
260402da3750SLeon Romanovsky 	 * Allows rdma drivers to add their own restrack attributes.
260502da3750SLeon Romanovsky 	 */
2606f4434529SMaor Gottlieb 	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
260765959522SMaor Gottlieb 	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
26089e2a187aSMaor Gottlieb 	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
260965959522SMaor Gottlieb 	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
26105cc34116SMaor Gottlieb 	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
261165959522SMaor Gottlieb 	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2612211cd945SMaor Gottlieb 	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
261321a428a0SLeon Romanovsky 
2614d0899892SJason Gunthorpe 	/* Device lifecycle callbacks */
2615d0899892SJason Gunthorpe 	/*
2616ca22354bSJason Gunthorpe 	 * Called after the device becomes registered, before clients are
2617ca22354bSJason Gunthorpe 	 * attached
2618ca22354bSJason Gunthorpe 	 */
2619ca22354bSJason Gunthorpe 	int (*enable_driver)(struct ib_device *dev);
2620ca22354bSJason Gunthorpe 	/*
2621d0899892SJason Gunthorpe 	 * This is called as part of ib_dealloc_device().
2622d0899892SJason Gunthorpe 	 */
2623d0899892SJason Gunthorpe 	void (*dealloc_driver)(struct ib_device *dev);
2624d0899892SJason Gunthorpe 
2625dd05cb82SKamal Heib 	/* iWarp CM callbacks */
2626dd05cb82SKamal Heib 	void (*iw_add_ref)(struct ib_qp *qp);
2627dd05cb82SKamal Heib 	void (*iw_rem_ref)(struct ib_qp *qp);
2628dd05cb82SKamal Heib 	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2629dd05cb82SKamal Heib 	int (*iw_connect)(struct iw_cm_id *cm_id,
2630dd05cb82SKamal Heib 			  struct iw_cm_conn_param *conn_param);
2631dd05cb82SKamal Heib 	int (*iw_accept)(struct iw_cm_id *cm_id,
2632dd05cb82SKamal Heib 			 struct iw_cm_conn_param *conn_param);
2633dd05cb82SKamal Heib 	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2634dd05cb82SKamal Heib 			 u8 pdata_len);
2635dd05cb82SKamal Heib 	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2636dd05cb82SKamal Heib 	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
263799fa331dSMark Zhang 	/**
263899fa331dSMark Zhang 	 * counter_bind_qp - Bind a QP to a counter.
263999fa331dSMark Zhang 	 * @counter - The counter to be bound. If counter->id is zero then
264099fa331dSMark Zhang 	 *   the driver needs to allocate a new counter and set counter->id
264199fa331dSMark Zhang 	 */
264299fa331dSMark Zhang 	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
264399fa331dSMark Zhang 	/**
264499fa331dSMark Zhang 	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
264599fa331dSMark Zhang 	 *   counter and bind it onto the default one
264699fa331dSMark Zhang 	 */
264799fa331dSMark Zhang 	int (*counter_unbind_qp)(struct ib_qp *qp);
264899fa331dSMark Zhang 	/**
264999fa331dSMark Zhang 	 * counter_dealloc - De-allocate the hw counter
265099fa331dSMark Zhang 	 */
265199fa331dSMark Zhang 	int (*counter_dealloc)(struct rdma_counter *counter);
2652c4ffee7cSMark Zhang 	/**
2653c4ffee7cSMark Zhang 	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2654c4ffee7cSMark Zhang 	 * the driver initialized data.
2655c4ffee7cSMark Zhang 	 */
2656c4ffee7cSMark Zhang 	struct rdma_hw_stats *(*counter_alloc_stats)(
2657c4ffee7cSMark Zhang 		struct rdma_counter *counter);
2658c4ffee7cSMark Zhang 	/**
2659c4ffee7cSMark Zhang 	 * counter_update_stats - Query the stats value of this counter
2660c4ffee7cSMark Zhang 	 */
2661c4ffee7cSMark Zhang 	int (*counter_update_stats)(struct rdma_counter *counter);
2662dd05cb82SKamal Heib 
26634061ff7aSErez Alfasi 	/**
26644061ff7aSErez Alfasi 	 * Allows rdma drivers to add their own restrack attributes
26654061ff7aSErez Alfasi 	 * dumped via 'rdma stat' iproute2 command.
26664061ff7aSErez Alfasi 	 */
2667f4434529SMaor Gottlieb 	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
26684061ff7aSErez Alfasi 
26691c8fb1eaSYishai Hadas 	/* query driver for its ucontext properties */
26701c8fb1eaSYishai Hadas 	int (*query_ucontext)(struct ib_ucontext *context,
26711c8fb1eaSYishai Hadas 			      struct uverbs_attr_bundle *attrs);
26721c8fb1eaSYishai Hadas 
2673514aee66SLeon Romanovsky 	/*
2674514aee66SLeon Romanovsky 	 * Provide NUMA node. This API exists for rdmavt/hfi1 only.
2675514aee66SLeon Romanovsky 	 * Everyone else relies on the Linux memory management model.
2676514aee66SLeon Romanovsky 	 */
2677514aee66SLeon Romanovsky 	int (*get_numa_node)(struct ib_device *dev);
2678514aee66SLeon Romanovsky 
2679d3456914SLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_ah);
26803b023e1bSLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_counters);
2681e39afe3dSLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_cq);
2682d18bb3e1SLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_mw);
268321a428a0SLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
2684514aee66SLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_qp);
2685c0a6b5ecSLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
268668e326deSLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_srq);
2687a2a074efSLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
268828ad5f65SLeon Romanovsky 	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2689521ed0d9SKamal Heib };
2690521ed0d9SKamal Heib 
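/*
 * Example (editor's sketch): a provider fills in only the verbs it supports
 * and registers them with ib_set_device_ops() (declared later in this
 * header); handlers left out stay NULL and are treated as unsupported.
 * All "my_drv_*" names are hypothetical.
 *
 *	static const struct ib_device_ops my_drv_dev_ops = {
 *		.owner = THIS_MODULE,
 *		.driver_id = RDMA_DRIVER_UNKNOWN,
 *		.uverbs_abi_ver = 1,
 *		.query_device = my_drv_query_device,
 *		.query_port = my_drv_query_port,
 *		.alloc_pd = my_drv_alloc_pd,
 *		.dealloc_pd = my_drv_dealloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_drv_pd, ibpd),
 *	};
 *
 *	ib_set_device_ops(ibdev, &my_drv_dev_ops);
 */
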
2691cebe556bSParav Pandit struct ib_core_device {
2692cebe556bSParav Pandit 	/* device must be the first element in the structure as long as the
2693cebe556bSParav Pandit 	 * union of ib_core_device and device exists in ib_device.
2694cebe556bSParav Pandit 	 */
2695cebe556bSParav Pandit 	struct device dev;
26964e0f7b90SParav Pandit 	possible_net_t rdma_net;
2697cebe556bSParav Pandit 	struct kobject *ports_kobj;
2698cebe556bSParav Pandit 	struct list_head port_list;
2699cebe556bSParav Pandit 	struct ib_device *owner; /* reach back to owner ib_device */
2700cebe556bSParav Pandit };
270141eda65cSLeon Romanovsky 
2702cebe556bSParav Pandit struct rdma_restrack_root;
2703a4d61e84SRoland Dreier struct ib_device {
27040957c29fSBart Van Assche 	/* Do not access @dma_device directly from ULPs or from HW drivers. */
27050957c29fSBart Van Assche 	struct device                *dma_device;
27063023a1e9SKamal Heib 	struct ib_device_ops	     ops;
2707a4d61e84SRoland Dreier 	char                          name[IB_DEVICE_NAME_MAX];
2708324e227eSJason Gunthorpe 	struct rcu_head rcu_head;
2709a4d61e84SRoland Dreier 
2710a4d61e84SRoland Dreier 	struct list_head              event_handler_list;
27116b57cea9SParav Pandit 	/* Protects event_handler_list */
27126b57cea9SParav Pandit 	struct rw_semaphore event_handler_rwsem;
27136b57cea9SParav Pandit 
27146b57cea9SParav Pandit 	/* Protects QP's event_handler calls and open_qp list */
271540adf686SParav Pandit 	spinlock_t qp_open_list_lock;
2716a4d61e84SRoland Dreier 
2717921eab11SJason Gunthorpe 	struct rw_semaphore	      client_data_rwsem;
27180df91bb6SJason Gunthorpe 	struct xarray                 client_data;
2719d0899892SJason Gunthorpe 	struct mutex                  unregistration_lock;
2720a4d61e84SRoland Dreier 
272117e10646SParav Pandit 	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
272217e10646SParav Pandit 	rwlock_t cache_lock;
27237738613eSIra Weiny 	/**
27248ceb1357SJason Gunthorpe 	 * port_data is indexed by port number
27257738613eSIra Weiny 	 */
27268ceb1357SJason Gunthorpe 	struct ib_port_data *port_data;
2727a4d61e84SRoland Dreier 
2728f4fd0b22SMichael S. Tsirkin 	int			      num_comp_vectors;
2729f4fd0b22SMichael S. Tsirkin 
2730cebe556bSParav Pandit 	union {
2731f4e91eb4STony Jones 		struct device		dev;
2732cebe556bSParav Pandit 		struct ib_core_device	coredev;
2733cebe556bSParav Pandit 	};
2734cebe556bSParav Pandit 
2735b7066b32SJason Gunthorpe 	/* The first group is for device attributes,
2736b7066b32SJason Gunthorpe 	 * the second group is for driver-provided attributes (optional),
2737b7066b32SJason Gunthorpe 	 * and the third group is for the hw_stats.
2738b7066b32SJason Gunthorpe 	 * It is a NULL-terminated array.
2739d4122f5aSParav Pandit 	 */
2740b7066b32SJason Gunthorpe 	const struct attribute_group	*groups[4];
2741adee9f3fSParav Pandit 
274217a55f79SAlexander Chiang 	u64			     uverbs_cmd_mask;
2743274c0891SRoland Dreier 
2744bd99fdeaSYuval Shaia 	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2745cf311cd4SSean Hefty 	__be64			     node_guid;
274696f15c03SSteve Wise 	u32			     local_dma_lkey;
27474139032bSHal Rosenstock 	u16                          is_switch:1;
27486780c4faSGal Pressman 	/* Indicates kernel verbs support, should not be used in drivers */
27496780c4faSGal Pressman 	u16                          kverbs_provider:1;
2750da662979SYamin Friedman 	/* CQ adaptive moderation (RDMA DIM) */
2751da662979SYamin Friedman 	u16                          use_cq_dim:1;
2752a4d61e84SRoland Dreier 	u8                           node_type;
27531fb7f897SMark Bloch 	u32			     phys_port_cnt;
27543e153a93SIra Weiny 	struct ib_device_attr        attrs;
2755467f432aSJason Gunthorpe 	struct hw_stats_device_data *hw_stats_data;
27567738613eSIra Weiny 
275743579b5fSParav Pandit #ifdef CONFIG_CGROUP_RDMA
275843579b5fSParav Pandit 	struct rdmacg_device         cg_device;
275943579b5fSParav Pandit #endif
276043579b5fSParav Pandit 
2761ecc82c53SLeon Romanovsky 	u32                          index;
2762c7ff819aSYamin Friedman 
2763c7ff819aSYamin Friedman 	spinlock_t                   cq_pools_lock;
2764c7ff819aSYamin Friedman 	struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2765c7ff819aSYamin Friedman 
276641eda65cSLeon Romanovsky 	struct rdma_restrack_root *res;
2767ecc82c53SLeon Romanovsky 
27680cbf432dSJason Gunthorpe 	const struct uapi_definition   *driver_def;
2769d79af724SJason Gunthorpe 
277001b67117SParav Pandit 	/*
2771d79af724SJason Gunthorpe 	 * Positive refcount indicates that the device is currently
2772d79af724SJason Gunthorpe 	 * registered and cannot be unregistered.
277301b67117SParav Pandit 	 */
277401b67117SParav Pandit 	refcount_t refcount;
277501b67117SParav Pandit 	struct completion unreg_completion;
2776d0899892SJason Gunthorpe 	struct work_struct unregistration_work;
27773856ec4bSSteve Wise 
27783856ec4bSSteve Wise 	const struct rdma_link_ops *link_ops;
27794e0f7b90SParav Pandit 
27804e0f7b90SParav Pandit 	/* Protects compat_devs xarray modifications */
27814e0f7b90SParav Pandit 	struct mutex compat_devs_mutex;
27824e0f7b90SParav Pandit 	/* Maintains compat devices for each net namespace */
27834e0f7b90SParav Pandit 	struct xarray compat_devs;
2784dd05cb82SKamal Heib 
2785dd05cb82SKamal Heib 	/* Used by iWarp CM */
2786dd05cb82SKamal Heib 	char iw_ifname[IFNAMSIZ];
2787dd05cb82SKamal Heib 	u32 iw_driver_flags;
2788bd3920eaSMaor Gottlieb 	u32 lag_flags;
2789a4d61e84SRoland Dreier };
2790a4d61e84SRoland Dreier 
2791514aee66SLeon Romanovsky static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
2792514aee66SLeon Romanovsky 				    gfp_t gfp, bool is_numa_aware)
2793514aee66SLeon Romanovsky {
2794514aee66SLeon Romanovsky 	if (is_numa_aware && dev->ops.get_numa_node)
2795514aee66SLeon Romanovsky 		return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
2796514aee66SLeon Romanovsky 
2797514aee66SLeon Romanovsky 	return kzalloc(size, gfp);
2798514aee66SLeon Romanovsky }
2799514aee66SLeon Romanovsky 
28000e2d00ebSJason Gunthorpe struct ib_client_nl_info;
2801a4d61e84SRoland Dreier struct ib_client {
2802e59178d8SJason Gunthorpe 	const char *name;
280311a0ae4cSJason Gunthorpe 	int (*add)(struct ib_device *ibdev);
28047c1eb45aSHaggai Eran 	void (*remove)(struct ib_device *, void *client_data);
2805dc1435c0SLeon Romanovsky 	void (*rename)(struct ib_device *dev, void *client_data);
28060e2d00ebSJason Gunthorpe 	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
28070e2d00ebSJason Gunthorpe 			   struct ib_client_nl_info *res);
28080e2d00ebSJason Gunthorpe 	int (*get_global_nl_info)(struct ib_client_nl_info *res);
2809a4d61e84SRoland Dreier 
28109268f72dSYotam Kenneth 	/* Returns the net_dev belonging to this ib_client and matching the
28119268f72dSYotam Kenneth 	 * given parameters.
28129268f72dSYotam Kenneth 	 * @dev:	 An RDMA device that the net_dev use for communication.
28139268f72dSYotam Kenneth 	 * @port:	 A physical port number on the RDMA device.
28149268f72dSYotam Kenneth 	 * @pkey:	 P_Key that the net_dev uses if applicable.
28159268f72dSYotam Kenneth 	 * @gid:	 A GID that the net_dev uses to communicate.
28169268f72dSYotam Kenneth 	 * @addr:	 An IP address the net_dev is configured with.
28179268f72dSYotam Kenneth 	 * @client_data: The device's client data set by ib_set_client_data().
28189268f72dSYotam Kenneth 	 *
28199268f72dSYotam Kenneth 	 * An ib_client that implements a net_dev on top of RDMA devices
28209268f72dSYotam Kenneth 	 * (such as IP over IB) should implement this callback, allowing the
28219268f72dSYotam Kenneth 	 * rdma_cm module to find the right net_dev for a given request.
28229268f72dSYotam Kenneth 	 *
28239268f72dSYotam Kenneth 	 * The caller is responsible for calling dev_put on the returned
28249268f72dSYotam Kenneth 	 * netdev. */
28259268f72dSYotam Kenneth 	struct net_device *(*get_net_dev_by_params)(
28269268f72dSYotam Kenneth 			struct ib_device *dev,
28271fb7f897SMark Bloch 			u32 port,
28289268f72dSYotam Kenneth 			u16 pkey,
28299268f72dSYotam Kenneth 			const union ib_gid *gid,
28309268f72dSYotam Kenneth 			const struct sockaddr *addr,
28319268f72dSYotam Kenneth 			void *client_data);
2832621e55ffSJason Gunthorpe 
2833621e55ffSJason Gunthorpe 	refcount_t uses;
2834621e55ffSJason Gunthorpe 	struct completion uses_zero;
2835e59178d8SJason Gunthorpe 	u32 client_id;
28366780c4faSGal Pressman 
28376780c4faSGal Pressman 	/* kverbs are not required by the client */
28386780c4faSGal Pressman 	u8 no_kverbs_req:1;
2839a4d61e84SRoland Dreier };
2840a4d61e84SRoland Dreier 
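/*
 * Example (editor's sketch): a minimal ULP client. The core calls add() for
 * every existing and future ib_device; the client stashes per-device state
 * with ib_set_client_data(). All names are hypothetical.
 *
 *	static struct ib_client my_ulp_client;
 *
 *	static int my_ulp_add(struct ib_device *ibdev)
 *	{
 *		struct my_ulp_dev *ud = kzalloc(sizeof(*ud), GFP_KERNEL);
 *
 *		if (!ud)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &my_ulp_client, ud);
 *		return 0;
 *	}
 *
 *	static void my_ulp_remove(struct ib_device *ibdev, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_ulp_client = {
 *		.name = "my_ulp",
 *		.add = my_ulp_add,
 *		.remove = my_ulp_remove,
 *	};
 *
 *	ib_register_client(&my_ulp_client);
 */
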
2841a808273aSShiraz Saleem /*
2842a808273aSShiraz Saleem  * IB block DMA iterator
2843a808273aSShiraz Saleem  *
2844a808273aSShiraz Saleem  * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2845a808273aSShiraz Saleem  * to a HW supported page size.
2846a808273aSShiraz Saleem  */
2847a808273aSShiraz Saleem struct ib_block_iter {
2848a808273aSShiraz Saleem 	/* internal states */
2849a808273aSShiraz Saleem 	struct scatterlist *__sg;	/* sg holding the current aligned block */
2850a808273aSShiraz Saleem 	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
2851d103c131SMike Marciniszyn 	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
2852a808273aSShiraz Saleem 	unsigned int __sg_nents;	/* number of SG entries */
2853a808273aSShiraz Saleem 	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
2854a808273aSShiraz Saleem 	unsigned int __pg_bit;		/* alignment of current block */
2855a808273aSShiraz Saleem };
2856a808273aSShiraz Saleem 
2857459cc69fSLeon Romanovsky struct ib_device *_ib_alloc_device(size_t size);
2858459cc69fSLeon Romanovsky #define ib_alloc_device(drv_struct, member)                                    \
2859459cc69fSLeon Romanovsky 	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2860459cc69fSLeon Romanovsky 				      BUILD_BUG_ON_ZERO(offsetof(              \
2861459cc69fSLeon Romanovsky 					      struct drv_struct, member))),    \
2862459cc69fSLeon Romanovsky 		     struct drv_struct, member)
2863459cc69fSLeon Romanovsky 
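/*
 * Example (editor's sketch): a driver embeds struct ib_device in its own
 * device struct and allocates both with ib_alloc_device(), pairing it with
 * ib_dealloc_device() on error paths. "my_drv_dev" is hypothetical; the
 * macro build-checks that the member sits at offset 0.
 *
 *	struct my_drv_dev {
 *		struct ib_device ibdev;	// must be first (offset 0)
 *		void __iomem *bar;
 *	};
 *
 *	struct my_drv_dev *dev = ib_alloc_device(my_drv_dev, ibdev);
 *	if (!dev)
 *		return -ENOMEM;
 */
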
2864a4d61e84SRoland Dreier void ib_dealloc_device(struct ib_device *device);
2865a4d61e84SRoland Dreier 
28669abb0d1bSLeon Romanovsky void ib_get_device_fw_str(struct ib_device *device, char *str);
28675fa76c20SIra Weiny 
2868e0477b34SJason Gunthorpe int ib_register_device(struct ib_device *device, const char *name,
2869e0477b34SJason Gunthorpe 		       struct device *dma_device);
2870a4d61e84SRoland Dreier void ib_unregister_device(struct ib_device *device);
2871d0899892SJason Gunthorpe void ib_unregister_driver(enum rdma_driver_id driver_id);
2872d0899892SJason Gunthorpe void ib_unregister_device_and_put(struct ib_device *device);
2873d0899892SJason Gunthorpe void ib_unregister_device_queued(struct ib_device *ib_dev);
2874a4d61e84SRoland Dreier 
2875a4d61e84SRoland Dreier int ib_register_client   (struct ib_client *client);
2876a4d61e84SRoland Dreier void ib_unregister_client(struct ib_client *client);
2877a4d61e84SRoland Dreier 
2878a808273aSShiraz Saleem void __rdma_block_iter_start(struct ib_block_iter *biter,
2879a808273aSShiraz Saleem 			     struct scatterlist *sglist,
2880a808273aSShiraz Saleem 			     unsigned int nents,
2881a808273aSShiraz Saleem 			     unsigned long pgsz);
2882a808273aSShiraz Saleem bool __rdma_block_iter_next(struct ib_block_iter *biter);
2883a808273aSShiraz Saleem 
2884a808273aSShiraz Saleem /**
2885a808273aSShiraz Saleem  * rdma_block_iter_dma_address - get the aligned dma address of the current
2886a808273aSShiraz Saleem  * block held by the block iterator.
2887a808273aSShiraz Saleem  * @biter: block iterator holding the memory block
2888a808273aSShiraz Saleem  */
2889a808273aSShiraz Saleem static inline dma_addr_t
2890a808273aSShiraz Saleem rdma_block_iter_dma_address(struct ib_block_iter *biter)
2891a808273aSShiraz Saleem {
2892a808273aSShiraz Saleem 	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2893a808273aSShiraz Saleem }
2894a808273aSShiraz Saleem 
2895a808273aSShiraz Saleem /**
2896a808273aSShiraz Saleem  * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2897a808273aSShiraz Saleem  * @sglist: sglist to iterate over
2898a808273aSShiraz Saleem  * @biter: block iterator holding the memory block
2899a808273aSShiraz Saleem  * @nents: maximum number of sg entries to iterate over
2900a808273aSShiraz Saleem  * @pgsz: best HW supported page size to use
2901a808273aSShiraz Saleem  *
2902a808273aSShiraz Saleem  * Callers may use rdma_block_iter_dma_address() to get each
2903a808273aSShiraz Saleem  * block's aligned DMA address.
2904a808273aSShiraz Saleem  */
2905a808273aSShiraz Saleem #define rdma_for_each_block(sglist, biter, nents, pgsz)		\
2906a808273aSShiraz Saleem 	for (__rdma_block_iter_start(biter, sglist, nents,	\
2907a808273aSShiraz Saleem 				     pgsz);			\
2908a808273aSShiraz Saleem 	     __rdma_block_iter_next(biter);)
2909a808273aSShiraz Saleem 
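/*
 * Example (editor's sketch): walking a DMA-mapped SGL in device-page-sized
 * chunks, e.g. to program a translation table. "sglist"/"nents" are the
 * mapped SGL and its entry count; "pgsz" would come from the driver's
 * supported page-size bitmap; "mtt" is a hypothetical destination array.
 *
 *	struct ib_block_iter biter;
 *	int i = 0;
 *
 *	rdma_for_each_block(sglist, &biter, nents, pgsz)
 *		mtt[i++] = rdma_block_iter_dma_address(&biter);
 */
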
29100df91bb6SJason Gunthorpe /**
29110df91bb6SJason Gunthorpe  * ib_get_client_data - Get IB client context
29120df91bb6SJason Gunthorpe  * @device:Device to get context for
29130df91bb6SJason Gunthorpe  * @client:Client to get context for
29140df91bb6SJason Gunthorpe  *
29150df91bb6SJason Gunthorpe  * ib_get_client_data() returns the client context data set with
29160df91bb6SJason Gunthorpe  * ib_set_client_data(). This can only be called while the client is
29170df91bb6SJason Gunthorpe  * registered to the device, once the ib_client remove() callback returns this
29180df91bb6SJason Gunthorpe  * cannot be called.
29190df91bb6SJason Gunthorpe  */
29200df91bb6SJason Gunthorpe static inline void *ib_get_client_data(struct ib_device *device,
29210df91bb6SJason Gunthorpe 				       struct ib_client *client)
29220df91bb6SJason Gunthorpe {
29230df91bb6SJason Gunthorpe 	return xa_load(&device->client_data, client->client_id);
29240df91bb6SJason Gunthorpe }
2925a4d61e84SRoland Dreier void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2926a4d61e84SRoland Dreier 			 void *data);
2927521ed0d9SKamal Heib void ib_set_device_ops(struct ib_device *device,
2928521ed0d9SKamal Heib 		       const struct ib_device_ops *ops);
2929a4d61e84SRoland Dreier 
29305f9794dcSJason Gunthorpe int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2931c043ff2cSMichal Kalderon 		      unsigned long pfn, unsigned long size, pgprot_t prot,
2932c043ff2cSMichal Kalderon 		      struct rdma_user_mmap_entry *entry);
29333411f9f0SMichal Kalderon int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
29343411f9f0SMichal Kalderon 				struct rdma_user_mmap_entry *entry,
29353411f9f0SMichal Kalderon 				size_t length);
29367a763d18SYishai Hadas int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
29377a763d18SYishai Hadas 				      struct rdma_user_mmap_entry *entry,
29387a763d18SYishai Hadas 				      size_t length, u32 min_pgoff,
29397a763d18SYishai Hadas 				      u32 max_pgoff);
29407a763d18SYishai Hadas 
29416d202d9fSChengchang Tang static inline int
29426d202d9fSChengchang Tang rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
29436d202d9fSChengchang Tang 				  struct rdma_user_mmap_entry *entry,
29446d202d9fSChengchang Tang 				  size_t length, u32 pgoff)
29456d202d9fSChengchang Tang {
29466d202d9fSChengchang Tang 	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
29476d202d9fSChengchang Tang 						 pgoff);
29486d202d9fSChengchang Tang }
29496d202d9fSChengchang Tang 
29503411f9f0SMichal Kalderon struct rdma_user_mmap_entry *
29513411f9f0SMichal Kalderon rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
29523411f9f0SMichal Kalderon 			       unsigned long pgoff);
29533411f9f0SMichal Kalderon struct rdma_user_mmap_entry *
29543411f9f0SMichal Kalderon rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
29553411f9f0SMichal Kalderon 			 struct vm_area_struct *vma);
29563411f9f0SMichal Kalderon void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
29573411f9f0SMichal Kalderon 
29583411f9f0SMichal Kalderon void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
29595f9794dcSJason Gunthorpe 
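/*
 * Example (editor's sketch): a driver's .mmap handler resolving the entry
 * from the VMA and mapping the backing pages. "pfn" is a hypothetical
 * driver-computed page frame number; error handling is trimmed.
 *
 *	static int my_drv_mmap(struct ib_ucontext *ctx,
 *			       struct vm_area_struct *vma)
 *	{
 *		struct rdma_user_mmap_entry *entry;
 *		int ret;
 *
 *		entry = rdma_user_mmap_entry_get(ctx, vma);
 *		if (!entry)
 *			return -EINVAL;
 *		ret = rdma_user_mmap_io(ctx, vma, pfn,
 *					entry->npages * PAGE_SIZE,
 *					pgprot_noncached(vma->vm_page_prot),
 *					entry);
 *		rdma_user_mmap_entry_put(entry);
 *		return ret;
 *	}
 */
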
2960a4d61e84SRoland Dreier static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2961a4d61e84SRoland Dreier {
2962a4d61e84SRoland Dreier 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2963a4d61e84SRoland Dreier }
2964a4d61e84SRoland Dreier 
2965a4d61e84SRoland Dreier static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2966a4d61e84SRoland Dreier {
296743c61165SYann Droneaud 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2968a4d61e84SRoland Dreier }
2969a4d61e84SRoland Dreier 
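/*
 * Example (editor's sketch): a driver verb copying its request in and its
 * response out through ib_udata. "my_drv_create_cq_req"/"..._resp" are
 * hypothetical driver ABI structs; min() guards against userspace built
 * against an older or newer ABI.
 *
 *	struct my_drv_create_cq_req req = {};
 *	struct my_drv_create_cq_resp resp = {};
 *	int err;
 *
 *	err = ib_copy_from_udata(&req, udata,
 *				 min(udata->inlen, sizeof(req)));
 *	if (err)
 *		return err;
 *	...
 *	err = ib_copy_to_udata(udata, &resp,
 *			       min(udata->outlen, sizeof(resp)));
 */
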
2970c66db311SMatan Barak static inline bool ib_is_buffer_cleared(const void __user *p,
2971301a721eSMatan Barak 					size_t len)
2972301a721eSMatan Barak {
297392d27ae6SMarkus Elfring 	bool ret;
2974301a721eSMatan Barak 	u8 *buf;
2975301a721eSMatan Barak 
2976301a721eSMatan Barak 	if (len > USHRT_MAX)
2977301a721eSMatan Barak 		return false;
2978301a721eSMatan Barak 
297992d27ae6SMarkus Elfring 	buf = memdup_user(p, len);
298092d27ae6SMarkus Elfring 	if (IS_ERR(buf))
2981301a721eSMatan Barak 		return false;
2982301a721eSMatan Barak 
2983301a721eSMatan Barak 	ret = !memchr_inv(buf, 0, len);
2984301a721eSMatan Barak 	kfree(buf);
2985301a721eSMatan Barak 	return ret;
2986301a721eSMatan Barak }
2987301a721eSMatan Barak 
2988c66db311SMatan Barak static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2989c66db311SMatan Barak 				       size_t offset,
2990c66db311SMatan Barak 				       size_t len)
2991c66db311SMatan Barak {
2992c66db311SMatan Barak 	return ib_is_buffer_cleared(udata->inbuf + offset, len);
2993c66db311SMatan Barak }
2994c66db311SMatan Barak 
29958a51866fSRoland Dreier /**
29968a51866fSRoland Dreier  * ib_modify_qp_is_ok - Check that the supplied attribute mask
29978a51866fSRoland Dreier  * contains all required attributes and no attributes not allowed for
29988a51866fSRoland Dreier  * the given QP state transition.
29998a51866fSRoland Dreier  * @cur_state: Current QP state
30008a51866fSRoland Dreier  * @next_state: Next QP state
30018a51866fSRoland Dreier  * @type: QP type
30028a51866fSRoland Dreier  * @mask: Mask of supplied QP attributes
30038a51866fSRoland Dreier  *
30048a51866fSRoland Dreier  * This function is a helper function that a low-level driver's
30058a51866fSRoland Dreier  * modify_qp method can use to validate the consumer's input.  It
30068a51866fSRoland Dreier  * checks that cur_state and next_state are valid QP states, that a
30078a51866fSRoland Dreier  * transition from cur_state to next_state is allowed by the IB spec,
30088a51866fSRoland Dreier  * and that the attribute mask supplied is allowed for the transition.
30098a51866fSRoland Dreier  */
301019b1f540SLeon Romanovsky bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
3011d31131bbSKamal Heib 			enum ib_qp_type type, enum ib_qp_attr_mask mask);
30128a51866fSRoland Dreier 
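/*
 * Example (editor's sketch): typical use at the top of a driver's
 * modify_qp handler.
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */
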
3013dcc9881eSLeon Romanovsky void ib_register_event_handler(struct ib_event_handler *event_handler);
3014dcc9881eSLeon Romanovsky void ib_unregister_event_handler(struct ib_event_handler *event_handler);
30156b57cea9SParav Pandit void ib_dispatch_event(const struct ib_event *event);
3016a4d61e84SRoland Dreier 
3017a4d61e84SRoland Dreier int ib_query_port(struct ib_device *device,
30181fb7f897SMark Bloch 		  u32 port_num, struct ib_port_attr *port_attr);
3019a4d61e84SRoland Dreier 
3020a3f5adafSEli Cohen enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
30211fb7f897SMark Bloch 					       u32 port_num);
3022a3f5adafSEli Cohen 
30230cf18d77SIra Weiny /**
30244139032bSHal Rosenstock  * rdma_cap_ib_switch - Check if the device is IB switch
30254139032bSHal Rosenstock  * @device: Device to check
30264139032bSHal Rosenstock  *
30274139032bSHal Rosenstock  * The device driver is responsible for setting the is_switch bit
30284139032bSHal Rosenstock  * in the ib_device structure at init time.
30294139032bSHal Rosenstock  *
30304139032bSHal Rosenstock  * Return: true if the device is IB switch.
30314139032bSHal Rosenstock  */
30324139032bSHal Rosenstock static inline bool rdma_cap_ib_switch(const struct ib_device *device)
30334139032bSHal Rosenstock {
30344139032bSHal Rosenstock 	return device->is_switch;
30354139032bSHal Rosenstock }
30364139032bSHal Rosenstock 
30374139032bSHal Rosenstock /**
30380cf18d77SIra Weiny  * rdma_start_port - Return the first valid port number for the device
30390cf18d77SIra Weiny  * specified
30400cf18d77SIra Weiny  *
30410cf18d77SIra Weiny  * @device: Device to be checked
30420cf18d77SIra Weiny  *
30430cf18d77SIra Weiny  * Return start port number
30440cf18d77SIra Weiny  */
30451fb7f897SMark Bloch static inline u32 rdma_start_port(const struct ib_device *device)
30460cf18d77SIra Weiny {
30474139032bSHal Rosenstock 	return rdma_cap_ib_switch(device) ? 0 : 1;
30480cf18d77SIra Weiny }
30490cf18d77SIra Weiny 
30500cf18d77SIra Weiny /**
3051ea1075edSJason Gunthorpe  * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3052ea1075edSJason Gunthorpe  * @device - The struct ib_device * to iterate over
3053ea1075edSJason Gunthorpe  * @iter - The unsigned int to store the port number
3054ea1075edSJason Gunthorpe  */
3055ea1075edSJason Gunthorpe #define rdma_for_each_port(device, iter)                                       \
30561fb7f897SMark Bloch 	for (iter = rdma_start_port(device +				       \
30571fb7f897SMark Bloch 				    BUILD_BUG_ON_ZERO(!__same_type(u32,	       \
30581fb7f897SMark Bloch 								   iter)));    \
30591fb7f897SMark Bloch 	     iter <= rdma_end_port(device); iter++)
3060ea1075edSJason Gunthorpe 
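/*
 * Example (editor's sketch): querying every valid port. "iter" must be a
 * u32, which the macro build-checks.
 *
 *	u32 port;
 *
 *	rdma_for_each_port(ibdev, port) {
 *		struct ib_port_attr attr;
 *
 *		if (!ib_query_port(ibdev, port, &attr))
 *			pr_info("port %u state %d\n", port, attr.state);
 *	}
 */
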
3061ea1075edSJason Gunthorpe /**
30620cf18d77SIra Weiny  * rdma_end_port - Return the last valid port number for the device
30630cf18d77SIra Weiny  * specified
30640cf18d77SIra Weiny  *
30650cf18d77SIra Weiny  * @device: Device to be checked
30660cf18d77SIra Weiny  *
30670cf18d77SIra Weiny  * Return last port number
30680cf18d77SIra Weiny  */
30691fb7f897SMark Bloch static inline u32 rdma_end_port(const struct ib_device *device)
30700cf18d77SIra Weiny {
30714139032bSHal Rosenstock 	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
30720cf18d77SIra Weiny }
30730cf18d77SIra Weiny 
307424dc831bSYuval Shaia static inline int rdma_is_port_valid(const struct ib_device *device,
307524dc831bSYuval Shaia 				     unsigned int port)
307624dc831bSYuval Shaia {
307724dc831bSYuval Shaia 	return (port >= rdma_start_port(device) &&
307824dc831bSYuval Shaia 		port <= rdma_end_port(device));
307924dc831bSYuval Shaia }
308024dc831bSYuval Shaia 
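/*
 * Usage sketch (illustrative, not part of the in-kernel API): count the
 * valid ports of a device with rdma_for_each_port().  The iterator must be
 * a u32; the helper name example_port_count() is hypothetical.
 */
static inline unsigned int example_port_count(const struct ib_device *device)
{
	u32 port;
	unsigned int n = 0;

	rdma_for_each_port(device, port)
		n++;	/* every port visited here satisfies rdma_is_port_valid() */
	return n;
}
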
3081b02289b3SArtemy Kovalyov static inline bool rdma_is_grh_required(const struct ib_device *device,
30821fb7f897SMark Bloch 					u32 port_num)
3083b02289b3SArtemy Kovalyov {
30848ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
3085b02289b3SArtemy Kovalyov 	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
3086b02289b3SArtemy Kovalyov }
3087b02289b3SArtemy Kovalyov 
30881fb7f897SMark Bloch static inline bool rdma_protocol_ib(const struct ib_device *device,
30891fb7f897SMark Bloch 				    u32 port_num)
3090de66be94SMichael Wang {
30918ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
30928ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_PROT_IB;
3093de66be94SMichael Wang }
3094de66be94SMichael Wang 
30951fb7f897SMark Bloch static inline bool rdma_protocol_roce(const struct ib_device *device,
30961fb7f897SMark Bloch 				      u32 port_num)
3097de66be94SMichael Wang {
30988ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
30997766a99fSMatan Barak 	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
31007766a99fSMatan Barak }
31017766a99fSMatan Barak 
31021fb7f897SMark Bloch static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
31031fb7f897SMark Bloch 						u32 port_num)
31047766a99fSMatan Barak {
31058ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
31068ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
31077766a99fSMatan Barak }
31087766a99fSMatan Barak 
31091fb7f897SMark Bloch static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
31101fb7f897SMark Bloch 						u32 port_num)
31117766a99fSMatan Barak {
31128ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
31138ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_PROT_ROCE;
3114de66be94SMichael Wang }
3115de66be94SMichael Wang 
31161fb7f897SMark Bloch static inline bool rdma_protocol_iwarp(const struct ib_device *device,
31171fb7f897SMark Bloch 				       u32 port_num)
3118de66be94SMichael Wang {
31198ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
31208ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_PROT_IWARP;
3121de66be94SMichael Wang }
3122de66be94SMichael Wang 
31231fb7f897SMark Bloch static inline bool rdma_ib_or_roce(const struct ib_device *device,
31241fb7f897SMark Bloch 				   u32 port_num)
3125de66be94SMichael Wang {
31267766a99fSMatan Barak 	return rdma_protocol_ib(device, port_num) ||
31277766a99fSMatan Barak 		rdma_protocol_roce(device, port_num);
3128de66be94SMichael Wang }
3129de66be94SMichael Wang 
31301fb7f897SMark Bloch static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
31311fb7f897SMark Bloch 					    u32 port_num)
3132aa773bd4SOr Gerlitz {
31338ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
31348ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_PROT_RAW_PACKET;
3135aa773bd4SOr Gerlitz }
3136aa773bd4SOr Gerlitz 
31371fb7f897SMark Bloch static inline bool rdma_protocol_usnic(const struct ib_device *device,
31381fb7f897SMark Bloch 				       u32 port_num)
3139ce1e055fSOr Gerlitz {
31408ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
31418ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_PROT_USNIC;
3142ce1e055fSOr Gerlitz }
3143ce1e055fSOr Gerlitz 
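/*
 * Usage sketch (illustrative, not part of the in-kernel API): map the
 * protocol helpers above to a human-readable transport name.  The helper
 * name example_port_transport() is hypothetical.
 */
static inline const char *
example_port_transport(const struct ib_device *device, u32 port_num)
{
	if (rdma_protocol_ib(device, port_num))
		return "InfiniBand";
	if (rdma_protocol_roce(device, port_num))
		return "RoCE";		/* v1 or v2 (UDP encap) */
	if (rdma_protocol_iwarp(device, port_num))
		return "iWARP";
	if (rdma_protocol_usnic(device, port_num))
		return "usNIC";
	if (rdma_protocol_raw_packet(device, port_num))
		return "raw packet";
	return "unknown";
}
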
3144c757dea8SMichael Wang /**
3145296ec009SMichael Wang  * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
3146c757dea8SMichael Wang  * Management Datagrams.
3147296ec009SMichael Wang  * @device: Device to check
3148296ec009SMichael Wang  * @port_num: Port number to check
3149c757dea8SMichael Wang  *
3150296ec009SMichael Wang  * Management Datagrams (MAD) are a required part of the InfiniBand
3151296ec009SMichael Wang  * specification and are supported on all InfiniBand devices.  A slightly
3152296ec009SMichael Wang  * extended version is also supported on OPA interfaces.
3153c757dea8SMichael Wang  *
3154296ec009SMichael Wang  * Return: true if the port supports sending/receiving of MAD packets.
3155c757dea8SMichael Wang  */
31561fb7f897SMark Bloch static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3157c757dea8SMichael Wang {
31588ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
31598ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_IB_MAD;
3160c757dea8SMichael Wang }
3161c757dea8SMichael Wang 
316229541e3aSMichael Wang /**
316365995feeSIra Weiny  * rdma_cap_opa_mad - Check if the port of the device provides support for OPA
316465995feeSIra Weiny  * Management Datagrams.
316565995feeSIra Weiny  * @device: Device to check
316665995feeSIra Weiny  * @port_num: Port number to check
316765995feeSIra Weiny  *
316865995feeSIra Weiny  * Intel OmniPath devices extend and/or replace the InfiniBand Management
316965995feeSIra Weiny  * datagrams with their own versions.  These OPA MADs share many but not all of
317065995feeSIra Weiny  * the characteristics of InfiniBand MADs.
317165995feeSIra Weiny  *
317265995feeSIra Weiny  * OPA MADs differ in the following ways:
317365995feeSIra Weiny  *
317465995feeSIra Weiny  *    1) MADs are variable size up to 2K
317565995feeSIra Weiny  *       IBTA defined MADs remain fixed at 256 bytes
317665995feeSIra Weiny  *    2) OPA SMPs must carry valid PKeys
317765995feeSIra Weiny  *    3) OPA SMP packets use a different format
317865995feeSIra Weiny  *
317965995feeSIra Weiny  * Return: true if the port supports OPA MAD packet formats.
318065995feeSIra Weiny  */
31811fb7f897SMark Bloch static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
318265995feeSIra Weiny {
3183d3243da8SLeon Romanovsky 	return device->port_data[port_num].immutable.core_cap_flags &
3184d3243da8SLeon Romanovsky 		RDMA_CORE_CAP_OPA_MAD;
318565995feeSIra Weiny }
318665995feeSIra Weiny 
318765995feeSIra Weiny /**
3188296ec009SMichael Wang  * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
3189296ec009SMichael Wang  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3190296ec009SMichael Wang  * @device: Device to check
3191296ec009SMichael Wang  * @port_num: Port number to check
319229541e3aSMichael Wang  *
3193296ec009SMichael Wang  * Each InfiniBand node is required to provide a Subnet Management Agent
3194296ec009SMichael Wang  * that the subnet manager can access.  Prior to the fabric being fully
3195296ec009SMichael Wang  * configured by the subnet manager, the SMA is accessed via a well known
3196296ec009SMichael Wang  * interface called the Subnet Management Interface (SMI).  This interface
3197296ec009SMichael Wang  * uses directed route packets to communicate with the SM to get around the
3198296ec009SMichael Wang  * chicken and egg problem of the SM needing to know what's on the fabric
3199296ec009SMichael Wang  * in order to configure the fabric, and needing to configure the fabric in
3200296ec009SMichael Wang  * order to send packets to the devices on the fabric.  These directed
3201296ec009SMichael Wang  * route packets do not need the fabric fully configured in order to reach
3202296ec009SMichael Wang  * their destination.  The SMI is the only method allowed to send
3203296ec009SMichael Wang  * directed route packets on an InfiniBand fabric.
320429541e3aSMichael Wang  *
3205296ec009SMichael Wang  * Return: true if the port provides an SMI.
320629541e3aSMichael Wang  */
32071fb7f897SMark Bloch static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
320829541e3aSMichael Wang {
32098ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
32108ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_IB_SMI;
321129541e3aSMichael Wang }
321229541e3aSMichael Wang 
321372219ceaSMichael Wang /**
321472219ceaSMichael Wang  * rdma_cap_ib_cm - Check if the port of the device supports the InfiniBand
321572219ceaSMichael Wang  * Communication Manager.
3216296ec009SMichael Wang  * @device: Device to check
3217296ec009SMichael Wang  * @port_num: Port number to check
321872219ceaSMichael Wang  *
3219296ec009SMichael Wang  * The InfiniBand Communication Manager is one of many pre-defined General
3220296ec009SMichael Wang  * Service Agents (GSA) that are accessed via the General Service
3221296ec009SMichael Wang  * Interface (GSI).  Its role is to facilitate the establishment of connections
3222296ec009SMichael Wang  * between nodes as well as other management related tasks for established
3223296ec009SMichael Wang  * connections.
322472219ceaSMichael Wang  *
3225296ec009SMichael Wang  * Return: true if the port supports an IB CM (this does not guarantee that
3226296ec009SMichael Wang  * a CM is actually running however).
322772219ceaSMichael Wang  */
32281fb7f897SMark Bloch static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
322972219ceaSMichael Wang {
32308ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
32318ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_IB_CM;
323272219ceaSMichael Wang }
323372219ceaSMichael Wang 
323404215330SMichael Wang /**
323504215330SMichael Wang  * rdma_cap_iw_cm - Check if the port of the device supports the iWARP
323604215330SMichael Wang  * Communication Manager.
3237296ec009SMichael Wang  * @device: Device to check
3238296ec009SMichael Wang  * @port_num: Port number to check
323904215330SMichael Wang  *
3240296ec009SMichael Wang  * Similar to above, but specific to iWARP connections, which have a different
3241296ec009SMichael Wang  * management protocol than InfiniBand.
324204215330SMichael Wang  *
3243296ec009SMichael Wang  * Return: true if the port supports an iWARP CM (this does not guarantee that
3244296ec009SMichael Wang  * a CM is actually running however).
324504215330SMichael Wang  */
32461fb7f897SMark Bloch static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
324704215330SMichael Wang {
32488ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
32498ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_IW_CM;
325004215330SMichael Wang }
325104215330SMichael Wang 
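/*
 * Usage sketch (illustrative, hypothetical helper name): a port can be
 * driven through a kernel connection manager if it has either the IB CM or
 * the iWARP CM capability.
 */
static inline bool example_port_has_cm(const struct ib_device *device,
				       u32 port_num)
{
	return rdma_cap_ib_cm(device, port_num) ||
	       rdma_cap_iw_cm(device, port_num);
}
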
3252fe53ba2fSMichael Wang /**
3253fe53ba2fSMichael Wang  * rdma_cap_ib_sa - Check if the port of the device supports InfiniBand
3254fe53ba2fSMichael Wang  * Subnet Administration.
3255296ec009SMichael Wang  * @device: Device to check
3256296ec009SMichael Wang  * @port_num: Port number to check
3257fe53ba2fSMichael Wang  *
3258296ec009SMichael Wang  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3259296ec009SMichael Wang  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3260296ec009SMichael Wang  * fabrics, devices should resolve routes to other hosts by contacting the
3261296ec009SMichael Wang  * SA to query the proper route.
3262fe53ba2fSMichael Wang  *
3263296ec009SMichael Wang  * Return: true if the port should act as a client to the fabric Subnet
3264296ec009SMichael Wang  * Administration interface.  This does not imply that the SA service is
3265296ec009SMichael Wang  * running locally.
3266fe53ba2fSMichael Wang  */
32671fb7f897SMark Bloch static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3268fe53ba2fSMichael Wang {
32698ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
32708ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_IB_SA;
3271fe53ba2fSMichael Wang }
3272fe53ba2fSMichael Wang 
3273a31ad3b0SMichael Wang /**
3274a31ad3b0SMichael Wang  * rdma_cap_ib_mcast - Check if the port of the device supports InfiniBand
3275a31ad3b0SMichael Wang  * multicast.
3276296ec009SMichael Wang  * @device: Device to check
3277296ec009SMichael Wang  * @port_num: Port number to check
3278a31ad3b0SMichael Wang  *
3279296ec009SMichael Wang  * InfiniBand multicast registration is more complex than normal IPv4 or
3280296ec009SMichael Wang  * IPv6 multicast registration.  Each Host Channel Adapter must register
3281296ec009SMichael Wang  * with the Subnet Manager when it wishes to join a multicast group.  It
3282296ec009SMichael Wang  * should do so only once regardless of how many queue pairs it subscribes
3283296ec009SMichael Wang  * to this group.  And it should leave the group only after all queue pairs
3284296ec009SMichael Wang  * attached to the group have been detached.
3285a31ad3b0SMichael Wang  *
3286296ec009SMichael Wang  * Return: true if the port must undertake the additional administrative
3287296ec009SMichael Wang  * overhead of registering/unregistering with the SM and tracking of the
3288296ec009SMichael Wang  * total number of queue pairs attached to the multicast group.
3289a31ad3b0SMichael Wang  */
32901fb7f897SMark Bloch static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
32911fb7f897SMark Bloch 				     u32 port_num)
3292a31ad3b0SMichael Wang {
3293a31ad3b0SMichael Wang 	return rdma_cap_ib_sa(device, port_num);
3294a31ad3b0SMichael Wang }
3295a31ad3b0SMichael Wang 
3296bc0f1d71SMichael Wang /**
329730a74ef4SMichael Wang  * rdma_cap_af_ib - Check if the port of the device supports
329830a74ef4SMichael Wang  * native InfiniBand addressing.
3299296ec009SMichael Wang  * @device: Device to check
3300296ec009SMichael Wang  * @port_num: Port number to check
330130a74ef4SMichael Wang  *
3302296ec009SMichael Wang  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3303296ec009SMichael Wang  * GID.  RoCE uses a different mechanism, but still generates a GID via
3304296ec009SMichael Wang  * a prescribed mechanism and port specific data.
330530a74ef4SMichael Wang  *
3306296ec009SMichael Wang  * Return: true if the port uses a GID address to identify devices on the
3307296ec009SMichael Wang  * network.
330830a74ef4SMichael Wang  */
33091fb7f897SMark Bloch static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
331030a74ef4SMichael Wang {
33118ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
33128ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_AF_IB;
331330a74ef4SMichael Wang }
331430a74ef4SMichael Wang 
331530a74ef4SMichael Wang /**
3316227128fcSMichael Wang  * rdma_cap_eth_ah - Check if the port of the device supports
3317296ec009SMichael Wang  * Ethernet Address Handles.
3318296ec009SMichael Wang  * @device: Device to check
3319296ec009SMichael Wang  * @port_num: Port number to check
3320227128fcSMichael Wang  *
3321296ec009SMichael Wang  * RoCE is InfiniBand over Ethernet, and it uses a well-defined technique
3322296ec009SMichael Wang  * to fabricate GIDs over Ethernet/IP specific addresses native to the
3323296ec009SMichael Wang  * port.  Normally, packet headers are generated by the sending host
3324296ec009SMichael Wang  * adapter, but when sending connectionless datagrams, we must manually
3325296ec009SMichael Wang  * inject the proper headers for the fabric we are communicating over.
3326227128fcSMichael Wang  *
3327296ec009SMichael Wang  * Return: true if we are running as a RoCE port and must force the
3328296ec009SMichael Wang  * addition of a Global Route Header built from our Ethernet Address
3329296ec009SMichael Wang  * Handle into our header list for connectionless packets.
3330227128fcSMichael Wang  */
33311fb7f897SMark Bloch static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3332227128fcSMichael Wang {
33338ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.core_cap_flags &
33348ceb1357SJason Gunthorpe 	       RDMA_CORE_CAP_ETH_AH;
3335227128fcSMichael Wang }
3336227128fcSMichael Wang 
3337227128fcSMichael Wang /**
333894d595c5SDasaratharaman Chandramouli  * rdma_cap_opa_ah - Check if the port of the device supports
333994d595c5SDasaratharaman Chandramouli  * OPA address handles
334094d595c5SDasaratharaman Chandramouli  * @device: Device to check
334194d595c5SDasaratharaman Chandramouli  * @port_num: Port number to check
334294d595c5SDasaratharaman Chandramouli  *
334394d595c5SDasaratharaman Chandramouli  * Return: true if we are running on an OPA device which supports
334494d595c5SDasaratharaman Chandramouli  * the extended OPA addressing.
334594d595c5SDasaratharaman Chandramouli  */
33461fb7f897SMark Bloch static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
334794d595c5SDasaratharaman Chandramouli {
33488ceb1357SJason Gunthorpe 	return (device->port_data[port_num].immutable.core_cap_flags &
334994d595c5SDasaratharaman Chandramouli 		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
335094d595c5SDasaratharaman Chandramouli }
335194d595c5SDasaratharaman Chandramouli 
335294d595c5SDasaratharaman Chandramouli /**
3353337877a4SIra Weiny  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3354337877a4SIra Weiny  *
3355337877a4SIra Weiny  * @device: Device
3356337877a4SIra Weiny  * @port_num: Port number
3357337877a4SIra Weiny  *
3358337877a4SIra Weiny  * This MAD size includes the MAD headers and MAD payload.  No other headers
3359337877a4SIra Weiny  * are included.
3360337877a4SIra Weiny  *
3361337877a4SIra Weiny  * Return the max MAD size required by the Port.  Will return 0 if the port
3362337877a4SIra Weiny  * does not support MADs.
3363337877a4SIra Weiny  */
33641fb7f897SMark Bloch static inline size_t rdma_max_mad_size(const struct ib_device *device,
33651fb7f897SMark Bloch 				       u32 port_num)
3366337877a4SIra Weiny {
33678ceb1357SJason Gunthorpe 	return device->port_data[port_num].immutable.max_mad_size;
3368337877a4SIra Weiny }
3369337877a4SIra Weiny 
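/*
 * Usage sketch (illustrative, hypothetical helper name): size and allocate
 * a receive buffer for MAD traffic on a port, bailing out when the port
 * does not support MADs.  The GFP flags are caller-provided.
 */
static inline void *example_alloc_mad_buf(const struct ib_device *device,
					  u32 port_num, gfp_t gfp)
{
	size_t size = rdma_max_mad_size(device, port_num);

	if (!rdma_cap_ib_mad(device, port_num) || !size)
		return NULL;
	return kzalloc(size, gfp);	/* covers MAD header plus payload */
}
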
337003db3a2dSMatan Barak /**
337103db3a2dSMatan Barak  * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
337203db3a2dSMatan Barak  * @device: Device to check
337303db3a2dSMatan Barak  * @port_num: Port number to check
337403db3a2dSMatan Barak  *
337503db3a2dSMatan Barak  * The RoCE GID table mechanism manages the various GIDs for a device.
337603db3a2dSMatan Barak  *
337703db3a2dSMatan Barak  * NOTE: if allocating the port's GID table has failed, this call will still
337803db3a2dSMatan Barak  * return true, but any RoCE GID table API will fail.
337903db3a2dSMatan Barak  *
338003db3a2dSMatan Barak  * Return: true if the port uses RoCE GID table mechanism in order to manage
338103db3a2dSMatan Barak  * its GIDs.
338203db3a2dSMatan Barak  */
338303db3a2dSMatan Barak static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
33841fb7f897SMark Bloch 					   u32 port_num)
338503db3a2dSMatan Barak {
338603db3a2dSMatan Barak 	return rdma_protocol_roce(device, port_num) &&
33873023a1e9SKamal Heib 		device->ops.add_gid && device->ops.del_gid;
338803db3a2dSMatan Barak }
338903db3a2dSMatan Barak 
3390002516edSChristoph Hellwig /*
3391002516edSChristoph Hellwig  * Check if the device supports READ W/ INVALIDATE.
3392002516edSChristoph Hellwig  */
3393002516edSChristoph Hellwig static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3394002516edSChristoph Hellwig {
3395002516edSChristoph Hellwig 	/*
3396002516edSChristoph Hellwig 	 * iWARP drivers must support READ W/ INVALIDATE.  No other protocol
3397002516edSChristoph Hellwig 	 * has support for it yet.
3398002516edSChristoph Hellwig 	 */
3399002516edSChristoph Hellwig 	return rdma_protocol_iwarp(dev, port_num);
3400002516edSChristoph Hellwig }
3401002516edSChristoph Hellwig 
34024a353399SShiraz Saleem /**
34036d72344cSKaike Wan  * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
34046d72344cSKaike Wan  * @device: Device
34056d72344cSKaike Wan  * @port_num: 1 based Port number
34066d72344cSKaike Wan  *
34076d72344cSKaike Wan  * Return true if the port is an Intel OPA port, false if not.
34086d72344cSKaike Wan  */
34096d72344cSKaike Wan static inline bool rdma_core_cap_opa_port(struct ib_device *device,
34106d72344cSKaike Wan 					  u32 port_num)
34116d72344cSKaike Wan {
34126d72344cSKaike Wan 	return (device->port_data[port_num].immutable.core_cap_flags &
34136d72344cSKaike Wan 		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
34146d72344cSKaike Wan }
34156d72344cSKaike Wan 
34166d72344cSKaike Wan /**
34176d72344cSKaike Wan  * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
34186d72344cSKaike Wan  * @device: Device
34196d72344cSKaike Wan  * @port_num: Port number
34206d72344cSKaike Wan  * @mtu: enum value of MTU
34216d72344cSKaike Wan  *
34226d72344cSKaike Wan  * Return the MTU size supported by the port as an integer value. Will return
34236d72344cSKaike Wan  * -1 if the enum value of mtu is not supported.
34246d72344cSKaike Wan  */
34251fb7f897SMark Bloch static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
34266d72344cSKaike Wan 				       int mtu)
34276d72344cSKaike Wan {
34286d72344cSKaike Wan 	if (rdma_core_cap_opa_port(device, port))
34296d72344cSKaike Wan 		return opa_mtu_enum_to_int((enum opa_mtu)mtu);
34306d72344cSKaike Wan 	else
34316d72344cSKaike Wan 		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
34326d72344cSKaike Wan }
34336d72344cSKaike Wan 
34346d72344cSKaike Wan /**
34356d72344cSKaike Wan  * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
34366d72344cSKaike Wan  * @device: Device
34376d72344cSKaike Wan  * @port_num: Port number
34386d72344cSKaike Wan  * @attr: port attribute
34396d72344cSKaike Wan  *
34406d72344cSKaike Wan  * Return the MTU size supported by the port as an integer value.
34416d72344cSKaike Wan  */
34421fb7f897SMark Bloch static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
34436d72344cSKaike Wan 				     struct ib_port_attr *attr)
34446d72344cSKaike Wan {
34456d72344cSKaike Wan 	if (rdma_core_cap_opa_port(device, port))
34466d72344cSKaike Wan 		return attr->phys_mtu;
34476d72344cSKaike Wan 	else
34486d72344cSKaike Wan 		return ib_mtu_enum_to_int(attr->max_mtu);
34496d72344cSKaike Wan }
34506d72344cSKaike Wan 
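/*
 * Usage sketch (illustrative, hypothetical helper name): query a port and
 * report its maximum MTU in bytes, handling both OPA and IB/RoCE ports via
 * rdma_mtu_from_attr().
 */
static inline int example_port_max_mtu(struct ib_device *device, u32 port)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;
	return rdma_mtu_from_attr(device, port, &attr);
}
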
34511fb7f897SMark Bloch int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
345250174a7fSEli Cohen 			 int state);
34531fb7f897SMark Bloch int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
345450174a7fSEli Cohen 		     struct ifla_vf_info *info);
34551fb7f897SMark Bloch int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
345650174a7fSEli Cohen 		    struct ifla_vf_stats *stats);
34571fb7f897SMark Bloch int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3458bfcb3c5dSDanit Goldberg 		    struct ifla_vf_guid *node_guid,
3459bfcb3c5dSDanit Goldberg 		    struct ifla_vf_guid *port_guid);
34601fb7f897SMark Bloch int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
346150174a7fSEli Cohen 		   int type);
346250174a7fSEli Cohen 
3463a4d61e84SRoland Dreier int ib_query_pkey(struct ib_device *device,
34641fb7f897SMark Bloch 		  u32 port_num, u16 index, u16 *pkey);
3465a4d61e84SRoland Dreier 
3466a4d61e84SRoland Dreier int ib_modify_device(struct ib_device *device,
3467a4d61e84SRoland Dreier 		     int device_modify_mask,
3468a4d61e84SRoland Dreier 		     struct ib_device_modify *device_modify);
3469a4d61e84SRoland Dreier 
3470a4d61e84SRoland Dreier int ib_modify_port(struct ib_device *device,
34711fb7f897SMark Bloch 		   u32 port_num, int port_modify_mask,
3472a4d61e84SRoland Dreier 		   struct ib_port_modify *port_modify);
3473a4d61e84SRoland Dreier 
34745eb620c8SYosef Etigin int ib_find_gid(struct ib_device *device, union ib_gid *gid,
34751fb7f897SMark Bloch 		u32 *port_num, u16 *index);
34765eb620c8SYosef Etigin 
34775eb620c8SYosef Etigin int ib_find_pkey(struct ib_device *device,
34781fb7f897SMark Bloch 		 u32 port_num, u16 pkey, u16 *index);
34795eb620c8SYosef Etigin 
3480ed082d36SChristoph Hellwig enum ib_pd_flags {
3481ed082d36SChristoph Hellwig 	/*
3482ed082d36SChristoph Hellwig 	 * Create a memory registration for all memory in the system and place
3483ed082d36SChristoph Hellwig 	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3484ed082d36SChristoph Hellwig 	 * ULPs to avoid the overhead of dynamic MRs.
3485ed082d36SChristoph Hellwig 	 *
3486ed082d36SChristoph Hellwig 	 * This flag is generally considered unsafe and must only be used in
3487ed082d36SChristoph Hellwig 	 * extremely trusted environments.  Every use of it will log a warning
3488ed082d36SChristoph Hellwig 	 * in the kernel log.
3489ed082d36SChristoph Hellwig 	 */
3490ed082d36SChristoph Hellwig 	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3491ed082d36SChristoph Hellwig };
3492a4d61e84SRoland Dreier 
3493ed082d36SChristoph Hellwig struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3494ed082d36SChristoph Hellwig 		const char *caller);
3495c4367a26SShamir Rabinovitch 
34962988ca08SMauro Carvalho Chehab /**
34972988ca08SMauro Carvalho Chehab  * ib_alloc_pd - Allocates an unused protection domain.
34982988ca08SMauro Carvalho Chehab  * @device: The device on which to allocate the protection domain.
34992988ca08SMauro Carvalho Chehab  * @flags: protection domain flags
35002988ca08SMauro Carvalho Chehab  *
35012988ca08SMauro Carvalho Chehab  * A protection domain object provides an association between QPs, shared
35022988ca08SMauro Carvalho Chehab  * receive queues, address handles, memory regions, and memory windows.
35032988ca08SMauro Carvalho Chehab  *
35042988ca08SMauro Carvalho Chehab  * Every PD has a local_dma_lkey which can be used as the lkey value for local
35052988ca08SMauro Carvalho Chehab  * memory operations.
35062988ca08SMauro Carvalho Chehab  */
3507ed082d36SChristoph Hellwig #define ib_alloc_pd(device, flags) \
3508e4496447SLeon Romanovsky 	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3509c4367a26SShamir Rabinovitch 
351091a7c58fSLeon Romanovsky int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3511c4367a26SShamir Rabinovitch 
3512c4367a26SShamir Rabinovitch /**
3513c4367a26SShamir Rabinovitch  * ib_dealloc_pd - Deallocate kernel PD
3514c4367a26SShamir Rabinovitch  * @pd: The protection domain
3515c4367a26SShamir Rabinovitch  *
3516c4367a26SShamir Rabinovitch  * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3517c4367a26SShamir Rabinovitch  */
3518c4367a26SShamir Rabinovitch static inline void ib_dealloc_pd(struct ib_pd *pd)
3519c4367a26SShamir Rabinovitch {
352091a7c58fSLeon Romanovsky 	int ret = ib_dealloc_pd_user(pd, NULL);
352191a7c58fSLeon Romanovsky 
352291a7c58fSLeon Romanovsky 	WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3523c4367a26SShamir Rabinovitch }
3524a4d61e84SRoland Dreier 
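/*
 * Usage sketch (illustrative, hypothetical helper name): the typical kernel
 * ULP lifetime of a PD.  Real consumers keep the PD for as long as the QPs,
 * CQs and MRs created against it exist.
 */
static inline int example_pd_lifetime(struct ib_device *device)
{
	struct ib_pd *pd = ib_alloc_pd(device, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);
	/* ... create and use QPs/MRs against pd ... */
	ib_dealloc_pd(pd);
	return 0;
}
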
3525b090c4e3SGal Pressman enum rdma_create_ah_flags {
3526b090c4e3SGal Pressman 	/* In a sleepable context */
3527b090c4e3SGal Pressman 	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3528b090c4e3SGal Pressman };
3529b090c4e3SGal Pressman 
3530a4d61e84SRoland Dreier /**
35310a18cfe4SDasaratharaman Chandramouli  * rdma_create_ah - Creates an address handle for the given address vector.
3532a4d61e84SRoland Dreier  * @pd: The protection domain associated with the address handle.
3533a4d61e84SRoland Dreier  * @ah_attr: The attributes of the address vector.
3534b090c4e3SGal Pressman  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3535a4d61e84SRoland Dreier  *
3536a4d61e84SRoland Dreier  * The address handle is used to reference a local or global destination
3537a4d61e84SRoland Dreier  * in all UD QP post sends.
3538a4d61e84SRoland Dreier  */
3539b090c4e3SGal Pressman struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3540b090c4e3SGal Pressman 			     u32 flags);
3541a4d61e84SRoland Dreier 
3542a4d61e84SRoland Dreier /**
35435cda6587SParav Pandit  * rdma_create_user_ah - Creates an address handle for the given address vector.
35445cda6587SParav Pandit  * It resolves the destination MAC address for an ah attribute of RoCE type.
35455cda6587SParav Pandit  * @pd: The protection domain associated with the address handle.
35465cda6587SParav Pandit  * @ah_attr: The attributes of the address vector.
35475cda6587SParav Pandit  * @udata: pointer to the user's input/output buffer information needed by
35485cda6587SParav Pandit  *         the provider driver.
35495cda6587SParav Pandit  *
35505cda6587SParav Pandit  * It returns 0 on success and returns appropriate error code on error.
35515cda6587SParav Pandit  * The address handle is used to reference a local or global destination
35525cda6587SParav Pandit  * in all UD QP post sends.
35535cda6587SParav Pandit  */
35545cda6587SParav Pandit struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
35555cda6587SParav Pandit 				  struct rdma_ah_attr *ah_attr,
35565cda6587SParav Pandit 				  struct ib_udata *udata);
35575cda6587SParav Pandit /**
3558850d8fd7SMoni Shoua  * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3559850d8fd7SMoni Shoua  *   work completion.
3560850d8fd7SMoni Shoua  * @hdr: the L3 header to parse
3561850d8fd7SMoni Shoua  * @net_type: type of header to parse
3562850d8fd7SMoni Shoua  * @sgid: place to store source gid
3563850d8fd7SMoni Shoua  * @dgid: place to store destination gid
3564850d8fd7SMoni Shoua  */
3565850d8fd7SMoni Shoua int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3566850d8fd7SMoni Shoua 			      enum rdma_network_type net_type,
3567850d8fd7SMoni Shoua 			      union ib_gid *sgid, union ib_gid *dgid);
3568850d8fd7SMoni Shoua 
3569850d8fd7SMoni Shoua /**
3570850d8fd7SMoni Shoua  * ib_get_rdma_header_version - Get the header version
3571850d8fd7SMoni Shoua  * @hdr: the L3 header to parse
3572850d8fd7SMoni Shoua  */
3573850d8fd7SMoni Shoua int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3574850d8fd7SMoni Shoua 
3575850d8fd7SMoni Shoua /**
3576f6bdb142SParav Pandit  * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
35774e00d694SSean Hefty  *   work completion.
35784e00d694SSean Hefty  * @device: Device on which the received message arrived.
35794e00d694SSean Hefty  * @port_num: Port on which the received message arrived.
35804e00d694SSean Hefty  * @wc: Work completion associated with the received message.
35814e00d694SSean Hefty  * @grh: References the received global route header.  This parameter is
35824e00d694SSean Hefty  *   ignored unless the work completion indicates that the GRH is valid.
35834e00d694SSean Hefty  * @ah_attr: Returned attributes that can be used when creating an address
35844e00d694SSean Hefty  *   handle for replying to the message.
3585b7403217SParav Pandit  * When ib_init_ah_attr_from_wc() returns success,
3586b7403217SParav Pandit  * (a) for the IB link layer it optionally contains a reference to the SGID
3587b7403217SParav Pandit  * attribute when a GRH is present;
3588b7403217SParav Pandit  * (b) for the RoCE link layer it contains a reference to the SGID attribute.
3589b7403217SParav Pandit  * The user must invoke rdma_cleanup_ah_attr_gid_attr() to release references
3590b7403217SParav Pandit  * to SGID attributes that were initialized by ib_init_ah_attr_from_wc().
3591b7403217SParav Pandit  *
35924e00d694SSean Hefty  */
35931fb7f897SMark Bloch int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
359473cdaaeeSIra Weiny 			    const struct ib_wc *wc, const struct ib_grh *grh,
359590898850SDasaratharaman Chandramouli 			    struct rdma_ah_attr *ah_attr);
35964e00d694SSean Hefty 
35974e00d694SSean Hefty /**
3598a4d61e84SRoland Dreier  * ib_create_ah_from_wc - Creates an address handle associated with the
3599a4d61e84SRoland Dreier  *   sender of the specified work completion.
3600a4d61e84SRoland Dreier  * @pd: The protection domain associated with the address handle.
3601a4d61e84SRoland Dreier  * @wc: Work completion information associated with a received message.
3602a4d61e84SRoland Dreier  * @grh: References the received global route header.  This parameter is
3603a4d61e84SRoland Dreier  *   ignored unless the work completion indicates that the GRH is valid.
3604a4d61e84SRoland Dreier  * @port_num: The outbound port number to associate with the address.
3605a4d61e84SRoland Dreier  *
3606a4d61e84SRoland Dreier  * The address handle is used to reference a local or global destination
3607a4d61e84SRoland Dreier  * in all UD QP post sends.
3608a4d61e84SRoland Dreier  */
360973cdaaeeSIra Weiny struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
36101fb7f897SMark Bloch 				   const struct ib_grh *grh, u32 port_num);
3611a4d61e84SRoland Dreier 
3612a4d61e84SRoland Dreier /**
361367b985b6SDasaratharaman Chandramouli  * rdma_modify_ah - Modifies the address vector associated with an address
3614a4d61e84SRoland Dreier  *   handle.
3615a4d61e84SRoland Dreier  * @ah: The address handle to modify.
3616a4d61e84SRoland Dreier  * @ah_attr: The new address vector attributes to associate with the
3617a4d61e84SRoland Dreier  *   address handle.
3618a4d61e84SRoland Dreier  */
361967b985b6SDasaratharaman Chandramouli int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3620a4d61e84SRoland Dreier 
3621a4d61e84SRoland Dreier /**
3622bfbfd661SDasaratharaman Chandramouli  * rdma_query_ah - Queries the address vector associated with an address
3623a4d61e84SRoland Dreier  *   handle.
3624a4d61e84SRoland Dreier  * @ah: The address handle to query.
3625a4d61e84SRoland Dreier  * @ah_attr: The address vector attributes associated with the address
3626a4d61e84SRoland Dreier  *   handle.
3627a4d61e84SRoland Dreier  */
3628bfbfd661SDasaratharaman Chandramouli int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3629a4d61e84SRoland Dreier 
36302553ba21SGal Pressman enum rdma_destroy_ah_flags {
36312553ba21SGal Pressman 	/* In a sleepable context */
36322553ba21SGal Pressman 	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
36332553ba21SGal Pressman };
36342553ba21SGal Pressman 
3635a4d61e84SRoland Dreier /**
3636c4367a26SShamir Rabinovitch  * rdma_destroy_ah_user - Destroys an address handle.
3637a4d61e84SRoland Dreier  * @ah: The address handle to destroy.
36382553ba21SGal Pressman  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3639c4367a26SShamir Rabinovitch  * @udata: Valid user data or NULL for kernel objects
3640a4d61e84SRoland Dreier  */
3641c4367a26SShamir Rabinovitch int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3642c4367a26SShamir Rabinovitch 
3643c4367a26SShamir Rabinovitch /**
3644c4367a26SShamir Rabinovitch  * rdma_destroy_ah - Destroys a kernel address handle.
3645c4367a26SShamir Rabinovitch  * @ah: The address handle to destroy.
3646c4367a26SShamir Rabinovitch  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3647c4367a26SShamir Rabinovitch  *
3648c4367a26SShamir Rabinovitch  * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3649c4367a26SShamir Rabinovitch  */
36509a9ebf8cSLeon Romanovsky static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3651c4367a26SShamir Rabinovitch {
36529a9ebf8cSLeon Romanovsky 	int ret = rdma_destroy_ah_user(ah, flags, NULL);
36539a9ebf8cSLeon Romanovsky 
36549a9ebf8cSLeon Romanovsky 	WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3655c4367a26SShamir Rabinovitch }
3656a4d61e84SRoland Dreier 
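/*
 * Usage sketch (illustrative, hypothetical helper name): build a reply AH
 * from a received UD work completion and release it after the reply has
 * completed.  Error handling is abbreviated.
 */
static inline int example_ud_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
				      const struct ib_grh *grh, u32 port_num)
{
	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);

	if (IS_ERR(ah))
		return PTR_ERR(ah);
	/* ... post the UD reply referencing ah and wait for its completion ... */
	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
	return 0;
}
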
3657b0810b03SJason Gunthorpe struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3658b0810b03SJason Gunthorpe 				  struct ib_srq_init_attr *srq_init_attr,
3659b0810b03SJason Gunthorpe 				  struct ib_usrq_object *uobject,
3660b0810b03SJason Gunthorpe 				  struct ib_udata *udata);
3661b0810b03SJason Gunthorpe static inline struct ib_srq *
3662b0810b03SJason Gunthorpe ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3663b0810b03SJason Gunthorpe {
3664b0810b03SJason Gunthorpe 	if (!pd->device->ops.create_srq)
3665b0810b03SJason Gunthorpe 		return ERR_PTR(-EOPNOTSUPP);
3666b0810b03SJason Gunthorpe 
3667b0810b03SJason Gunthorpe 	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3668b0810b03SJason Gunthorpe }
3669a4d61e84SRoland Dreier 
3670a4d61e84SRoland Dreier /**
3671a4d61e84SRoland Dreier  * ib_modify_srq - Modifies the attributes for the specified SRQ.
3672a4d61e84SRoland Dreier  * @srq: The SRQ to modify.
3673a4d61e84SRoland Dreier  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3674a4d61e84SRoland Dreier  *   the current values of selected SRQ attributes are returned.
3675a4d61e84SRoland Dreier  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3676a4d61e84SRoland Dreier  *   are being modified.
3677a4d61e84SRoland Dreier  *
3678a4d61e84SRoland Dreier  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3679a4d61e84SRoland Dreier  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3680a4d61e84SRoland Dreier  * the number of receives queued drops below the limit.
3681a4d61e84SRoland Dreier  */
3682a4d61e84SRoland Dreier int ib_modify_srq(struct ib_srq *srq,
3683a4d61e84SRoland Dreier 		  struct ib_srq_attr *srq_attr,
3684a4d61e84SRoland Dreier 		  enum ib_srq_attr_mask srq_attr_mask);
3685a4d61e84SRoland Dreier 
3686a4d61e84SRoland Dreier /**
3687a4d61e84SRoland Dreier  * ib_query_srq - Returns the attribute list and current values for the
3688a4d61e84SRoland Dreier  *   specified SRQ.
3689a4d61e84SRoland Dreier  * @srq: The SRQ to query.
3690a4d61e84SRoland Dreier  * @srq_attr: The attributes of the specified SRQ.
3691a4d61e84SRoland Dreier  */
3692a4d61e84SRoland Dreier int ib_query_srq(struct ib_srq *srq,
3693a4d61e84SRoland Dreier 		 struct ib_srq_attr *srq_attr);
3694a4d61e84SRoland Dreier 
3695a4d61e84SRoland Dreier /**
3696c4367a26SShamir Rabinovitch  * ib_destroy_srq_user - Destroys the specified SRQ.
3697a4d61e84SRoland Dreier  * @srq: The SRQ to destroy.
3698c4367a26SShamir Rabinovitch  * @udata: Valid user data or NULL for kernel objects
3699a4d61e84SRoland Dreier  */
3700c4367a26SShamir Rabinovitch int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3701c4367a26SShamir Rabinovitch 
3702c4367a26SShamir Rabinovitch /**
3703c4367a26SShamir Rabinovitch  * ib_destroy_srq - Destroys the specified kernel SRQ.
3704c4367a26SShamir Rabinovitch  * @srq: The SRQ to destroy.
3705c4367a26SShamir Rabinovitch  *
3706c4367a26SShamir Rabinovitch  * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3707c4367a26SShamir Rabinovitch  */
3708119181d1SLeon Romanovsky static inline void ib_destroy_srq(struct ib_srq *srq)
3709c4367a26SShamir Rabinovitch {
3710119181d1SLeon Romanovsky 	int ret = ib_destroy_srq_user(srq, NULL);
3711119181d1SLeon Romanovsky 
3712119181d1SLeon Romanovsky 	WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3713c4367a26SShamir Rabinovitch }
3714a4d61e84SRoland Dreier 
3715a4d61e84SRoland Dreier /**
3716a4d61e84SRoland Dreier  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3717a4d61e84SRoland Dreier  * @srq: The SRQ to post the work request on.
3718a4d61e84SRoland Dreier  * @recv_wr: A list of work requests to post on the receive queue.
3719a4d61e84SRoland Dreier  * @bad_recv_wr: On an immediate failure, this parameter will reference
3720a4d61e84SRoland Dreier  *   the work request that failed to be posted on the SRQ.
3721a4d61e84SRoland Dreier  */
3722a4d61e84SRoland Dreier static inline int ib_post_srq_recv(struct ib_srq *srq,
3723d34ac5cdSBart Van Assche 				   const struct ib_recv_wr *recv_wr,
3724d34ac5cdSBart Van Assche 				   const struct ib_recv_wr **bad_recv_wr)
3725a4d61e84SRoland Dreier {
3726d34ac5cdSBart Van Assche 	const struct ib_recv_wr *dummy;
3727bb039a87SBart Van Assche 
37283023a1e9SKamal Heib 	return srq->device->ops.post_srq_recv(srq, recv_wr,
37293023a1e9SKamal Heib 					      bad_recv_wr ? : &dummy);
3730a4d61e84SRoland Dreier }
3731a4d61e84SRoland Dreier 
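/*
 * Usage sketch (illustrative, hypothetical helper name): create a small SRQ,
 * post a single receive to it and tear it down.  The SGE and queue sizes are
 * placeholder values.
 */
static inline int example_srq_post_one(struct ib_pd *pd, struct ib_sge *sge)
{
	struct ib_srq_init_attr init = {
		.attr = { .max_wr = 16, .max_sge = 1 },
	};
	struct ib_recv_wr wr = { .sg_list = sge, .num_sge = 1 };
	struct ib_srq *srq = ib_create_srq(pd, &init);
	int ret;

	if (IS_ERR(srq))
		return PTR_ERR(srq);
	ret = ib_post_srq_recv(srq, &wr, NULL);
	ib_destroy_srq(srq);
	return ret;
}
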
37328da9fe4eSLeon Romanovsky struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
373366f57b87SLeon Romanovsky 				  struct ib_qp_init_attr *qp_init_attr,
373466f57b87SLeon Romanovsky 				  const char *caller);
37358da9fe4eSLeon Romanovsky /**
37368da9fe4eSLeon Romanovsky  * ib_create_qp - Creates a kernel QP associated with the specific protection
37378da9fe4eSLeon Romanovsky  * domain.
37388da9fe4eSLeon Romanovsky  * @pd: The protection domain associated with the QP.
37398da9fe4eSLeon Romanovsky  * @init_attr: A list of initial attributes required to create the
37408da9fe4eSLeon Romanovsky  *   QP.  If QP creation succeeds, then the attributes are updated to
37418da9fe4eSLeon Romanovsky  *   the actual capabilities of the created QP.
37428da9fe4eSLeon Romanovsky  */
374366f57b87SLeon Romanovsky static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
374466f57b87SLeon Romanovsky 					 struct ib_qp_init_attr *init_attr)
374566f57b87SLeon Romanovsky {
37468da9fe4eSLeon Romanovsky 	return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
374766f57b87SLeon Romanovsky }
3748a4d61e84SRoland Dreier 
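/*
 * Usage sketch (illustrative, hypothetical helper name): create a kernel RC
 * QP over an existing completion queue.  The queue depths are placeholder
 * values; real consumers size them from device capabilities.
 */
static inline struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
						 struct ib_cq *cq)
{
	struct ib_qp_init_attr init = {
		.qp_type     = IB_QPT_RC,
		.send_cq     = cq,
		.recv_cq     = cq,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	return ib_create_qp(pd, &init);
}
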
3749a4d61e84SRoland Dreier /**
3750a512c2fbSParav Pandit  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3751a512c2fbSParav Pandit  * @qp: The QP to modify.
3752a512c2fbSParav Pandit  * @attr: On input, specifies the QP attributes to modify.  On output,
3753a512c2fbSParav Pandit  *   the current values of selected QP attributes are returned.
3754a512c2fbSParav Pandit  * @attr_mask: A bit-mask used to specify which attributes of the QP
3755a512c2fbSParav Pandit  *   are being modified.
3756a512c2fbSParav Pandit  * @udata: pointer to the user's input/output buffer information.
3758a512c2fbSParav Pandit  * It returns 0 on success and returns appropriate error code on error.
3759a512c2fbSParav Pandit  */
3760a512c2fbSParav Pandit int ib_modify_qp_with_udata(struct ib_qp *qp,
3761a512c2fbSParav Pandit 			    struct ib_qp_attr *attr,
3762a512c2fbSParav Pandit 			    int attr_mask,
3763a512c2fbSParav Pandit 			    struct ib_udata *udata);
3764a512c2fbSParav Pandit 
3765a512c2fbSParav Pandit /**
3766a4d61e84SRoland Dreier  * ib_modify_qp - Modifies the attributes for the specified QP and then
3767a4d61e84SRoland Dreier  *   transitions the QP to the given state.
3768a4d61e84SRoland Dreier  * @qp: The QP to modify.
3769a4d61e84SRoland Dreier  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3770a4d61e84SRoland Dreier  *   the current values of selected QP attributes are returned.
3771a4d61e84SRoland Dreier  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3772a4d61e84SRoland Dreier  *   are being modified.
3773a4d61e84SRoland Dreier  */
3774a4d61e84SRoland Dreier int ib_modify_qp(struct ib_qp *qp,
3775a4d61e84SRoland Dreier 		 struct ib_qp_attr *qp_attr,
3776a4d61e84SRoland Dreier 		 int qp_attr_mask);
3777a4d61e84SRoland Dreier 
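/*
 * Usage sketch (illustrative, hypothetical helper name): the first of the
 * RESET -> INIT -> RTR -> RTS transitions, with placeholder pkey index and
 * access flags.  ib_modify_qp_is_ok() documents which mask bits each
 * transition accepts.
 */
static inline int example_qp_to_init(struct ib_qp *qp, u32 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state        = IB_QPS_INIT,
		.pkey_index      = 0,
		.port_num        = port_num,
		.qp_access_flags = 0,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
			    IB_QP_ACCESS_FLAGS);
}
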
3778a4d61e84SRoland Dreier /**
3779a4d61e84SRoland Dreier  * ib_query_qp - Returns the attribute list and current values for the
3780a4d61e84SRoland Dreier  *   specified QP.
3781a4d61e84SRoland Dreier  * @qp: The QP to query.
3782a4d61e84SRoland Dreier  * @qp_attr: The attributes of the specified QP.
3783a4d61e84SRoland Dreier  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3784a4d61e84SRoland Dreier  * @qp_init_attr: Additional attributes of the selected QP.
3785a4d61e84SRoland Dreier  *
3786a4d61e84SRoland Dreier  * The qp_attr_mask may be used to limit the query to gathering only the
3787a4d61e84SRoland Dreier  * selected attributes.
3788a4d61e84SRoland Dreier  */
3789a4d61e84SRoland Dreier int ib_query_qp(struct ib_qp *qp,
3790a4d61e84SRoland Dreier 		struct ib_qp_attr *qp_attr,
3791a4d61e84SRoland Dreier 		int qp_attr_mask,
3792a4d61e84SRoland Dreier 		struct ib_qp_init_attr *qp_init_attr);
3793a4d61e84SRoland Dreier 
3794a4d61e84SRoland Dreier /**
3795a4d61e84SRoland Dreier  * ib_destroy_qp_user - Destroys the specified QP.
3796a4d61e84SRoland Dreier  * @qp: The QP to destroy.
3797c4367a26SShamir Rabinovitch  * @udata: Valid udata or NULL for kernel objects
3798a4d61e84SRoland Dreier  */
3799c4367a26SShamir Rabinovitch int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3800c4367a26SShamir Rabinovitch 
3801c4367a26SShamir Rabinovitch /**
3802c4367a26SShamir Rabinovitch  * ib_destroy_qp - Destroys the specified kernel QP.
3803c4367a26SShamir Rabinovitch  * @qp: The QP to destroy.
3804c4367a26SShamir Rabinovitch  *
3805c4367a26SShamir Rabinovitch  * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3806c4367a26SShamir Rabinovitch  */
3807c4367a26SShamir Rabinovitch static inline int ib_destroy_qp(struct ib_qp *qp)
3808c4367a26SShamir Rabinovitch {
3809c4367a26SShamir Rabinovitch 	return ib_destroy_qp_user(qp, NULL);
3810c4367a26SShamir Rabinovitch }
3811a4d61e84SRoland Dreier 
3812a4d61e84SRoland Dreier /**
38130e0ec7e0SSean Hefty  * ib_open_qp - Obtain a reference to an existing sharable QP.
38140e0ec7e0SSean Hefty  * @xrcd: XRC domain
38150e0ec7e0SSean Hefty  * @qp_open_attr: Attributes identifying the QP to open.
38160e0ec7e0SSean Hefty  *
38170e0ec7e0SSean Hefty  * Returns a reference to a sharable QP.
38180e0ec7e0SSean Hefty  */
38190e0ec7e0SSean Hefty struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
38200e0ec7e0SSean Hefty 			 struct ib_qp_open_attr *qp_open_attr);
38210e0ec7e0SSean Hefty 
38220e0ec7e0SSean Hefty /**
38230e0ec7e0SSean Hefty  * ib_close_qp - Release an external reference to a QP.
3824d3d72d90SSean Hefty  * @qp: The QP handle to release
3825d3d72d90SSean Hefty  *
38260e0ec7e0SSean Hefty  * The opened QP handle is released by the caller.  The underlying
38270e0ec7e0SSean Hefty  * shared QP is not destroyed until all internal references are released.
3828d3d72d90SSean Hefty  */
38290e0ec7e0SSean Hefty int ib_close_qp(struct ib_qp *qp);
3830d3d72d90SSean Hefty 
3831d3d72d90SSean Hefty /**
3832a4d61e84SRoland Dreier  * ib_post_send - Posts a list of work requests to the send queue of
3833a4d61e84SRoland Dreier  *   the specified QP.
3834a4d61e84SRoland Dreier  * @qp: The QP to post the work request on.
3835a4d61e84SRoland Dreier  * @send_wr: A list of work requests to post on the send queue.
3836a4d61e84SRoland Dreier  * @bad_send_wr: On an immediate failure, this parameter will reference
3837a4d61e84SRoland Dreier  *   the work request that failed to be posted on the QP.
383855464d46SBart Van Assche  *
383955464d46SBart Van Assche  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
384055464d46SBart Van Assche  * error is returned, the QP state shall not be affected,
384155464d46SBart Van Assche  * ib_post_send() will return an immediate error after queueing any
384255464d46SBart Van Assche  * earlier work requests in the list.
3843a4d61e84SRoland Dreier  */
3844a4d61e84SRoland Dreier static inline int ib_post_send(struct ib_qp *qp,
3845d34ac5cdSBart Van Assche 			       const struct ib_send_wr *send_wr,
3846d34ac5cdSBart Van Assche 			       const struct ib_send_wr **bad_send_wr)
3847a4d61e84SRoland Dreier {
3848d34ac5cdSBart Van Assche 	const struct ib_send_wr *dummy;
3849bb039a87SBart Van Assche 
38503023a1e9SKamal Heib 	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3851a4d61e84SRoland Dreier }
3852a4d61e84SRoland Dreier 
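/*
 * Usage sketch (illustrative, hypothetical helper name): post one signaled
 * SEND carrying a single caller-provided SGE.  Passing NULL for bad_send_wr
 * is fine; the wrapper substitutes a dummy pointer.
 */
static inline int example_post_one_send(struct ib_qp *qp, struct ib_sge *sge,
					u64 wr_id)
{
	struct ib_send_wr wr = {
		.wr_id      = wr_id,
		.sg_list    = sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};

	return ib_post_send(qp, &wr, NULL);
}
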
3853a4d61e84SRoland Dreier /**
3854a4d61e84SRoland Dreier  * ib_post_recv - Posts a list of work requests to the receive queue of
3855a4d61e84SRoland Dreier  *   the specified QP.
3856a4d61e84SRoland Dreier  * @qp: The QP to post the work request on.
3857a4d61e84SRoland Dreier  * @recv_wr: A list of work requests to post on the receive queue.
3858a4d61e84SRoland Dreier  * @bad_recv_wr: On an immediate failure, this parameter will reference
3859a4d61e84SRoland Dreier  *   the work request that failed to be posted on the QP.
3860a4d61e84SRoland Dreier  */
3861a4d61e84SRoland Dreier static inline int ib_post_recv(struct ib_qp *qp,
3862d34ac5cdSBart Van Assche 			       const struct ib_recv_wr *recv_wr,
3863d34ac5cdSBart Van Assche 			       const struct ib_recv_wr **bad_recv_wr)
3864a4d61e84SRoland Dreier {
3865d34ac5cdSBart Van Assche 	const struct ib_recv_wr *dummy;
3866bb039a87SBart Van Assche 
38673023a1e9SKamal Heib 	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3868a4d61e84SRoland Dreier }
3869a4d61e84SRoland Dreier 
38707e3c66c9SLeon Romanovsky struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
38717e3c66c9SLeon Romanovsky 			    int comp_vector, enum ib_poll_context poll_ctx,
38727e3c66c9SLeon Romanovsky 			    const char *caller);
3873c4367a26SShamir Rabinovitch static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3874c4367a26SShamir Rabinovitch 					int nr_cqe, int comp_vector,
3875c4367a26SShamir Rabinovitch 					enum ib_poll_context poll_ctx)
3876c4367a26SShamir Rabinovitch {
38777e3c66c9SLeon Romanovsky 	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
38787e3c66c9SLeon Romanovsky 			     KBUILD_MODNAME);
3879c4367a26SShamir Rabinovitch }
3880c4367a26SShamir Rabinovitch 
388120cf4e02SChuck Lever struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
388220cf4e02SChuck Lever 				int nr_cqe, enum ib_poll_context poll_ctx,
388320cf4e02SChuck Lever 				const char *caller);
388420cf4e02SChuck Lever 
388520cf4e02SChuck Lever /**
388620cf4e02SChuck Lever  * ib_alloc_cq_any: Allocate kernel CQ
388720cf4e02SChuck Lever  * @dev: The IB device
388820cf4e02SChuck Lever  * @private: Private data attached to the CQE
388920cf4e02SChuck Lever  * @nr_cqe: Number of CQEs in the CQ
389020cf4e02SChuck Lever  * @poll_ctx: Context used for polling the CQ
389120cf4e02SChuck Lever  */
389220cf4e02SChuck Lever static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
389320cf4e02SChuck Lever 					    void *private, int nr_cqe,
389420cf4e02SChuck Lever 					    enum ib_poll_context poll_ctx)
389520cf4e02SChuck Lever {
389620cf4e02SChuck Lever 	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
389720cf4e02SChuck Lever 				 KBUILD_MODNAME);
389820cf4e02SChuck Lever }
389920cf4e02SChuck Lever 
39007e3c66c9SLeon Romanovsky void ib_free_cq(struct ib_cq *cq);
390114d3a3b2SChristoph Hellwig int ib_process_cq_direct(struct ib_cq *cq, int budget);
390214d3a3b2SChristoph Hellwig 
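/*
 * Usage sketch (illustrative, hypothetical helper name): allocate a CQ for
 * a kernel consumer, letting the core choose the completion vector, and
 * free it again.  The CQE count is a placeholder.
 */
static inline int example_cq_lifetime(struct ib_device *dev, void *priv)
{
	struct ib_cq *cq = ib_alloc_cq_any(dev, priv, 128, IB_POLL_SOFTIRQ);

	if (IS_ERR(cq))
		return PTR_ERR(cq);
	/* ... attach QPs and run traffic ... */
	ib_free_cq(cq);
	return 0;
}
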
3903a4d61e84SRoland Dreier /**
3904a4d61e84SRoland Dreier  * ib_create_cq - Creates a CQ on the specified device.
3905a4d61e84SRoland Dreier  * @device: The device on which to create the CQ.
3906a4d61e84SRoland Dreier  * @comp_handler: A user-specified callback that is invoked when a
3907a4d61e84SRoland Dreier  *   completion event occurs on the CQ.
3908a4d61e84SRoland Dreier  * @event_handler: A user-specified callback that is invoked when an
3909a4d61e84SRoland Dreier  *   asynchronous event not associated with a completion occurs on the CQ.
3910a4d61e84SRoland Dreier  * @cq_context: Context associated with the CQ returned to the user via
3911a4d61e84SRoland Dreier  *   the associated completion and event handlers.
39128e37210bSMatan Barak  * @cq_attr: The attributes with which the CQ should be created.
3913a4d61e84SRoland Dreier  *
3914a4d61e84SRoland Dreier  * Users can examine the cq structure to determine the actual CQ size.
3915a4d61e84SRoland Dreier  */
39167350cdd0SBharat Potnuri struct ib_cq *__ib_create_cq(struct ib_device *device,
3917a4d61e84SRoland Dreier 			     ib_comp_handler comp_handler,
3918a4d61e84SRoland Dreier 			     void (*event_handler)(struct ib_event *, void *),
39198e37210bSMatan Barak 			     void *cq_context,
39207350cdd0SBharat Potnuri 			     const struct ib_cq_init_attr *cq_attr,
39217350cdd0SBharat Potnuri 			     const char *caller);
39227350cdd0SBharat Potnuri #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
39237350cdd0SBharat Potnuri 	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3924a4d61e84SRoland Dreier 
3925a4d61e84SRoland Dreier /**
3926a4d61e84SRoland Dreier  * ib_resize_cq - Modifies the capacity of the CQ.
3927a4d61e84SRoland Dreier  * @cq: The CQ to resize.
3928a4d61e84SRoland Dreier  * @cqe: The minimum size of the CQ.
3929a4d61e84SRoland Dreier  *
3930a4d61e84SRoland Dreier  * Users can examine the cq structure to determine the actual CQ size.
3931a4d61e84SRoland Dreier  */
3932a4d61e84SRoland Dreier int ib_resize_cq(struct ib_cq *cq, int cqe);
3933a4d61e84SRoland Dreier 
3934a4d61e84SRoland Dreier /**
39354190b4e9SLeon Romanovsky  * rdma_set_cq_moderation - Modifies the moderation parameters of the CQ
39362dd57162SEli Cohen  * @cq: The CQ to modify.
39372dd57162SEli Cohen  * @cq_count: number of CQEs that will trigger an event
39382dd57162SEli Cohen  * @cq_period: max period of time in usec before triggering an event
39392dd57162SEli Cohen  *
39402dd57162SEli Cohen  */
39414190b4e9SLeon Romanovsky int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
39422dd57162SEli Cohen 
39432dd57162SEli Cohen /**
3944c4367a26SShamir Rabinovitch  * ib_destroy_cq_user - Destroys the specified CQ.
3945a4d61e84SRoland Dreier  * @cq: The CQ to destroy.
3946c4367a26SShamir Rabinovitch  * @udata: Valid user data or NULL for kernel objects
3947a4d61e84SRoland Dreier  */
3948c4367a26SShamir Rabinovitch int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3949c4367a26SShamir Rabinovitch 
3950c4367a26SShamir Rabinovitch /**
3951c4367a26SShamir Rabinovitch  * ib_destroy_cq - Destroys the specified kernel CQ.
3952c4367a26SShamir Rabinovitch  * @cq: The CQ to destroy.
3953c4367a26SShamir Rabinovitch  *
3954c4367a26SShamir Rabinovitch  * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3955c4367a26SShamir Rabinovitch  */
3956890ac8d9SLeon Romanovsky static inline void ib_destroy_cq(struct ib_cq *cq)
3957c4367a26SShamir Rabinovitch {
395843d781b9SLeon Romanovsky 	int ret = ib_destroy_cq_user(cq, NULL);
395943d781b9SLeon Romanovsky 
396043d781b9SLeon Romanovsky 	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3961c4367a26SShamir Rabinovitch }
3962a4d61e84SRoland Dreier 
3963a4d61e84SRoland Dreier /**
3964a4d61e84SRoland Dreier  * ib_poll_cq - poll a CQ for completion(s)
3965a4d61e84SRoland Dreier  * @cq: the CQ being polled
3966a4d61e84SRoland Dreier  * @num_entries: maximum number of completions to return
3967a4d61e84SRoland Dreier  * @wc: array of at least @num_entries &struct ib_wc where completions
3968a4d61e84SRoland Dreier  *   will be returned
3969a4d61e84SRoland Dreier  *
3970a4d61e84SRoland Dreier  * Poll a CQ for (possibly multiple) completions.  If the return value
3971a4d61e84SRoland Dreier  * is < 0, an error occurred.  If the return value is >= 0, it is the
3972a4d61e84SRoland Dreier  * number of completions returned.  If the return value is
3973a4d61e84SRoland Dreier  * non-negative and < num_entries, then the CQ was emptied.
3974a4d61e84SRoland Dreier  */
3975a4d61e84SRoland Dreier static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3976a4d61e84SRoland Dreier 			     struct ib_wc *wc)
3977a4d61e84SRoland Dreier {
39783023a1e9SKamal Heib 	return cq->device->ops.poll_cq(cq, num_entries, wc);
3979a4d61e84SRoland Dreier }
3980a4d61e84SRoland Dreier 
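/*
 * Example (editor's sketch): draining all completions currently queued on a
 * CQ. handle_wc() is a hypothetical consumer routine.
 *
 *	struct ib_wc wc;
 *	int n;
 *
 *	while ((n = ib_poll_cq(cq, 1, &wc)) > 0)
 *		handle_wc(&wc);
 *	if (n < 0)
 *		pr_err("ib_poll_cq() failed: %d\n", n);
 */
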
3981a4d61e84SRoland Dreier /**
3982a4d61e84SRoland Dreier  * ib_req_notify_cq - Request completion notification on a CQ.
3983a4d61e84SRoland Dreier  * @cq: The CQ to generate an event for.
3984ed23a727SRoland Dreier  * @flags:
3985ed23a727SRoland Dreier  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3986ed23a727SRoland Dreier  *   to request an event on the next solicited event or next work
3987ed23a727SRoland Dreier  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3988ed23a727SRoland Dreier  *   may also be |ed in to request a hint about missed events, as
3989ed23a727SRoland Dreier  *   described below.
3990ed23a727SRoland Dreier  *
3991ed23a727SRoland Dreier  * Return Value:
3992ed23a727SRoland Dreier  *    < 0 means an error occurred while requesting notification
3993ed23a727SRoland Dreier  *   == 0 means notification was requested successfully, and if
3994ed23a727SRoland Dreier  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3995ed23a727SRoland Dreier  *        were missed and it is safe to wait for another event.  In
3996ed23a727SRoland Dreier  *        this case it is guaranteed that any work completions added
3997ed23a727SRoland Dreier  *        to the CQ since the last CQ poll will trigger a completion
3998ed23a727SRoland Dreier  *        notification event.
3999ed23a727SRoland Dreier  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
4000ed23a727SRoland Dreier  *        in.  It means that the consumer must poll the CQ again to
4001ed23a727SRoland Dreier  *        make sure it is empty to avoid missing an event because of a
4002ed23a727SRoland Dreier  *        race between requesting notification and an entry being
4003ed23a727SRoland Dreier  *        added to the CQ.  This return value means it is possible
4004ed23a727SRoland Dreier  *        (but not guaranteed) that a work completion has been added
4005ed23a727SRoland Dreier  *        to the CQ since the last poll without triggering a
4006ed23a727SRoland Dreier  *        completion notification event.
4007a4d61e84SRoland Dreier  */
4008a4d61e84SRoland Dreier static inline int ib_req_notify_cq(struct ib_cq *cq,
4009ed23a727SRoland Dreier 				   enum ib_cq_notify_flags flags)
4010a4d61e84SRoland Dreier {
40113023a1e9SKamal Heib 	return cq->device->ops.req_notify_cq(cq, flags);
4012a4d61e84SRoland Dreier }
4013a4d61e84SRoland Dreier 
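/*
 * Example (editor's sketch): the poll/re-arm loop implied by the return
 * value rules above. Re-polling while ib_req_notify_cq() returns > 0 closes
 * the race between re-arming and new entries being added. handle_wc() is a
 * hypothetical consumer routine.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
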
4014c7ff819aSYamin Friedman struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4015c7ff819aSYamin Friedman 			     int comp_vector_hint,
4016c7ff819aSYamin Friedman 			     enum ib_poll_context poll_ctx);
4017c7ff819aSYamin Friedman 
4018c7ff819aSYamin Friedman void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4019c7ff819aSYamin Friedman 
40205a7a9e03SChristoph Hellwig /*
40215a7a9e03SChristoph Hellwig  * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
40225a7a9e03SChristoph Hellwig  * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
40235a7a9e03SChristoph Hellwig  * address into the dma address.
40245a7a9e03SChristoph Hellwig  */
40255a7a9e03SChristoph Hellwig static inline bool ib_uses_virt_dma(struct ib_device *dev)
40265a7a9e03SChristoph Hellwig {
40275a7a9e03SChristoph Hellwig 	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
40285a7a9e03SChristoph Hellwig }
40295a7a9e03SChristoph Hellwig 
4030495758bbSLogan Gunthorpe /*
4031495758bbSLogan Gunthorpe  * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
4032495758bbSLogan Gunthorpe  */
4033495758bbSLogan Gunthorpe static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
4034495758bbSLogan Gunthorpe {
4035495758bbSLogan Gunthorpe 	if (ib_uses_virt_dma(dev))
4036495758bbSLogan Gunthorpe 		return false;
4037495758bbSLogan Gunthorpe 
4038495758bbSLogan Gunthorpe 	return dma_pci_p2pdma_supported(dev->dma_device);
4039495758bbSLogan Gunthorpe }
4040495758bbSLogan Gunthorpe 
4041a4d61e84SRoland Dreier /**
40428d7c7c0eSJason Gunthorpe  * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
40438d7c7c0eSJason Gunthorpe  * @dma_addr: The DMA address
40448d7c7c0eSJason Gunthorpe  *
40458d7c7c0eSJason Gunthorpe  * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
40468d7c7c0eSJason Gunthorpe  * going through the dma_addr marshalling.
40478d7c7c0eSJason Gunthorpe  */
40488d7c7c0eSJason Gunthorpe static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
40498d7c7c0eSJason Gunthorpe {
40508d7c7c0eSJason Gunthorpe 	/* virt_dma mode maps kernel virtual addresses directly into the dma addr */
40518d7c7c0eSJason Gunthorpe 	return (void *)(uintptr_t)dma_addr;
40528d7c7c0eSJason Gunthorpe }
40538d7c7c0eSJason Gunthorpe 
40548d7c7c0eSJason Gunthorpe /**
40558d7c7c0eSJason Gunthorpe  * ib_virt_dma_to_page - Convert a dma_addr to a struct page
40568d7c7c0eSJason Gunthorpe  * @dma_addr: The DMA address
40578d7c7c0eSJason Gunthorpe  *
40588d7c7c0eSJason Gunthorpe  * Used by ib_uses_virt_dma() devices to get back to the struct page after going
40598d7c7c0eSJason Gunthorpe  * through the dma_addr marshalling.
40608d7c7c0eSJason Gunthorpe  */
40618d7c7c0eSJason Gunthorpe static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
40628d7c7c0eSJason Gunthorpe {
40638d7c7c0eSJason Gunthorpe 	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
40648d7c7c0eSJason Gunthorpe }
40658d7c7c0eSJason Gunthorpe 
40668d7c7c0eSJason Gunthorpe /**
40679b513090SRalph Campbell  * ib_dma_mapping_error - check a DMA addr for error
40689b513090SRalph Campbell  * @dev: The device for which the dma_addr was created
40699b513090SRalph Campbell  * @dma_addr: The DMA address to check
40709b513090SRalph Campbell  */
40719b513090SRalph Campbell static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
40729b513090SRalph Campbell {
40735a7a9e03SChristoph Hellwig 	if (ib_uses_virt_dma(dev))
40745a7a9e03SChristoph Hellwig 		return 0;
40750957c29fSBart Van Assche 	return dma_mapping_error(dev->dma_device, dma_addr);
40769b513090SRalph Campbell }
40779b513090SRalph Campbell 
40789b513090SRalph Campbell /**
40799b513090SRalph Campbell  * ib_dma_map_single - Map a kernel virtual address to DMA address
40809b513090SRalph Campbell  * @dev: The device for which the dma_addr is to be created
40819b513090SRalph Campbell  * @cpu_addr: The kernel virtual address
40829b513090SRalph Campbell  * @size: The size of the region in bytes
40839b513090SRalph Campbell  * @direction: The direction of the DMA
40849b513090SRalph Campbell  */
40859b513090SRalph Campbell static inline u64 ib_dma_map_single(struct ib_device *dev,
40869b513090SRalph Campbell 				    void *cpu_addr, size_t size,
40879b513090SRalph Campbell 				    enum dma_data_direction direction)
40889b513090SRalph Campbell {
40895a7a9e03SChristoph Hellwig 	if (ib_uses_virt_dma(dev))
40905a7a9e03SChristoph Hellwig 		return (uintptr_t)cpu_addr;
40910957c29fSBart Van Assche 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
40929b513090SRalph Campbell }
40939b513090SRalph Campbell 
40949b513090SRalph Campbell /**
40959b513090SRalph Campbell  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
40969b513090SRalph Campbell  * @dev: The device for which the DMA address was created
40979b513090SRalph Campbell  * @addr: The DMA address
40989b513090SRalph Campbell  * @size: The size of the region in bytes
40999b513090SRalph Campbell  * @direction: The direction of the DMA
41009b513090SRalph Campbell  */
41019b513090SRalph Campbell static inline void ib_dma_unmap_single(struct ib_device *dev,
41029b513090SRalph Campbell 				       u64 addr, size_t size,
41039b513090SRalph Campbell 				       enum dma_data_direction direction)
41049b513090SRalph Campbell {
41055a7a9e03SChristoph Hellwig 	if (!ib_uses_virt_dma(dev))
41060957c29fSBart Van Assche 		dma_unmap_single(dev->dma_device, addr, size, direction);
4107cb9fbc5cSArthur Kepner }
4108cb9fbc5cSArthur Kepner 
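/*
 * Example (editor's sketch): the lifecycle of a single-buffer mapping. buf
 * and len are hypothetical; the mapping must be checked with
 * ib_dma_mapping_error() before the address is handed to the device.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...post a work request whose ib_sge references dma_addr...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */
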
41099b513090SRalph Campbell /**
41109b513090SRalph Campbell  * ib_dma_map_page - Map a physical page to DMA address
41119b513090SRalph Campbell  * @dev: The device for which the dma_addr is to be created
41129b513090SRalph Campbell  * @page: The page to be mapped
41139b513090SRalph Campbell  * @offset: The offset within the page
41149b513090SRalph Campbell  * @size: The size of the region in bytes
41159b513090SRalph Campbell  * @direction: The direction of the DMA
41169b513090SRalph Campbell  */
41179b513090SRalph Campbell static inline u64 ib_dma_map_page(struct ib_device *dev,
41189b513090SRalph Campbell 				  struct page *page,
41199b513090SRalph Campbell 				  unsigned long offset,
41209b513090SRalph Campbell 				  size_t size,
41219b513090SRalph Campbell 				  enum dma_data_direction direction)
41229b513090SRalph Campbell {
41235a7a9e03SChristoph Hellwig 	if (ib_uses_virt_dma(dev))
41245a7a9e03SChristoph Hellwig 		return (uintptr_t)(page_address(page) + offset);
41250957c29fSBart Van Assche 	return dma_map_page(dev->dma_device, page, offset, size, direction);
41269b513090SRalph Campbell }
41279b513090SRalph Campbell 
41289b513090SRalph Campbell /**
41299b513090SRalph Campbell  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
41309b513090SRalph Campbell  * @dev: The device for which the DMA address was created
41319b513090SRalph Campbell  * @addr: The DMA address
41329b513090SRalph Campbell  * @size: The size of the region in bytes
41339b513090SRalph Campbell  * @direction: The direction of the DMA
41349b513090SRalph Campbell  */
41359b513090SRalph Campbell static inline void ib_dma_unmap_page(struct ib_device *dev,
41369b513090SRalph Campbell 				     u64 addr, size_t size,
41379b513090SRalph Campbell 				     enum dma_data_direction direction)
41389b513090SRalph Campbell {
41395a7a9e03SChristoph Hellwig 	if (!ib_uses_virt_dma(dev))
41400957c29fSBart Van Assche 		dma_unmap_page(dev->dma_device, addr, size, direction);
41419b513090SRalph Campbell }
41429b513090SRalph Campbell 
41435a7a9e03SChristoph Hellwig int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
41445a7a9e03SChristoph Hellwig static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
41455a7a9e03SChristoph Hellwig 				      struct scatterlist *sg, int nents,
41465a7a9e03SChristoph Hellwig 				      enum dma_data_direction direction,
41475a7a9e03SChristoph Hellwig 				      unsigned long dma_attrs)
41485a7a9e03SChristoph Hellwig {
41495a7a9e03SChristoph Hellwig 	if (ib_uses_virt_dma(dev))
41505a7a9e03SChristoph Hellwig 		return ib_dma_virt_map_sg(dev, sg, nents);
41515a7a9e03SChristoph Hellwig 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
41525a7a9e03SChristoph Hellwig 				dma_attrs);
41535a7a9e03SChristoph Hellwig }
41545a7a9e03SChristoph Hellwig 
41555a7a9e03SChristoph Hellwig static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
41565a7a9e03SChristoph Hellwig 					 struct scatterlist *sg, int nents,
41575a7a9e03SChristoph Hellwig 					 enum dma_data_direction direction,
41585a7a9e03SChristoph Hellwig 					 unsigned long dma_attrs)
41595a7a9e03SChristoph Hellwig {
41605a7a9e03SChristoph Hellwig 	if (!ib_uses_virt_dma(dev))
41615a7a9e03SChristoph Hellwig 		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
41625a7a9e03SChristoph Hellwig 				   dma_attrs);
41635a7a9e03SChristoph Hellwig }
41645a7a9e03SChristoph Hellwig 
41659b513090SRalph Campbell /**
416679fbd3e1SMaor Gottlieb  * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
416779fbd3e1SMaor Gottlieb  * @dev: The device for which the DMA addresses are to be created
416879fbd3e1SMaor Gottlieb  * @sgt: The sg_table object describing the buffer
416979fbd3e1SMaor Gottlieb  * @direction: The direction of the DMA
417079fbd3e1SMaor Gottlieb  * @dma_attrs: Optional DMA attributes for the map operation
417179fbd3e1SMaor Gottlieb  */
417279fbd3e1SMaor Gottlieb static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
417379fbd3e1SMaor Gottlieb 					   struct sg_table *sgt,
417479fbd3e1SMaor Gottlieb 					   enum dma_data_direction direction,
417579fbd3e1SMaor Gottlieb 					   unsigned long dma_attrs)
417679fbd3e1SMaor Gottlieb {
4177ac0fffa0SLogan Gunthorpe 	int nents;
4178ac0fffa0SLogan Gunthorpe 
417979fbd3e1SMaor Gottlieb 	if (ib_uses_virt_dma(dev)) {
4180ac0fffa0SLogan Gunthorpe 		nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4181ac0fffa0SLogan Gunthorpe 		if (!nents)
4182ac0fffa0SLogan Gunthorpe 			return -EIO;
4183ac0fffa0SLogan Gunthorpe 		sgt->nents = nents;
418479fbd3e1SMaor Gottlieb 		return 0;
418579fbd3e1SMaor Gottlieb 	}
418679fbd3e1SMaor Gottlieb 	return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
418779fbd3e1SMaor Gottlieb }
418879fbd3e1SMaor Gottlieb 
418979fbd3e1SMaor Gottlieb static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
419079fbd3e1SMaor Gottlieb 					      struct sg_table *sgt,
419179fbd3e1SMaor Gottlieb 					      enum dma_data_direction direction,
419279fbd3e1SMaor Gottlieb 					      unsigned long dma_attrs)
419379fbd3e1SMaor Gottlieb {
419479fbd3e1SMaor Gottlieb 	if (!ib_uses_virt_dma(dev))
419579fbd3e1SMaor Gottlieb 		dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
419679fbd3e1SMaor Gottlieb }
419779fbd3e1SMaor Gottlieb 
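/*
 * Example (editor's sketch): mapping an already-populated struct sg_table
 * for DMA and releasing the mapping afterwards. The mapped entries can be
 * walked with for_each_sgtable_dma_sg().
 *
 *	int ret;
 *
 *	ret = ib_dma_map_sgtable_attrs(dev, &sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	...use the sgt.nents mapped entries...
 *	ib_dma_unmap_sgtable_attrs(dev, &sgt, DMA_BIDIRECTIONAL, 0);
 */
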
419879fbd3e1SMaor Gottlieb /**
41999b513090SRalph Campbell  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
42009b513090SRalph Campbell  * @dev: The device for which the DMA addresses are to be created
42019b513090SRalph Campbell  * @sg: The array of scatter/gather entries
42029b513090SRalph Campbell  * @nents: The number of scatter/gather entries
42039b513090SRalph Campbell  * @direction: The direction of the DMA
42049b513090SRalph Campbell  */
42059b513090SRalph Campbell static inline int ib_dma_map_sg(struct ib_device *dev,
42069b513090SRalph Campbell 				struct scatterlist *sg, int nents,
42079b513090SRalph Campbell 				enum dma_data_direction direction)
42089b513090SRalph Campbell {
42095a7a9e03SChristoph Hellwig 	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
42109b513090SRalph Campbell }
42119b513090SRalph Campbell 
42129b513090SRalph Campbell /**
42139b513090SRalph Campbell  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
42149b513090SRalph Campbell  * @dev: The device for which the DMA addresses were created
42159b513090SRalph Campbell  * @sg: The array of scatter/gather entries
42169b513090SRalph Campbell  * @nents: The number of scatter/gather entries
42179b513090SRalph Campbell  * @direction: The direction of the DMA
42189b513090SRalph Campbell  */
42199b513090SRalph Campbell static inline void ib_dma_unmap_sg(struct ib_device *dev,
42209b513090SRalph Campbell 				   struct scatterlist *sg, int nents,
42219b513090SRalph Campbell 				   enum dma_data_direction direction)
42229b513090SRalph Campbell {
42235a7a9e03SChristoph Hellwig 	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4224cb9fbc5cSArthur Kepner }
42259b513090SRalph Campbell 
42269b513090SRalph Campbell /**
42270b5cb330SBart Van Assche  * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
42280b5cb330SBart Van Assche  * @dev: The device to query
42290b5cb330SBart Van Assche  *
42300b5cb330SBart Van Assche  * The returned value represents a size in bytes.
42310b5cb330SBart Van Assche  */
42320b5cb330SBart Van Assche static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
42330b5cb330SBart Van Assche {
42345a7a9e03SChristoph Hellwig 	if (ib_uses_virt_dma(dev))
42355a7a9e03SChristoph Hellwig 		return UINT_MAX;
4236ecdfdfdbSBart Van Assche 	return dma_get_max_seg_size(dev->dma_device);
42370b5cb330SBart Van Assche }
42380b5cb330SBart Van Assche 
42390b5cb330SBart Van Assche /**
42409b513090SRalph Campbell  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
42419b513090SRalph Campbell  * @dev: The device for which the DMA address was created
42429b513090SRalph Campbell  * @addr: The DMA address
42439b513090SRalph Campbell  * @size: The size of the region in bytes
42449b513090SRalph Campbell  * @dir: The direction of the DMA
42459b513090SRalph Campbell  */
42469b513090SRalph Campbell static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
42479b513090SRalph Campbell 					      u64 addr,
42489b513090SRalph Campbell 					      size_t size,
42499b513090SRalph Campbell 					      enum dma_data_direction dir)
42509b513090SRalph Campbell {
42515a7a9e03SChristoph Hellwig 	if (!ib_uses_virt_dma(dev))
42520957c29fSBart Van Assche 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
42539b513090SRalph Campbell }
42549b513090SRalph Campbell 
42559b513090SRalph Campbell /**
42569b513090SRalph Campbell  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
42579b513090SRalph Campbell  * @dev: The device for which the DMA address was created
42589b513090SRalph Campbell  * @addr: The DMA address
42599b513090SRalph Campbell  * @size: The size of the region in bytes
42609b513090SRalph Campbell  * @dir: The direction of the DMA
42619b513090SRalph Campbell  */
42629b513090SRalph Campbell static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
42639b513090SRalph Campbell 						 u64 addr,
42649b513090SRalph Campbell 						 size_t size,
42659b513090SRalph Campbell 						 enum dma_data_direction dir)
42669b513090SRalph Campbell {
42675a7a9e03SChristoph Hellwig 	if (!ib_uses_virt_dma(dev))
42680957c29fSBart Van Assche 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
42699b513090SRalph Campbell }
42709b513090SRalph Campbell 
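/*
 * Example (editor's sketch): bracketing CPU access to a streaming mapping
 * that the device writes into. dma_addr and len are hypothetical.
 *
 *	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
 *	...CPU reads the received data...
 *	ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
 */
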
427133006bd4SMoni Shoua /* ib_reg_user_mr - register a memory region for virtual addresses from kernel
427233006bd4SMoni Shoua  * space. This function should be called when 'current' is the owning MM.
427333006bd4SMoni Shoua  */
427433006bd4SMoni Shoua struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
427533006bd4SMoni Shoua 			     u64 virt_addr, int mr_access_flags);
427633006bd4SMoni Shoua 
427787d8069fSMoni Shoua /* ib_advise_mr -  give an advice about an address range in a memory region */
427887d8069fSMoni Shoua int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
427987d8069fSMoni Shoua 		 u32 flags, struct ib_sge *sg_list, u32 num_sge);
42809b513090SRalph Campbell /**
4281c4367a26SShamir Rabinovitch  * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4282c4367a26SShamir Rabinovitch  *   HCA translation table.
4283c4367a26SShamir Rabinovitch  * @mr: The memory region to deregister.
4284c4367a26SShamir Rabinovitch  * @udata: Valid user data or NULL for kernel object
4285c4367a26SShamir Rabinovitch  *
4286c4367a26SShamir Rabinovitch  * This function can fail if the memory region has memory windows bound to it.
4287c4367a26SShamir Rabinovitch  */
4288c4367a26SShamir Rabinovitch int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4289c4367a26SShamir Rabinovitch 
4290c4367a26SShamir Rabinovitch /**
4291c4367a26SShamir Rabinovitch  * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4292a4d61e84SRoland Dreier  *   HCA translation table.
4293a4d61e84SRoland Dreier  * @mr: The memory region to deregister.
42947083e42eSShani Michaeli  *
42957083e42eSShani Michaeli  * This function can fail if the memory region has memory windows bound to it.
4296c4367a26SShamir Rabinovitch  *
4297c4367a26SShamir Rabinovitch  * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4298a4d61e84SRoland Dreier  */
4299c4367a26SShamir Rabinovitch static inline int ib_dereg_mr(struct ib_mr *mr)
4300c4367a26SShamir Rabinovitch {
4301c4367a26SShamir Rabinovitch 	return ib_dereg_mr_user(mr, NULL);
4302c4367a26SShamir Rabinovitch }
4303a4d61e84SRoland Dreier 
4304b64b74b1SGal Pressman struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4305b64b74b1SGal Pressman 			  u32 max_num_sg);
430600f7ec36SSteve Wise 
430726bc7eaeSIsrael Rukshin struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
430826bc7eaeSIsrael Rukshin 				    u32 max_num_data_sg,
430926bc7eaeSIsrael Rukshin 				    u32 max_num_meta_sg);
431026bc7eaeSIsrael Rukshin 
431100f7ec36SSteve Wise /**
431200f7ec36SSteve Wise  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
431300f7ec36SSteve Wise  *   R_Key and L_Key.
431400f7ec36SSteve Wise  * @mr: struct ib_mr pointer to be updated.
431500f7ec36SSteve Wise  * @newkey: new key to be used.
431600f7ec36SSteve Wise  */
431700f7ec36SSteve Wise static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
431800f7ec36SSteve Wise {
431900f7ec36SSteve Wise 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
432000f7ec36SSteve Wise 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
432100f7ec36SSteve Wise }
432200f7ec36SSteve Wise 
432300f7ec36SSteve Wise /**
43247083e42eSShani Michaeli  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
43257083e42eSShani Michaeli  * for calculating a new rkey for type 2 memory windows.
43267083e42eSShani Michaeli  * @rkey: the rkey to increment.
43277083e42eSShani Michaeli  */
43287083e42eSShani Michaeli static inline u32 ib_inc_rkey(u32 rkey)
43297083e42eSShani Michaeli {
43307083e42eSShani Michaeli 	const u32 mask = 0x000000ff;
43317083e42eSShani Michaeli 	return ((rkey + 1) & mask) | (rkey & ~mask);
43327083e42eSShani Michaeli }
43337083e42eSShani Michaeli 
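/*
 * Example (editor's sketch): refreshing an MR's key before reusing the MR
 * for a new fast registration, a pattern used by several kernel ULPs.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */
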
43347083e42eSShani Michaeli /**
4335a4d61e84SRoland Dreier  * ib_attach_mcast - Attaches the specified QP to a multicast group.
4336a4d61e84SRoland Dreier  * @qp: QP to attach to the multicast group.  The QP must be type
4337a4d61e84SRoland Dreier  *   IB_QPT_UD.
4338a4d61e84SRoland Dreier  * @gid: Multicast group GID.
4339a4d61e84SRoland Dreier  * @lid: Multicast group LID in host byte order.
4340a4d61e84SRoland Dreier  *
4341a4d61e84SRoland Dreier  * In order to send and receive multicast packets, subnet
4342a4d61e84SRoland Dreier  * administration must have created the multicast group and configured
4343a4d61e84SRoland Dreier  * the fabric appropriately.  The port associated with the specified
4344a4d61e84SRoland Dreier  * QP must also be a member of the multicast group.
4345a4d61e84SRoland Dreier  */
4346a4d61e84SRoland Dreier int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4347a4d61e84SRoland Dreier 
4348a4d61e84SRoland Dreier /**
4349a4d61e84SRoland Dreier  * ib_detach_mcast - Detaches the specified QP from a multicast group.
4350a4d61e84SRoland Dreier  * @qp: QP to detach from the multicast group.
4351a4d61e84SRoland Dreier  * @gid: Multicast group GID.
4352a4d61e84SRoland Dreier  * @lid: Multicast group LID in host byte order.
4353a4d61e84SRoland Dreier  */
4354a4d61e84SRoland Dreier int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4355a4d61e84SRoland Dreier 
4356b73efcb2SMaor Gottlieb struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4357b73efcb2SMaor Gottlieb 				   struct inode *inode, struct ib_udata *udata);
4358b73efcb2SMaor Gottlieb int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
435959991f94SSean Hefty 
4360adac4cb3SJason Gunthorpe static inline int ib_check_mr_access(struct ib_device *ib_dev,
4361adac4cb3SJason Gunthorpe 				     unsigned int flags)
43621c636f80SEli Cohen {
4363208e3a13SLi Zhijian 	u64 device_cap = ib_dev->attrs.device_cap_flags;
4364208e3a13SLi Zhijian 
43651c636f80SEli Cohen 	/*
43661c636f80SEli Cohen 	 * Local write permission is required if remote write or
43671c636f80SEli Cohen 	 * remote atomic permission is also requested.
43681c636f80SEli Cohen 	 */
43691c636f80SEli Cohen 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
43701c636f80SEli Cohen 	    !(flags & IB_ACCESS_LOCAL_WRITE))
43711c636f80SEli Cohen 		return -EINVAL;
43721c636f80SEli Cohen 
4373ca95c141SMichael Guralnik 	if (flags & ~IB_ACCESS_SUPPORTED)
4374ca95c141SMichael Guralnik 		return -EINVAL;
4375ca95c141SMichael Guralnik 
4376adac4cb3SJason Gunthorpe 	if (flags & IB_ACCESS_ON_DEMAND &&
4377e945c653SJason Gunthorpe 	    !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
437853c2d5b1SLi Zhijian 		return -EOPNOTSUPP;
4379208e3a13SLi Zhijian 
4380208e3a13SLi Zhijian 	if ((flags & IB_ACCESS_FLUSH_GLOBAL &&
4381208e3a13SLi Zhijian 	    !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) ||
4382208e3a13SLi Zhijian 	    (flags & IB_ACCESS_FLUSH_PERSISTENT &&
4383208e3a13SLi Zhijian 	    !(device_cap & IB_DEVICE_FLUSH_PERSISTENT)))
4384208e3a13SLi Zhijian 		return -EOPNOTSUPP;
4385208e3a13SLi Zhijian 
43861c636f80SEli Cohen 	return 0;
43871c636f80SEli Cohen }
43881c636f80SEli Cohen 
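/*
 * Example (editor's sketch): validating a caller-supplied access mask before
 * registering an MR. Note that remote write requires local write as well.
 *
 *	int ret;
 *
 *	ret = ib_check_mr_access(ib_dev, IB_ACCESS_LOCAL_WRITE |
 *					 IB_ACCESS_REMOTE_WRITE);
 *	if (ret)
 *		return ret;
 */
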
438908bb558aSJack Morgenstein static inline bool ib_access_writable(int access_flags)
439008bb558aSJack Morgenstein {
439108bb558aSJack Morgenstein 	/*
439208bb558aSJack Morgenstein 	 * We have writable memory backing the MR if any of the following
439308bb558aSJack Morgenstein 	 * access flags are set.  "Local write" and "remote write" obviously
439408bb558aSJack Morgenstein 	 * require write access.  "Remote atomic" can do things like fetch and
439508bb558aSJack Morgenstein 	 * add, which will modify memory, and "MW bind" can change permissions
439608bb558aSJack Morgenstein 	 * by binding a window.
439708bb558aSJack Morgenstein 	 */
439808bb558aSJack Morgenstein 	return access_flags &
439908bb558aSJack Morgenstein 		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
440008bb558aSJack Morgenstein 		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
440108bb558aSJack Morgenstein }
440208bb558aSJack Morgenstein 
44031b01d335SSagi Grimberg /**
44041b01d335SSagi Grimberg  * ib_check_mr_status: lightweight check of MR status.
44051b01d335SSagi Grimberg  *     This routine may provide status checks on a selected
44061b01d335SSagi Grimberg  *     ib_mr. The first use is for signature status checks.
44071b01d335SSagi Grimberg  *
44081b01d335SSagi Grimberg  * @mr: A memory region.
44091b01d335SSagi Grimberg  * @check_mask: Bitmask of which checks to perform from
44101b01d335SSagi Grimberg  *     ib_mr_status_check enumeration.
44111b01d335SSagi Grimberg  * @mr_status: The container of relevant status checks.
44121b01d335SSagi Grimberg  *     Failed checks are indicated in the status bitmask
44131b01d335SSagi Grimberg  *     and the relevant info is in the error item.
44141b01d335SSagi Grimberg  */
44151b01d335SSagi Grimberg int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
44161b01d335SSagi Grimberg 		       struct ib_mr_status *mr_status);
44171b01d335SSagi Grimberg 
4418d79af724SJason Gunthorpe /**
4419d79af724SJason Gunthorpe  * ib_device_try_get: Hold a registration lock
4420d79af724SJason Gunthorpe  * @device: The device to lock
4421d79af724SJason Gunthorpe  *
4422d79af724SJason Gunthorpe  * A device under an active registration lock cannot become unregistered. It
4423d79af724SJason Gunthorpe  * is only possible to obtain a registration lock on a device that is fully
4424d79af724SJason Gunthorpe  * registered; otherwise this function returns false.
4425d79af724SJason Gunthorpe  *
4426d79af724SJason Gunthorpe  * The registration lock is only necessary for actions which require the
4427d79af724SJason Gunthorpe  * device to still be registered. Uses that only require the device pointer to
4428d79af724SJason Gunthorpe  * be valid should use get_device(&ibdev->dev) to hold the memory.
4429d79af724SJason Gunthorpe  *
4430d79af724SJason Gunthorpe  */
4431d79af724SJason Gunthorpe static inline bool ib_device_try_get(struct ib_device *dev)
4432d79af724SJason Gunthorpe {
4433d79af724SJason Gunthorpe 	return refcount_inc_not_zero(&dev->refcount);
4434d79af724SJason Gunthorpe }
4435d79af724SJason Gunthorpe 
4436d79af724SJason Gunthorpe void ib_device_put(struct ib_device *device);
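
/*
 * Example (editor's sketch): the registration-lock pattern. Work that
 * requires the device to remain registered is bracketed by
 * ib_device_try_get() and ib_device_put().
 *
 *	if (!ib_device_try_get(ibdev))
 *		return -ENODEV;
 *	...act on the still-registered device...
 *	ib_device_put(ibdev);
 */
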
4437324e227eSJason Gunthorpe struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4438324e227eSJason Gunthorpe 					  enum rdma_driver_id driver_id);
4439324e227eSJason Gunthorpe struct ib_device *ib_device_get_by_name(const char *name,
4440324e227eSJason Gunthorpe 					enum rdma_driver_id driver_id);
44411fb7f897SMark Bloch struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
44429268f72dSYotam Kenneth 					    u16 pkey, const union ib_gid *gid,
44439268f72dSYotam Kenneth 					    const struct sockaddr *addr);
4444c2261dd7SJason Gunthorpe int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4445c2261dd7SJason Gunthorpe 			 unsigned int port);
44465fd251c8SYishai Hadas struct ib_wq *ib_create_wq(struct ib_pd *pd,
44475fd251c8SYishai Hadas 			   struct ib_wq_init_attr *init_attr);
4448add53535SLeon Romanovsky int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
44499268f72dSYotam Kenneth 
4450ff2ba993SChristoph Hellwig int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
44519aa8b321SBart Van Assche 		 unsigned int *sg_offset, unsigned int page_size);
44522cdfcdd8SMax Gurtovoy int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
44532cdfcdd8SMax Gurtovoy 		    int data_sg_nents, unsigned int *data_sg_offset,
44542cdfcdd8SMax Gurtovoy 		    struct scatterlist *meta_sg, int meta_sg_nents,
44552cdfcdd8SMax Gurtovoy 		    unsigned int *meta_sg_offset, unsigned int page_size);
44564c67e2bfSSagi Grimberg 
44574c67e2bfSSagi Grimberg static inline int
4458ff2ba993SChristoph Hellwig ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
44599aa8b321SBart Van Assche 		  unsigned int *sg_offset, unsigned int page_size)
44604c67e2bfSSagi Grimberg {
44614c67e2bfSSagi Grimberg 	int n;
44624c67e2bfSSagi Grimberg 
4463ff2ba993SChristoph Hellwig 	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
44644c67e2bfSSagi Grimberg 	mr->iova = 0;
44654c67e2bfSSagi Grimberg 
44664c67e2bfSSagi Grimberg 	return n;
44674c67e2bfSSagi Grimberg }
44684c67e2bfSSagi Grimberg 
4469ff2ba993SChristoph Hellwig int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
44709aa8b321SBart Van Assche 		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
44714c67e2bfSSagi Grimberg 
4472765d6774SSteve Wise void ib_drain_rq(struct ib_qp *qp);
4473765d6774SSteve Wise void ib_drain_sq(struct ib_qp *qp);
4474765d6774SSteve Wise void ib_drain_qp(struct ib_qp *qp);
4475850d8fd7SMoni Shoua 
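/*
 * Example (editor's sketch): flushing all outstanding work requests before
 * tearing a QP down, so no completions are delivered against freed state.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */
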
44761fb7f897SMark Bloch int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
44771fb7f897SMark Bloch 		     u8 *width);
44782224c47aSDasaratharaman Chandramouli 
44792224c47aSDasaratharaman Chandramouli static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
44802224c47aSDasaratharaman Chandramouli {
448144c58487SDasaratharaman Chandramouli 	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
448244c58487SDasaratharaman Chandramouli 		return attr->roce.dmac;
448344c58487SDasaratharaman Chandramouli 	return NULL;
44842224c47aSDasaratharaman Chandramouli }
44852224c47aSDasaratharaman Chandramouli 
448664b4646eSDasaratharaman Chandramouli static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
44872224c47aSDasaratharaman Chandramouli {
448844c58487SDasaratharaman Chandramouli 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
448964b4646eSDasaratharaman Chandramouli 		attr->ib.dlid = (u16)dlid;
449064b4646eSDasaratharaman Chandramouli 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
449164b4646eSDasaratharaman Chandramouli 		attr->opa.dlid = dlid;
44922224c47aSDasaratharaman Chandramouli }
44932224c47aSDasaratharaman Chandramouli 
449464b4646eSDasaratharaman Chandramouli static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
44952224c47aSDasaratharaman Chandramouli {
449644c58487SDasaratharaman Chandramouli 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
449744c58487SDasaratharaman Chandramouli 		return attr->ib.dlid;
449864b4646eSDasaratharaman Chandramouli 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
449964b4646eSDasaratharaman Chandramouli 		return attr->opa.dlid;
450044c58487SDasaratharaman Chandramouli 	return 0;
45012224c47aSDasaratharaman Chandramouli }
45022224c47aSDasaratharaman Chandramouli 
45032224c47aSDasaratharaman Chandramouli static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
45042224c47aSDasaratharaman Chandramouli {
45052224c47aSDasaratharaman Chandramouli 	attr->sl = sl;
45062224c47aSDasaratharaman Chandramouli }
45072224c47aSDasaratharaman Chandramouli 
45082224c47aSDasaratharaman Chandramouli static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
45092224c47aSDasaratharaman Chandramouli {
45102224c47aSDasaratharaman Chandramouli 	return attr->sl;
45112224c47aSDasaratharaman Chandramouli }
45122224c47aSDasaratharaman Chandramouli 
45132224c47aSDasaratharaman Chandramouli static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
45142224c47aSDasaratharaman Chandramouli 					 u8 src_path_bits)
45152224c47aSDasaratharaman Chandramouli {
451644c58487SDasaratharaman Chandramouli 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
451744c58487SDasaratharaman Chandramouli 		attr->ib.src_path_bits = src_path_bits;
451864b4646eSDasaratharaman Chandramouli 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
451964b4646eSDasaratharaman Chandramouli 		attr->opa.src_path_bits = src_path_bits;
45202224c47aSDasaratharaman Chandramouli }
45212224c47aSDasaratharaman Chandramouli 
45222224c47aSDasaratharaman Chandramouli static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
45232224c47aSDasaratharaman Chandramouli {
452444c58487SDasaratharaman Chandramouli 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
452544c58487SDasaratharaman Chandramouli 		return attr->ib.src_path_bits;
452664b4646eSDasaratharaman Chandramouli 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
452764b4646eSDasaratharaman Chandramouli 		return attr->opa.src_path_bits;
452844c58487SDasaratharaman Chandramouli 	return 0;
45292224c47aSDasaratharaman Chandramouli }
45302224c47aSDasaratharaman Chandramouli 
4531d98bb7f7SDon Hiatt static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4532d98bb7f7SDon Hiatt 					bool make_grd)
4533d98bb7f7SDon Hiatt {
4534d98bb7f7SDon Hiatt 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4535d98bb7f7SDon Hiatt 		attr->opa.make_grd = make_grd;
4536d98bb7f7SDon Hiatt }
4537d98bb7f7SDon Hiatt 
4538d98bb7f7SDon Hiatt static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4539d98bb7f7SDon Hiatt {
4540d98bb7f7SDon Hiatt 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4541d98bb7f7SDon Hiatt 		return attr->opa.make_grd;
4542d98bb7f7SDon Hiatt 	return false;
4543d98bb7f7SDon Hiatt }
4544d98bb7f7SDon Hiatt 
45451fb7f897SMark Bloch static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
45462224c47aSDasaratharaman Chandramouli {
45472224c47aSDasaratharaman Chandramouli 	attr->port_num = port_num;
45482224c47aSDasaratharaman Chandramouli }
45492224c47aSDasaratharaman Chandramouli 
45501fb7f897SMark Bloch static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
45512224c47aSDasaratharaman Chandramouli {
45522224c47aSDasaratharaman Chandramouli 	return attr->port_num;
45532224c47aSDasaratharaman Chandramouli }
45542224c47aSDasaratharaman Chandramouli 
45552224c47aSDasaratharaman Chandramouli static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
45562224c47aSDasaratharaman Chandramouli 					   u8 static_rate)
45572224c47aSDasaratharaman Chandramouli {
45582224c47aSDasaratharaman Chandramouli 	attr->static_rate = static_rate;
45592224c47aSDasaratharaman Chandramouli }
45602224c47aSDasaratharaman Chandramouli 
45612224c47aSDasaratharaman Chandramouli static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
45622224c47aSDasaratharaman Chandramouli {
45632224c47aSDasaratharaman Chandramouli 	return attr->static_rate;
45642224c47aSDasaratharaman Chandramouli }
45652224c47aSDasaratharaman Chandramouli 
45662224c47aSDasaratharaman Chandramouli static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
45672224c47aSDasaratharaman Chandramouli 					enum ib_ah_flags flag)
45682224c47aSDasaratharaman Chandramouli {
45692224c47aSDasaratharaman Chandramouli 	attr->ah_flags = flag;
45702224c47aSDasaratharaman Chandramouli }
45712224c47aSDasaratharaman Chandramouli 
45722224c47aSDasaratharaman Chandramouli static inline enum ib_ah_flags
45732224c47aSDasaratharaman Chandramouli 		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
45742224c47aSDasaratharaman Chandramouli {
45752224c47aSDasaratharaman Chandramouli 	return attr->ah_flags;
45762224c47aSDasaratharaman Chandramouli }
45772224c47aSDasaratharaman Chandramouli 
45782224c47aSDasaratharaman Chandramouli static inline const struct ib_global_route
45792224c47aSDasaratharaman Chandramouli 		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
45802224c47aSDasaratharaman Chandramouli {
45812224c47aSDasaratharaman Chandramouli 	return &attr->grh;
45822224c47aSDasaratharaman Chandramouli }
45832224c47aSDasaratharaman Chandramouli 
45842224c47aSDasaratharaman Chandramouli /* To retrieve and modify the grh */
45852224c47aSDasaratharaman Chandramouli static inline struct ib_global_route
45862224c47aSDasaratharaman Chandramouli 		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
45872224c47aSDasaratharaman Chandramouli {
45882224c47aSDasaratharaman Chandramouli 	return &attr->grh;
45892224c47aSDasaratharaman Chandramouli }
45902224c47aSDasaratharaman Chandramouli 
45912224c47aSDasaratharaman Chandramouli static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
45922224c47aSDasaratharaman Chandramouli {
45932224c47aSDasaratharaman Chandramouli 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
45942224c47aSDasaratharaman Chandramouli 
45952224c47aSDasaratharaman Chandramouli 	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
45962224c47aSDasaratharaman Chandramouli }
45972224c47aSDasaratharaman Chandramouli 
45982224c47aSDasaratharaman Chandramouli static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
45992224c47aSDasaratharaman Chandramouli 					     __be64 prefix)
46002224c47aSDasaratharaman Chandramouli {
46012224c47aSDasaratharaman Chandramouli 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
46022224c47aSDasaratharaman Chandramouli 
46032224c47aSDasaratharaman Chandramouli 	grh->dgid.global.subnet_prefix = prefix;
46042224c47aSDasaratharaman Chandramouli }
46052224c47aSDasaratharaman Chandramouli 
46062224c47aSDasaratharaman Chandramouli static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
46072224c47aSDasaratharaman Chandramouli 					    __be64 if_id)
46082224c47aSDasaratharaman Chandramouli {
46092224c47aSDasaratharaman Chandramouli 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
46102224c47aSDasaratharaman Chandramouli 
46112224c47aSDasaratharaman Chandramouli 	grh->dgid.global.interface_id = if_id;
46122224c47aSDasaratharaman Chandramouli }
46132224c47aSDasaratharaman Chandramouli 
46142224c47aSDasaratharaman Chandramouli static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
46152224c47aSDasaratharaman Chandramouli 				   union ib_gid *dgid, u32 flow_label,
46162224c47aSDasaratharaman Chandramouli 				   u8 sgid_index, u8 hop_limit,
46172224c47aSDasaratharaman Chandramouli 				   u8 traffic_class)
46182224c47aSDasaratharaman Chandramouli {
46192224c47aSDasaratharaman Chandramouli 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
46202224c47aSDasaratharaman Chandramouli 
46212224c47aSDasaratharaman Chandramouli 	attr->ah_flags = IB_AH_GRH;
46222224c47aSDasaratharaman Chandramouli 	if (dgid)
46232224c47aSDasaratharaman Chandramouli 		grh->dgid = *dgid;
46242224c47aSDasaratharaman Chandramouli 	grh->flow_label = flow_label;
46252224c47aSDasaratharaman Chandramouli 	grh->sgid_index = sgid_index;
46262224c47aSDasaratharaman Chandramouli 	grh->hop_limit = hop_limit;
46272224c47aSDasaratharaman Chandramouli 	grh->traffic_class = traffic_class;
46288d9ec9adSJason Gunthorpe 	grh->sgid_attr = NULL;
46292224c47aSDasaratharaman Chandramouli }
463044c58487SDasaratharaman Chandramouli 
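/*
 * Example (editor's sketch): filling an rdma_ah_attr for a GRH-routed
 * destination with the accessors above. The dlid, dgid and sgid_index
 * values are hypothetical.
 *
 *	struct rdma_ah_attr ah_attr = {};
 *
 *	ah_attr.type = rdma_ah_find_type(device, port_num);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_dlid(&ah_attr, dlid);
 *	rdma_ah_set_grh(&ah_attr, &dgid, 0, sgid_index, 64, 0);
 */
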
46318d9ec9adSJason Gunthorpe void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
46328d9ec9adSJason Gunthorpe void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
46338d9ec9adSJason Gunthorpe 			     u32 flow_label, u8 hop_limit, u8 traffic_class,
46348d9ec9adSJason Gunthorpe 			     const struct ib_gid_attr *sgid_attr);
4635d97099feSJason Gunthorpe void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4636d97099feSJason Gunthorpe 		       const struct rdma_ah_attr *src);
4637d97099feSJason Gunthorpe void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4638d97099feSJason Gunthorpe 			  const struct rdma_ah_attr *new);
4639d97099feSJason Gunthorpe void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
46408d9ec9adSJason Gunthorpe 
464187daac68SDon Hiatt /**
464287daac68SDon Hiatt  * rdma_ah_find_type - Return address handle type.
464387daac68SDon Hiatt  *
464487daac68SDon Hiatt  * @dev: Device to be checked
464587daac68SDon Hiatt  * @port_num: Port number
464687daac68SDon Hiatt  */
464744c58487SDasaratharaman Chandramouli static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
46481fb7f897SMark Bloch 						       u32 port_num)
464944c58487SDasaratharaman Chandramouli {
4650a6532e71SParav Pandit 	if (rdma_protocol_roce(dev, port_num))
465144c58487SDasaratharaman Chandramouli 		return RDMA_AH_ATTR_TYPE_ROCE;
465287daac68SDon Hiatt 	if (rdma_protocol_ib(dev, port_num)) {
465387daac68SDon Hiatt 		if (rdma_cap_opa_ah(dev, port_num))
465464b4646eSDasaratharaman Chandramouli 			return RDMA_AH_ATTR_TYPE_OPA;
465544c58487SDasaratharaman Chandramouli 		return RDMA_AH_ATTR_TYPE_IB;
465644c58487SDasaratharaman Chandramouli 	}
46577db20ecdSHiatt, Don 
465887daac68SDon Hiatt 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
465987daac68SDon Hiatt }
466087daac68SDon Hiatt 
466162ede777SHiatt, Don /**
466262ede777SHiatt, Don  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
46638937e28eSXin Gao  *     In the current implementation the only way to
466462ede777SHiatt, Don  *     get a 32-bit LID is from other sources, for OPA.
466562ede777SHiatt, Don  *     For IB, LIDs will always be 16 bits, so the value
466662ede777SHiatt, Don  *     is cast accordingly.
466762ede777SHiatt, Don  *
466862ede777SHiatt, Don  * @lid: A 32bit LID
466962ede777SHiatt, Don  */
467062ede777SHiatt, Don static inline u16 ib_lid_cpu16(u32 lid)
46717db20ecdSHiatt, Don {
467262ede777SHiatt, Don 	WARN_ON_ONCE(lid & 0xFFFF0000);
467362ede777SHiatt, Don 	return (u16)lid;
46747db20ecdSHiatt, Don }
46757db20ecdSHiatt, Don 
467662ede777SHiatt, Don /**
467762ede777SHiatt, Don  * ib_lid_be16 - Return lid in 16bit BE encoding.
467862ede777SHiatt, Don  *
467962ede777SHiatt, Don  * @lid: A 32bit LID
468062ede777SHiatt, Don  */
468162ede777SHiatt, Don static inline __be16 ib_lid_be16(u32 lid)
46827db20ecdSHiatt, Don {
468362ede777SHiatt, Don 	WARN_ON_ONCE(lid & 0xFFFF0000);
468462ede777SHiatt, Don 	return cpu_to_be16((u16)lid);
46857db20ecdSHiatt, Don }
468632043830SDoug Ledford 
4687c66cd353SSagi Grimberg /**
4688c66cd353SSagi Grimberg  * ib_get_vector_affinity - Get the affinity mappings of a given completion
4689c66cd353SSagi Grimberg  *   vector
4690c66cd353SSagi Grimberg  * @device:         the rdma device
4691c66cd353SSagi Grimberg  * @comp_vector:    index of completion vector
4692c66cd353SSagi Grimberg  *
4693c66cd353SSagi Grimberg  * Returns NULL on failure, otherwise a corresponding cpu map of the
4694c66cd353SSagi Grimberg  * completion vector (returns all-cpus map if the device driver doesn't
4695c66cd353SSagi Grimberg  * completion vector (returns an all-cpus map if the device driver doesn't
4696c66cd353SSagi Grimberg  */
4697c66cd353SSagi Grimberg static inline const struct cpumask *
4698c66cd353SSagi Grimberg ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4699c66cd353SSagi Grimberg {
4700c66cd353SSagi Grimberg 	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
47013023a1e9SKamal Heib 	    !device->ops.get_vector_affinity)
4702c66cd353SSagi Grimberg 		return NULL;
4703c66cd353SSagi Grimberg 
47043023a1e9SKamal Heib 	return device->ops.get_vector_affinity(device, comp_vector);
4705c66cd353SSagi Grimberg 
4706c66cd353SSagi Grimberg }
4707c66cd353SSagi Grimberg 
470832f69e4bSDaniel Jurgens /**
470932f69e4bSDaniel Jurgens  * rdma_roce_rescan_device - Rescan all of the network devices in the system
471032f69e4bSDaniel Jurgens  * and add their gids, as needed, to the relevant RoCE devices.
471132f69e4bSDaniel Jurgens  *
471232f69e4bSDaniel Jurgens  * @device:         the rdma device
471332f69e4bSDaniel Jurgens  * @ibdev:          the rdma device
471432f69e4bSDaniel Jurgens void rdma_roce_rescan_device(struct ib_device *ibdev);
471532f69e4bSDaniel Jurgens 
47168313c10fSJason Gunthorpe struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
47177dc08dcfSYishai Hadas 
471815a1b4beSJason Gunthorpe int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4719f6a8a19bSDenis Drozdov 
47201fb7f897SMark Bloch struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4721f6a8a19bSDenis Drozdov 				     enum rdma_netdev_t type, const char *name,
4722f6a8a19bSDenis Drozdov 				     unsigned char name_assign_type,
4723f6a8a19bSDenis Drozdov 				     void (*setup)(struct net_device *));
47245d6b0cb3SDenis Drozdov 
47251fb7f897SMark Bloch int rdma_init_netdev(struct ib_device *device, u32 port_num,
47265d6b0cb3SDenis Drozdov 		     enum rdma_netdev_t type, const char *name,
47275d6b0cb3SDenis Drozdov 		     unsigned char name_assign_type,
47285d6b0cb3SDenis Drozdov 		     void (*setup)(struct net_device *),
47295d6b0cb3SDenis Drozdov 		     struct net_device *netdev);
47305d6b0cb3SDenis Drozdov 
4731d4122f5aSParav Pandit /**
473254747231SParav Pandit  * rdma_device_to_ibdev - Get ib_device pointer from device pointer
473354747231SParav Pandit  *
473454747231SParav Pandit  * @device:	device pointer for which ib_device pointer to retrieve
473554747231SParav Pandit  * @device:	device pointer for which the ib_device pointer is to be retrieved
473654747231SParav Pandit  *
473754747231SParav Pandit  * rdma_device_to_ibdev() retrieves the ib_device pointer from a device pointer.
473854747231SParav Pandit  */
473954747231SParav Pandit static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
474054747231SParav Pandit {
4741cebe556bSParav Pandit 	struct ib_core_device *coredev =
4742cebe556bSParav Pandit 		container_of(device, struct ib_core_device, dev);
4743cebe556bSParav Pandit 
4744cebe556bSParav Pandit 	return coredev->owner;
474554747231SParav Pandit }
474654747231SParav Pandit 
474754747231SParav Pandit /**
47488ecfca68SChristoph Hellwig  * ibdev_to_node - return the NUMA node for a given ib_device
47498ecfca68SChristoph Hellwig  * @dev:	device to get the NUMA node for.
47508ecfca68SChristoph Hellwig  */
47518ecfca68SChristoph Hellwig static inline int ibdev_to_node(struct ib_device *ibdev)
47528ecfca68SChristoph Hellwig {
47538ecfca68SChristoph Hellwig 	struct device *parent = ibdev->dev.parent;
47548ecfca68SChristoph Hellwig 
47558ecfca68SChristoph Hellwig 	if (!parent)
47568ecfca68SChristoph Hellwig 		return NUMA_NO_NODE;
47578ecfca68SChristoph Hellwig 	return dev_to_node(parent);
47588ecfca68SChristoph Hellwig }
47598ecfca68SChristoph Hellwig 
47608ecfca68SChristoph Hellwig /**
476154747231SParav Pandit  * rdma_device_to_drv_device - Helper macro to reach back to driver's
476254747231SParav Pandit  *			       ib_device holder structure from device pointer.
476354747231SParav Pandit  *
476454747231SParav Pandit  * NOTE: New drivers should not make use of this API; This API is only for
476554747231SParav Pandit  * existing drivers that have exposed sysfs entries using
4766915e4af5SJason Gunthorpe  * ops->device_group.
476754747231SParav Pandit  */
476854747231SParav Pandit #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
476954747231SParav Pandit 	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
477041c61401SParav Pandit 
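/*
 * Example (editor's sketch): a sysfs attribute callback in an existing
 * driver reaching back to its private structure. struct my_dev and its
 * ibdev member are hypothetical.
 *
 *	struct my_dev *mdev =
 *		rdma_device_to_drv_device(dev, struct my_dev, ibdev);
 */
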
477141c61401SParav Pandit bool rdma_dev_access_netns(const struct ib_device *device,
477241c61401SParav Pandit 			   const struct net *net);
4773d5665a21SMark Zhang 
4774d5665a21SMark Zhang #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4775074bf2c2SWeihang Li #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4776d5665a21SMark Zhang #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4777d5665a21SMark Zhang 
4778d5665a21SMark Zhang /**
4779d5665a21SMark Zhang  * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4780d5665a21SMark Zhang  *                               on the flow_label
4781d5665a21SMark Zhang  *
4782d5665a21SMark Zhang  * This function converts the 20-bit flow_label input to a valid 14-bit RoCE
4783d5665a21SMark Zhang  * v2 UDP src port value. All RoCE v2 drivers should use this same
4784d5665a21SMark Zhang  * convention.
4785d5665a21SMark Zhang  */
4786d5665a21SMark Zhang static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4787d5665a21SMark Zhang {
4788d5665a21SMark Zhang 	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4789d5665a21SMark Zhang 
4790d5665a21SMark Zhang 	fl_low ^= fl_high >> 14;
4791d5665a21SMark Zhang 	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4792d5665a21SMark Zhang }
4793d5665a21SMark Zhang 
4794d5665a21SMark Zhang /**
4795d5665a21SMark Zhang  * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
4796d5665a21SMark Zhang  *                        local and remote qpn values
4797d5665a21SMark Zhang  *
4798d5665a21SMark Zhang  * This function folds the product of the two 24-bit QPN fields and converts
4799d5665a21SMark Zhang  * it to a 20-bit result.
4800d5665a21SMark Zhang  *
4801d5665a21SMark Zhang  * The resulting flow_label is symmetric in the local and remote QPN values,
4802d5665a21SMark Zhang  * so both the requester and the responder calculate the same flow_label for
4803d5665a21SMark Zhang  * a given connection.
4804d5665a21SMark Zhang  *
4805d5665a21SMark Zhang  * Drivers should use this helper when the upper layer provides a zero
4806d5665a21SMark Zhang  * flow_label value, to improve the entropy of RDMA traffic in the network.
4808d5665a21SMark Zhang  */
4809d5665a21SMark Zhang static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4810d5665a21SMark Zhang {
4811d5665a21SMark Zhang 	u64 v = (u64)lqpn * rqpn;
4812d5665a21SMark Zhang 
4813d5665a21SMark Zhang 	v ^= v >> 20;
4814d5665a21SMark Zhang 	v ^= v >> 40;
4815d5665a21SMark Zhang 
4816d5665a21SMark Zhang 	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4817d5665a21SMark Zhang }
48187416790eSParav Pandit 
481918451db8SZhu Yanjun /**
482018451db8SZhu Yanjun  * rdma_get_udp_sport - Calculate and set UDP source port based on the flow
482118451db8SZhu Yanjun  *                      label. If flow label is not defined in GRH then
482218451db8SZhu Yanjun  *                      calculate it based on lqpn/rqpn.
482318451db8SZhu Yanjun  *
482418451db8SZhu Yanjun  * @fl:                 flow label from GRH
482518451db8SZhu Yanjun  * @lqpn:               local qp number
482618451db8SZhu Yanjun  * @rqpn:               remote qp number
482718451db8SZhu Yanjun  */
482818451db8SZhu Yanjun static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
482918451db8SZhu Yanjun {
483018451db8SZhu Yanjun 	if (!fl)
483118451db8SZhu Yanjun 		fl = rdma_calc_flow_label(lqpn, rqpn);
483218451db8SZhu Yanjun 
483318451db8SZhu Yanjun 	return rdma_flow_label_to_udp_sport(fl);
483418451db8SZhu Yanjun }
483518451db8SZhu Yanjun 
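/*
 * Worked example (editor's addition): with fl == 0, lqpn == 0x17 and
 * rqpn == 0x2a, rdma_calc_flow_label() folds the up-to-48-bit product
 * 0x17 * 0x2a = 0x3c6 down to 20 bits (still 0x3c6, as it already fits),
 * and rdma_flow_label_to_udp_sport() XORs the top 6 bits into the low 14
 * and ORs in 0xC000, giving a UDP source port of 0xC3C6.
 */
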
48367416790eSParav Pandit const struct ib_port_immutable*
48377416790eSParav Pandit ib_port_immutable_read(struct ib_device *dev, unsigned int port);
4838a4d61e84SRoland Dreier #endif /* IB_VERBS_H */
4839