xref: /openbmc/linux/include/rdma/ib_verbs.h (revision 33023fb85a42b53bf778bc025f9667b582282be4)
1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38 
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41 
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/mm.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/list.h>
48 #include <linux/rwsem.h>
49 #include <linux/scatterlist.h>
50 #include <linux/workqueue.h>
51 #include <linux/socket.h>
52 #include <linux/irq_poll.h>
53 #include <uapi/linux/if_ether.h>
54 #include <net/ipv6.h>
55 #include <net/ip.h>
56 #include <linux/string.h>
57 #include <linux/slab.h>
58 #include <linux/netdevice.h>
59 
60 #include <linux/if_link.h>
61 #include <linux/atomic.h>
62 #include <linux/mmu_notifier.h>
63 #include <linux/uaccess.h>
64 #include <linux/cgroup_rdma.h>
65 #include <uapi/rdma/ib_user_verbs.h>
66 #include <rdma/restrack.h>
67 #include <uapi/rdma/rdma_user_ioctl.h>
68 #include <uapi/rdma/ib_user_ioctl_verbs.h>
69 
70 #define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
71 
72 extern struct workqueue_struct *ib_wq;
73 extern struct workqueue_struct *ib_comp_wq;
74 
75 union ib_gid {
76 	u8	raw[16];
77 	struct {
78 		__be64	subnet_prefix;
79 		__be64	interface_id;
80 	} global;
81 };
82 
83 extern union ib_gid zgid;
84 
85 enum ib_gid_type {
86 	/* If link layer is Ethernet, this is RoCE V1 */
87 	IB_GID_TYPE_IB        = 0,
88 	IB_GID_TYPE_ROCE      = 0,
89 	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
90 	IB_GID_TYPE_SIZE
91 };
92 
93 #define ROCE_V2_UDP_DPORT      4791
94 struct ib_gid_attr {
95 	struct net_device	*ndev;
96 	struct ib_device	*device;
97 	union ib_gid		gid;
98 	enum ib_gid_type	gid_type;
99 	u16			index;
100 	u8			port_num;
101 };
102 
103 enum rdma_node_type {
104 	/* IB values map to NodeInfo:NodeType. */
105 	RDMA_NODE_IB_CA 	= 1,
106 	RDMA_NODE_IB_SWITCH,
107 	RDMA_NODE_IB_ROUTER,
108 	RDMA_NODE_RNIC,
109 	RDMA_NODE_USNIC,
110 	RDMA_NODE_USNIC_UDP,
111 };
112 
113 enum {
114 	/* set the locally administered indication */
115 	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
116 };
117 
118 enum rdma_transport_type {
119 	RDMA_TRANSPORT_IB,
120 	RDMA_TRANSPORT_IWARP,
121 	RDMA_TRANSPORT_USNIC,
122 	RDMA_TRANSPORT_USNIC_UDP
123 };
124 
125 enum rdma_protocol_type {
126 	RDMA_PROTOCOL_IB,
127 	RDMA_PROTOCOL_IBOE,
128 	RDMA_PROTOCOL_IWARP,
129 	RDMA_PROTOCOL_USNIC_UDP
130 };
131 
132 __attribute_const__ enum rdma_transport_type
133 rdma_node_get_transport(enum rdma_node_type node_type);
134 
135 enum rdma_network_type {
136 	RDMA_NETWORK_IB,
137 	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
138 	RDMA_NETWORK_IPV4,
139 	RDMA_NETWORK_IPV6
140 };
141 
142 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
143 {
144 	if (network_type == RDMA_NETWORK_IPV4 ||
145 	    network_type == RDMA_NETWORK_IPV6)
146 		return IB_GID_TYPE_ROCE_UDP_ENCAP;
147 
148 	/* IB_GID_TYPE_IB also covers RoCE v1 (RDMA_NETWORK_ROCE_V1 == RDMA_NETWORK_IB) */
149 	return IB_GID_TYPE_IB;
150 }
151 
152 static inline enum rdma_network_type
153 rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
154 {
155 	if (attr->gid_type == IB_GID_TYPE_IB)
156 		return RDMA_NETWORK_IB;
157 
158 	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
159 		return RDMA_NETWORK_IPV4;
160 	else
161 		return RDMA_NETWORK_IPV6;
162 }
163 
164 enum rdma_link_layer {
165 	IB_LINK_LAYER_UNSPECIFIED,
166 	IB_LINK_LAYER_INFINIBAND,
167 	IB_LINK_LAYER_ETHERNET,
168 };
169 
170 enum ib_device_cap_flags {
171 	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
172 	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
173 	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
174 	IB_DEVICE_RAW_MULTI			= (1 << 3),
175 	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
176 	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
177 	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
178 	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
179 	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
180 	/* Not in use, former INIT_TYPE		= (1 << 9),*/
181 	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
182 	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
183 	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
184 	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
185 	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),
186 
187 	/*
188 	 * This device supports a per-device lkey or stag that can be
189 	 * used without performing a memory registration for the local
190 	 * memory.  Note that ULPs should never check this flag, but
191 	 * instead use the local_dma_lkey field in the ib_pd structure,
192 	 * which will always contain a usable lkey.
193 	 */
194 	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
195 	/* Reserved, old SEND_W_INV		= (1 << 16),*/
196 	IB_DEVICE_MEM_WINDOW			= (1 << 17),
197 	/*
198 	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
199 	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
200 	 * messages and can verify the validity of checksum for
201 	 * incoming messages.  Setting this flag implies that the
202 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
203 	 */
204 	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
205 	IB_DEVICE_UD_TSO			= (1 << 19),
206 	IB_DEVICE_XRC				= (1 << 20),
207 
208 	/*
209 	 * This device supports the IB "base memory management extension",
210 	 * which includes support for fast registrations (IB_WR_REG_MR,
211 	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
212 	 * also be set by any iWarp device, which must support FRs to comply
213 	 * with the iWarp verbs spec.  iWarp devices also support the
214 	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
215 	 * stag.
216 	 */
217 	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
218 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
219 	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
220 	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
221 	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
222 	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
223 	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
224 	/*
225 	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
226 	 * support execution of WQEs that involve synchronization
227 	 * of I/O operations with a single completion queue managed
228 	 * by hardware.
229 	 */
230 	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
231 	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
232 	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
233 	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
234 	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
235 	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
236 	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
237 	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
238 	IB_DEVICE_RDMA_NETDEV_OPA_VNIC		= (1ULL << 35),
239 	/* The device supports padding incoming writes to cacheline. */
240 	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
241 };
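
/*
 * Illustrative sketch (not part of this header): a ULP would typically test
 * device_cap_flags once at setup time and, as noted above, take the local
 * DMA lkey from the protection domain rather than checking
 * IB_DEVICE_LOCAL_DMA_LKEY itself.  The function and parameter names below
 * are hypothetical.
 *
 *	static int my_ulp_setup(struct ib_pd *pd,
 *				const struct ib_device_attr *attr, u32 *lkey)
 *	{
 *		if (!(attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
 *			return -EOPNOTSUPP;	// no IB_WR_REG_MR support
 *
 *		*lkey = pd->local_dma_lkey;	// always usable
 *		return 0;
 *	}
 */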
242 
243 enum ib_signature_prot_cap {
244 	IB_PROT_T10DIF_TYPE_1 = 1,
245 	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
246 	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
247 };
248 
249 enum ib_signature_guard_cap {
250 	IB_GUARD_T10DIF_CRC	= 1,
251 	IB_GUARD_T10DIF_CSUM	= 1 << 1,
252 };
253 
254 enum ib_atomic_cap {
255 	IB_ATOMIC_NONE,
256 	IB_ATOMIC_HCA,
257 	IB_ATOMIC_GLOB
258 };
259 
260 enum ib_odp_general_cap_bits {
261 	IB_ODP_SUPPORT		= 1 << 0,
262 	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
263 };
264 
265 enum ib_odp_transport_cap_bits {
266 	IB_ODP_SUPPORT_SEND	= 1 << 0,
267 	IB_ODP_SUPPORT_RECV	= 1 << 1,
268 	IB_ODP_SUPPORT_WRITE	= 1 << 2,
269 	IB_ODP_SUPPORT_READ	= 1 << 3,
270 	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
271 };
272 
273 struct ib_odp_caps {
274 	uint64_t general_caps;
275 	struct {
276 		uint32_t  rc_odp_caps;
277 		uint32_t  uc_odp_caps;
278 		uint32_t  ud_odp_caps;
279 	} per_transport_caps;
280 };
281 
282 struct ib_rss_caps {
283 	/* Corresponding bit will be set if qp type from
284 	 * 'enum ib_qp_type' is supported, e.g.
285 	 * supported_qpts |= 1 << IB_QPT_UD
286 	 */
287 	u32 supported_qpts;
288 	u32 max_rwq_indirection_tables;
289 	u32 max_rwq_indirection_table_size;
290 };
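
/*
 * Illustrative sketch (hypothetical helper): checking whether RSS is
 * supported for a given QP type follows the bit encoding described above.
 *
 *	static bool my_rss_supports_ud(const struct ib_rss_caps *caps)
 *	{
 *		return caps->supported_qpts & (1 << IB_QPT_UD);
 *	}
 */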
291 
292 enum ib_tm_cap_flags {
293 	/*  Support tag matching on RC transport */
294 	IB_TM_CAP_RC		    = 1 << 0,
295 };
296 
297 struct ib_tm_caps {
298 	/* Max size of RNDV header */
299 	u32 max_rndv_hdr_size;
300 	/* Max number of entries in tag matching list */
301 	u32 max_num_tags;
302 	/* From enum ib_tm_cap_flags */
303 	u32 flags;
304 	/* Max number of outstanding list operations */
305 	u32 max_ops;
306 	/* Max number of SGE in tag matching entry */
307 	u32 max_sge;
308 };
309 
310 struct ib_cq_init_attr {
311 	unsigned int	cqe;
312 	int		comp_vector;
313 	u32		flags;
314 };
315 
316 enum ib_cq_attr_mask {
317 	IB_CQ_MODERATE = 1 << 0,
318 };
319 
320 struct ib_cq_caps {
321 	u16     max_cq_moderation_count;
322 	u16     max_cq_moderation_period;
323 };
324 
325 struct ib_dm_mr_attr {
326 	u64		length;
327 	u64		offset;
328 	u32		access_flags;
329 };
330 
331 struct ib_dm_alloc_attr {
332 	u64	length;
333 	u32	alignment;
334 	u32	flags;
335 };
336 
337 struct ib_device_attr {
338 	u64			fw_ver;
339 	__be64			sys_image_guid;
340 	u64			max_mr_size;
341 	u64			page_size_cap;
342 	u32			vendor_id;
343 	u32			vendor_part_id;
344 	u32			hw_ver;
345 	int			max_qp;
346 	int			max_qp_wr;
347 	u64			device_cap_flags;
348 	int			max_send_sge;
349 	int			max_recv_sge;
350 	int			max_sge_rd;
351 	int			max_cq;
352 	int			max_cqe;
353 	int			max_mr;
354 	int			max_pd;
355 	int			max_qp_rd_atom;
356 	int			max_ee_rd_atom;
357 	int			max_res_rd_atom;
358 	int			max_qp_init_rd_atom;
359 	int			max_ee_init_rd_atom;
360 	enum ib_atomic_cap	atomic_cap;
361 	enum ib_atomic_cap	masked_atomic_cap;
362 	int			max_ee;
363 	int			max_rdd;
364 	int			max_mw;
365 	int			max_raw_ipv6_qp;
366 	int			max_raw_ethy_qp;
367 	int			max_mcast_grp;
368 	int			max_mcast_qp_attach;
369 	int			max_total_mcast_qp_attach;
370 	int			max_ah;
371 	int			max_fmr;
372 	int			max_map_per_fmr;
373 	int			max_srq;
374 	int			max_srq_wr;
375 	int			max_srq_sge;
376 	unsigned int		max_fast_reg_page_list_len;
377 	u16			max_pkeys;
378 	u8			local_ca_ack_delay;
379 	int			sig_prot_cap;
380 	int			sig_guard_cap;
381 	struct ib_odp_caps	odp_caps;
382 	uint64_t		timestamp_mask;
383 	uint64_t		hca_core_clock; /* in KHZ */
384 	struct ib_rss_caps	rss_caps;
385 	u32			max_wq_type_rq;
386 	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
387 	struct ib_tm_caps	tm_caps;
388 	struct ib_cq_caps       cq_caps;
389 	u64			max_dm_size;
390 };
391 
392 enum ib_mtu {
393 	IB_MTU_256  = 1,
394 	IB_MTU_512  = 2,
395 	IB_MTU_1024 = 3,
396 	IB_MTU_2048 = 4,
397 	IB_MTU_4096 = 5
398 };
399 
400 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
401 {
402 	switch (mtu) {
403 	case IB_MTU_256:  return  256;
404 	case IB_MTU_512:  return  512;
405 	case IB_MTU_1024: return 1024;
406 	case IB_MTU_2048: return 2048;
407 	case IB_MTU_4096: return 4096;
408 	default: 	  return -1;
409 	}
410 }
411 
412 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
413 {
414 	if (mtu >= 4096)
415 		return IB_MTU_4096;
416 	else if (mtu >= 2048)
417 		return IB_MTU_2048;
418 	else if (mtu >= 1024)
419 		return IB_MTU_1024;
420 	else if (mtu >= 512)
421 		return IB_MTU_512;
422 	else
423 		return IB_MTU_256;
424 }
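
/*
 * Illustrative sketch (hypothetical usage): the two helpers above are often
 * used together, e.g. to clamp a requested MTU to what the port currently
 * runs at.  "netdev_mtu" and "port_attr" are assumed to come from the
 * caller.
 *
 *	enum ib_mtu path_mtu = min_t(enum ib_mtu,
 *				     ib_mtu_int_to_enum(netdev_mtu),
 *				     port_attr.active_mtu);
 */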
425 
426 enum ib_port_state {
427 	IB_PORT_NOP		= 0,
428 	IB_PORT_DOWN		= 1,
429 	IB_PORT_INIT		= 2,
430 	IB_PORT_ARMED		= 3,
431 	IB_PORT_ACTIVE		= 4,
432 	IB_PORT_ACTIVE_DEFER	= 5
433 };
434 
435 enum ib_port_cap_flags {
436 	IB_PORT_SM				= 1 <<  1,
437 	IB_PORT_NOTICE_SUP			= 1 <<  2,
438 	IB_PORT_TRAP_SUP			= 1 <<  3,
439 	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
440 	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
441 	IB_PORT_SL_MAP_SUP			= 1 <<  6,
442 	IB_PORT_MKEY_NVRAM			= 1 <<  7,
443 	IB_PORT_PKEY_NVRAM			= 1 <<  8,
444 	IB_PORT_LED_INFO_SUP			= 1 <<  9,
445 	IB_PORT_SM_DISABLED			= 1 << 10,
446 	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
447 	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
448 	IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
449 	IB_PORT_CM_SUP				= 1 << 16,
450 	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
451 	IB_PORT_REINIT_SUP			= 1 << 18,
452 	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
453 	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
454 	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
455 	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
456 	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
457 	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
458 	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
459 	IB_PORT_IP_BASED_GIDS			= 1 << 26,
460 };
461 
462 enum ib_port_width {
463 	IB_WIDTH_1X	= 1,
464 	IB_WIDTH_4X	= 2,
465 	IB_WIDTH_8X	= 4,
466 	IB_WIDTH_12X	= 8
467 };
468 
469 static inline int ib_width_enum_to_int(enum ib_port_width width)
470 {
471 	switch (width) {
472 	case IB_WIDTH_1X:  return  1;
473 	case IB_WIDTH_4X:  return  4;
474 	case IB_WIDTH_8X:  return  8;
475 	case IB_WIDTH_12X: return 12;
476 	default: 	  return -1;
477 	}
478 }
479 
480 enum ib_port_speed {
481 	IB_SPEED_SDR	= 1,
482 	IB_SPEED_DDR	= 2,
483 	IB_SPEED_QDR	= 4,
484 	IB_SPEED_FDR10	= 8,
485 	IB_SPEED_FDR	= 16,
486 	IB_SPEED_EDR	= 32,
487 	IB_SPEED_HDR	= 64
488 };
489 
490 /**
491  * struct rdma_hw_stats
492  * @lock - Mutex to protect parallel write access to lifespan and values
493  *    of counters, which are 64 bits and not guaranteed to be written
494  *    atomically on 32-bit systems.
495  * @timestamp - Used by the core code to track when the last update was
496  * @lifespan - Used by the core code to determine how old the counters
497  *   should be before being updated again.  Stored in jiffies, defaults
498  *   to 10 milliseconds, drivers can override the default by specifying
499  *   their own value during their allocation routine.
500  * @names - Array of pointers to static names used for the counters in
501  *   the sysfs directory.
502  * @num_counters - How many hardware counters there are.  If names is
503  *   shorter than this number, a kernel oops will result.  Driver authors
504  *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
505  *   in their code to prevent this.
506  * @value - Array of u64 counters that are accessed by the sysfs code and
507  *   filled in by the driver's get_stats routine
508  */
509 struct rdma_hw_stats {
510 	struct mutex	lock; /* Protect lifespan and values[] */
511 	unsigned long	timestamp;
512 	unsigned long	lifespan;
513 	const char * const *names;
514 	int		num_counters;
515 	u64		value[];
516 };
517 
518 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
519 /**
520  * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
521  *   for drivers.
522  * @names - Array of static const char *
523  * @num_counters - How many elements in array
524  * @lifespan - How many milliseconds between updates
525  */
526 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
527 		const char * const *names, int num_counters,
528 		unsigned long lifespan)
529 {
530 	struct rdma_hw_stats *stats;
531 
532 	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
533 			GFP_KERNEL);
534 	if (!stats)
535 		return NULL;
536 	stats->names = names;
537 	stats->num_counters = num_counters;
538 	stats->lifespan = msecs_to_jiffies(lifespan);
539 
540 	return stats;
541 }
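
/*
 * Illustrative sketch (hypothetical driver callback): the names array must
 * be static and at least num_counters entries long; the BUILD_BUG_ON
 * suggested in the struct rdma_hw_stats documentation keeps the two in
 * sync.
 *
 *	static const char * const my_hw_stat_names[] = {
 *		"rx_packets", "tx_packets",
 *	};
 *
 *	static struct rdma_hw_stats *my_alloc_hw_stats(struct ib_device *dev,
 *						       u8 port_num)
 *	{
 *		BUILD_BUG_ON(ARRAY_SIZE(my_hw_stat_names) != 2);
 *		return rdma_alloc_hw_stats_struct(my_hw_stat_names,
 *						  ARRAY_SIZE(my_hw_stat_names),
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */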
542 
543 
544 /* Define bits for the various kinds of functionality this port needs the
545  * core to support.
546  */
547 /* Management                           0x00000FFF */
548 #define RDMA_CORE_CAP_IB_MAD            0x00000001
549 #define RDMA_CORE_CAP_IB_SMI            0x00000002
550 #define RDMA_CORE_CAP_IB_CM             0x00000004
551 #define RDMA_CORE_CAP_IW_CM             0x00000008
552 #define RDMA_CORE_CAP_IB_SA             0x00000010
553 #define RDMA_CORE_CAP_OPA_MAD           0x00000020
554 
555 /* Address format                       0x000FF000 */
556 #define RDMA_CORE_CAP_AF_IB             0x00001000
557 #define RDMA_CORE_CAP_ETH_AH            0x00002000
558 #define RDMA_CORE_CAP_OPA_AH            0x00004000
559 
560 /* Protocol                             0xFFF00000 */
561 #define RDMA_CORE_CAP_PROT_IB           0x00100000
562 #define RDMA_CORE_CAP_PROT_ROCE         0x00200000
563 #define RDMA_CORE_CAP_PROT_IWARP        0x00400000
564 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
565 #define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
566 #define RDMA_CORE_CAP_PROT_USNIC        0x02000000
567 
568 #define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
569 					| RDMA_CORE_CAP_IB_MAD \
570 					| RDMA_CORE_CAP_IB_SMI \
571 					| RDMA_CORE_CAP_IB_CM  \
572 					| RDMA_CORE_CAP_IB_SA  \
573 					| RDMA_CORE_CAP_AF_IB)
574 #define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
575 					| RDMA_CORE_CAP_IB_MAD  \
576 					| RDMA_CORE_CAP_IB_CM   \
577 					| RDMA_CORE_CAP_AF_IB   \
578 					| RDMA_CORE_CAP_ETH_AH)
579 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
580 					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
581 					| RDMA_CORE_CAP_IB_MAD  \
582 					| RDMA_CORE_CAP_IB_CM   \
583 					| RDMA_CORE_CAP_AF_IB   \
584 					| RDMA_CORE_CAP_ETH_AH)
585 #define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
586 					| RDMA_CORE_CAP_IW_CM)
587 #define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
588 					| RDMA_CORE_CAP_OPA_MAD)
589 
590 #define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)
591 
592 #define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
593 
594 struct ib_port_attr {
595 	u64			subnet_prefix;
596 	enum ib_port_state	state;
597 	enum ib_mtu		max_mtu;
598 	enum ib_mtu		active_mtu;
599 	int			gid_tbl_len;
600 	u32			port_cap_flags;
601 	u32			max_msg_sz;
602 	u32			bad_pkey_cntr;
603 	u32			qkey_viol_cntr;
604 	u16			pkey_tbl_len;
605 	u32			sm_lid;
606 	u32			lid;
607 	u8			lmc;
608 	u8			max_vl_num;
609 	u8			sm_sl;
610 	u8			subnet_timeout;
611 	u8			init_type_reply;
612 	u8			active_width;
613 	u8			active_speed;
614 	u8                      phys_state;
615 	bool			grh_required;
616 };
617 
618 enum ib_device_modify_flags {
619 	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
620 	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
621 };
622 
623 #define IB_DEVICE_NODE_DESC_MAX 64
624 
625 struct ib_device_modify {
626 	u64	sys_image_guid;
627 	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
628 };
629 
630 enum ib_port_modify_flags {
631 	IB_PORT_SHUTDOWN		= 1,
632 	IB_PORT_INIT_TYPE		= (1<<2),
633 	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
634 	IB_PORT_OPA_MASK_CHG		= (1<<4)
635 };
636 
637 struct ib_port_modify {
638 	u32	set_port_cap_mask;
639 	u32	clr_port_cap_mask;
640 	u8	init_type;
641 };
642 
643 enum ib_event_type {
644 	IB_EVENT_CQ_ERR,
645 	IB_EVENT_QP_FATAL,
646 	IB_EVENT_QP_REQ_ERR,
647 	IB_EVENT_QP_ACCESS_ERR,
648 	IB_EVENT_COMM_EST,
649 	IB_EVENT_SQ_DRAINED,
650 	IB_EVENT_PATH_MIG,
651 	IB_EVENT_PATH_MIG_ERR,
652 	IB_EVENT_DEVICE_FATAL,
653 	IB_EVENT_PORT_ACTIVE,
654 	IB_EVENT_PORT_ERR,
655 	IB_EVENT_LID_CHANGE,
656 	IB_EVENT_PKEY_CHANGE,
657 	IB_EVENT_SM_CHANGE,
658 	IB_EVENT_SRQ_ERR,
659 	IB_EVENT_SRQ_LIMIT_REACHED,
660 	IB_EVENT_QP_LAST_WQE_REACHED,
661 	IB_EVENT_CLIENT_REREGISTER,
662 	IB_EVENT_GID_CHANGE,
663 	IB_EVENT_WQ_FATAL,
664 };
665 
666 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
667 
668 struct ib_event {
669 	struct ib_device	*device;
670 	union {
671 		struct ib_cq	*cq;
672 		struct ib_qp	*qp;
673 		struct ib_srq	*srq;
674 		struct ib_wq	*wq;
675 		u8		port_num;
676 	} element;
677 	enum ib_event_type	event;
678 };
679 
680 struct ib_event_handler {
681 	struct ib_device *device;
682 	void            (*handler)(struct ib_event_handler *, struct ib_event *);
683 	struct list_head  list;
684 };
685 
686 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
687 	do {							\
688 		(_ptr)->device  = _device;			\
689 		(_ptr)->handler = _handler;			\
690 		INIT_LIST_HEAD(&(_ptr)->list);			\
691 	} while (0)
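
/*
 * Illustrative sketch: a client that wants asynchronous events fills in a
 * handler with the macro above and registers it with
 * ib_register_event_handler(), declared later in this header.
 * "my_handler" and "my_event_handler" are hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ERR)
 *			pr_warn("port %u went down\n",
 *				event->element.port_num);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */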
692 
693 struct ib_global_route {
694 	const struct ib_gid_attr *sgid_attr;
695 	union ib_gid	dgid;
696 	u32		flow_label;
697 	u8		sgid_index;
698 	u8		hop_limit;
699 	u8		traffic_class;
700 };
701 
702 struct ib_grh {
703 	__be32		version_tclass_flow;
704 	__be16		paylen;
705 	u8		next_hdr;
706 	u8		hop_limit;
707 	union ib_gid	sgid;
708 	union ib_gid	dgid;
709 };
710 
711 union rdma_network_hdr {
712 	struct ib_grh ibgrh;
713 	struct {
714 		/* The IB spec states that if it's IPv4, the IPv4 header
715 		 * occupies the last 20 bytes of the GRH space.
716 		 */
717 		u8		reserved[20];
718 		struct iphdr	roce4grh;
719 	};
720 };
721 
722 #define IB_QPN_MASK		0xFFFFFF
723 
724 enum {
725 	IB_MULTICAST_QPN = 0xffffff
726 };
727 
728 #define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
729 #define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
730 
731 enum ib_ah_flags {
732 	IB_AH_GRH	= 1
733 };
734 
735 enum ib_rate {
736 	IB_RATE_PORT_CURRENT = 0,
737 	IB_RATE_2_5_GBPS = 2,
738 	IB_RATE_5_GBPS   = 5,
739 	IB_RATE_10_GBPS  = 3,
740 	IB_RATE_20_GBPS  = 6,
741 	IB_RATE_30_GBPS  = 4,
742 	IB_RATE_40_GBPS  = 7,
743 	IB_RATE_60_GBPS  = 8,
744 	IB_RATE_80_GBPS  = 9,
745 	IB_RATE_120_GBPS = 10,
746 	IB_RATE_14_GBPS  = 11,
747 	IB_RATE_56_GBPS  = 12,
748 	IB_RATE_112_GBPS = 13,
749 	IB_RATE_168_GBPS = 14,
750 	IB_RATE_25_GBPS  = 15,
751 	IB_RATE_100_GBPS = 16,
752 	IB_RATE_200_GBPS = 17,
753 	IB_RATE_300_GBPS = 18
754 };
755 
756 /**
757  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
758  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
759  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
760  * @rate: rate to convert.
761  */
762 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
763 
764 /**
765  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
766  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
767  * @rate: rate to convert.
768  */
769 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
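
/*
 * Illustrative sketch: converting the rate enum to concrete numbers, e.g.
 * when reporting a static rate.
 *
 *	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	// 16, i.e. 16 * 2.5 Gb/s
 *	int mbps = ib_rate_to_mbps(IB_RATE_40_GBPS);	// 40000
 */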
770 
771 
772 /**
773  * enum ib_mr_type - memory region type
774  * @IB_MR_TYPE_MEM_REG:       memory region that is used for
775  *                            normal registration
776  * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
777  *                            signature operations (data-integrity
778  *                            capable regions)
779  * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
780  *                            registering arbitrary sg lists (without
781  *                            the normal mr constraints - see
782  *                            ib_map_mr_sg)
783  */
784 enum ib_mr_type {
785 	IB_MR_TYPE_MEM_REG,
786 	IB_MR_TYPE_SIGNATURE,
787 	IB_MR_TYPE_SG_GAPS,
788 };
789 
790 /**
791  * Signature types
792  * IB_SIG_TYPE_NONE: Unprotected.
793  * IB_SIG_TYPE_T10_DIF: Type T10-DIF
794  */
795 enum ib_signature_type {
796 	IB_SIG_TYPE_NONE,
797 	IB_SIG_TYPE_T10_DIF,
798 };
799 
800 /**
801  * Signature T10-DIF block-guard types
802  * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
803  * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
804  */
805 enum ib_t10_dif_bg_type {
806 	IB_T10DIF_CRC,
807 	IB_T10DIF_CSUM
808 };
809 
810 /**
811  * struct ib_t10_dif_domain - Parameters specific for T10-DIF
812  *     domain.
813  * @bg_type: T10-DIF block guard type (CRC|CSUM)
814  * @pi_interval: protection information interval.
815  * @bg: seed of guard computation.
816  * @app_tag: application tag of guard block
817  * @ref_tag: initial guard block reference tag.
818  * @ref_remap: Indicates whether the reftag increments with each block
819  * @app_escape: Indicate to skip block check if apptag=0xffff
820  * @ref_escape: Indicate to skip block check if reftag=0xffffffff
821  * @apptag_check_mask: check bitmask of application tag.
822  */
823 struct ib_t10_dif_domain {
824 	enum ib_t10_dif_bg_type bg_type;
825 	u16			pi_interval;
826 	u16			bg;
827 	u16			app_tag;
828 	u32			ref_tag;
829 	bool			ref_remap;
830 	bool			app_escape;
831 	bool			ref_escape;
832 	u16			apptag_check_mask;
833 };
834 
835 /**
836  * struct ib_sig_domain - Parameters for signature domain
837  * @sig_type: specific signature type
838  * @sig: union of all signature domain attributes that may
839  *     be used to set domain layout.
840  */
841 struct ib_sig_domain {
842 	enum ib_signature_type sig_type;
843 	union {
844 		struct ib_t10_dif_domain dif;
845 	} sig;
846 };
847 
848 /**
849  * struct ib_sig_attrs - Parameters for signature handover operation
850  * @check_mask: bitmask for signature byte check (8 bytes)
851  * @mem: memory domain layout descriptor.
852  * @wire: wire domain layout descriptor.
853  */
854 struct ib_sig_attrs {
855 	u8			check_mask;
856 	struct ib_sig_domain	mem;
857 	struct ib_sig_domain	wire;
858 };
859 
860 enum ib_sig_err_type {
861 	IB_SIG_BAD_GUARD,
862 	IB_SIG_BAD_REFTAG,
863 	IB_SIG_BAD_APPTAG,
864 };
865 
866 /**
867  * Signature check masks (8 bytes in total) according to the T10-PI standard:
868  *  -------- -------- ------------
869  * | GUARD  | APPTAG |   REFTAG   |
870  * |  2B    |  2B    |    4B      |
871  *  -------- -------- ------------
872  */
873 enum {
874 	IB_SIG_CHECK_GUARD	= 0xc0,
875 	IB_SIG_CHECK_APPTAG	= 0x30,
876 	IB_SIG_CHECK_REFTAG	= 0x0f,
877 };
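
/*
 * Illustrative sketch (hypothetical values): a common T10-DIF setup leaves
 * the memory domain unprotected, protects the wire domain, and asks the HCA
 * to check the guard and reference tags.
 *
 *	struct ib_sig_attrs sig_attrs = {};
 *
 *	sig_attrs.mem.sig_type = IB_SIG_TYPE_NONE;
 *	sig_attrs.wire.sig_type = IB_SIG_TYPE_T10_DIF;
 *	sig_attrs.wire.sig.dif.bg_type = IB_T10DIF_CRC;
 *	sig_attrs.wire.sig.dif.pi_interval = 512;
 *	sig_attrs.wire.sig.dif.ref_tag = 0;		// starting LBA
 *	sig_attrs.wire.sig.dif.ref_remap = true;
 *	sig_attrs.check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG;
 */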
878 
879 /**
880  * struct ib_sig_err - signature error descriptor
881  */
882 struct ib_sig_err {
883 	enum ib_sig_err_type	err_type;
884 	u32			expected;
885 	u32			actual;
886 	u64			sig_err_offset;
887 	u32			key;
888 };
889 
890 enum ib_mr_status_check {
891 	IB_MR_CHECK_SIG_STATUS = 1,
892 };
893 
894 /**
895  * struct ib_mr_status - Memory region status container
896  *
897  * @fail_status: Bitmask of MR checks status. For each
898  *     failed check a corresponding status bit is set.
899  * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
900  *     failure.
901  */
902 struct ib_mr_status {
903 	u32		    fail_status;
904 	struct ib_sig_err   sig_err;
905 };
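
/*
 * Illustrative sketch: after a signature-enabled transfer completes, the
 * ULP asks whether the HCA detected a signature error.  ib_check_mr_status()
 * is declared later in this header; "sig_mr" is a hypothetical signature MR.
 *
 *	struct ib_mr_status mr_status;
 *
 *	if (!ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status) &&
 *	    (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("signature error %d at offset %llu\n",
 *		       mr_status.sig_err.err_type,
 *		       mr_status.sig_err.sig_err_offset);
 */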
906 
907 /**
908  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
909  * enum.
910  * @mult: multiple to convert.
911  */
912 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
913 
914 enum rdma_ah_attr_type {
915 	RDMA_AH_ATTR_TYPE_UNDEFINED,
916 	RDMA_AH_ATTR_TYPE_IB,
917 	RDMA_AH_ATTR_TYPE_ROCE,
918 	RDMA_AH_ATTR_TYPE_OPA,
919 };
920 
921 struct ib_ah_attr {
922 	u16			dlid;
923 	u8			src_path_bits;
924 };
925 
926 struct roce_ah_attr {
927 	u8			dmac[ETH_ALEN];
928 };
929 
930 struct opa_ah_attr {
931 	u32			dlid;
932 	u8			src_path_bits;
933 	bool			make_grd;
934 };
935 
936 struct rdma_ah_attr {
937 	struct ib_global_route	grh;
938 	u8			sl;
939 	u8			static_rate;
940 	u8			port_num;
941 	u8			ah_flags;
942 	enum rdma_ah_attr_type type;
943 	union {
944 		struct ib_ah_attr ib;
945 		struct roce_ah_attr roce;
946 		struct opa_ah_attr opa;
947 	};
948 };
949 
950 enum ib_wc_status {
951 	IB_WC_SUCCESS,
952 	IB_WC_LOC_LEN_ERR,
953 	IB_WC_LOC_QP_OP_ERR,
954 	IB_WC_LOC_EEC_OP_ERR,
955 	IB_WC_LOC_PROT_ERR,
956 	IB_WC_WR_FLUSH_ERR,
957 	IB_WC_MW_BIND_ERR,
958 	IB_WC_BAD_RESP_ERR,
959 	IB_WC_LOC_ACCESS_ERR,
960 	IB_WC_REM_INV_REQ_ERR,
961 	IB_WC_REM_ACCESS_ERR,
962 	IB_WC_REM_OP_ERR,
963 	IB_WC_RETRY_EXC_ERR,
964 	IB_WC_RNR_RETRY_EXC_ERR,
965 	IB_WC_LOC_RDD_VIOL_ERR,
966 	IB_WC_REM_INV_RD_REQ_ERR,
967 	IB_WC_REM_ABORT_ERR,
968 	IB_WC_INV_EECN_ERR,
969 	IB_WC_INV_EEC_STATE_ERR,
970 	IB_WC_FATAL_ERR,
971 	IB_WC_RESP_TIMEOUT_ERR,
972 	IB_WC_GENERAL_ERR
973 };
974 
975 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
976 
977 enum ib_wc_opcode {
978 	IB_WC_SEND,
979 	IB_WC_RDMA_WRITE,
980 	IB_WC_RDMA_READ,
981 	IB_WC_COMP_SWAP,
982 	IB_WC_FETCH_ADD,
983 	IB_WC_LSO,
984 	IB_WC_LOCAL_INV,
985 	IB_WC_REG_MR,
986 	IB_WC_MASKED_COMP_SWAP,
987 	IB_WC_MASKED_FETCH_ADD,
988 /*
989  * Set value of IB_WC_RECV so consumers can test if a completion is a
990  * receive by testing (opcode & IB_WC_RECV).
991  */
992 	IB_WC_RECV			= 1 << 7,
993 	IB_WC_RECV_RDMA_WITH_IMM
994 };
995 
996 enum ib_wc_flags {
997 	IB_WC_GRH		= 1,
998 	IB_WC_WITH_IMM		= (1<<1),
999 	IB_WC_WITH_INVALIDATE	= (1<<2),
1000 	IB_WC_IP_CSUM_OK	= (1<<3),
1001 	IB_WC_WITH_SMAC		= (1<<4),
1002 	IB_WC_WITH_VLAN		= (1<<5),
1003 	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
1004 };
1005 
1006 struct ib_wc {
1007 	union {
1008 		u64		wr_id;
1009 		struct ib_cqe	*wr_cqe;
1010 	};
1011 	enum ib_wc_status	status;
1012 	enum ib_wc_opcode	opcode;
1013 	u32			vendor_err;
1014 	u32			byte_len;
1015 	struct ib_qp	       *qp;
1016 	union {
1017 		__be32		imm_data;
1018 		u32		invalidate_rkey;
1019 	} ex;
1020 	u32			src_qp;
1021 	u32			slid;
1022 	int			wc_flags;
1023 	u16			pkey_index;
1024 	u8			sl;
1025 	u8			dlid_path_bits;
1026 	u8			port_num;	/* valid only for DR SMPs on switches */
1027 	u8			smac[ETH_ALEN];
1028 	u16			vlan_id;
1029 	u8			network_hdr_type;
1030 };
1031 
1032 enum ib_cq_notify_flags {
1033 	IB_CQ_SOLICITED			= 1 << 0,
1034 	IB_CQ_NEXT_COMP			= 1 << 1,
1035 	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1036 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
1037 };
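
/*
 * Illustrative sketch: the usual re-arm pattern for consumers that poll a
 * CQ themselves.  ib_poll_cq() and ib_req_notify_cq() are declared later in
 * this header; "my_cq_handler" and "my_process_wc" are hypothetical.
 *
 *	static void my_cq_handler(struct ib_cq *cq, void *ctx)
 *	{
 *		struct ib_wc wc;
 *
 *		do {
 *			while (ib_poll_cq(cq, 1, &wc) > 0)
 *				my_process_wc(&wc);
 *		} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *					      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *	}
 */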
1038 
1039 enum ib_srq_type {
1040 	IB_SRQT_BASIC,
1041 	IB_SRQT_XRC,
1042 	IB_SRQT_TM,
1043 };
1044 
1045 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1046 {
1047 	return srq_type == IB_SRQT_XRC ||
1048 	       srq_type == IB_SRQT_TM;
1049 }
1050 
1051 enum ib_srq_attr_mask {
1052 	IB_SRQ_MAX_WR	= 1 << 0,
1053 	IB_SRQ_LIMIT	= 1 << 1,
1054 };
1055 
1056 struct ib_srq_attr {
1057 	u32	max_wr;
1058 	u32	max_sge;
1059 	u32	srq_limit;
1060 };
1061 
1062 struct ib_srq_init_attr {
1063 	void		      (*event_handler)(struct ib_event *, void *);
1064 	void		       *srq_context;
1065 	struct ib_srq_attr	attr;
1066 	enum ib_srq_type	srq_type;
1067 
1068 	struct {
1069 		struct ib_cq   *cq;
1070 		union {
1071 			struct {
1072 				struct ib_xrcd *xrcd;
1073 			} xrc;
1074 
1075 			struct {
1076 				u32		max_num_tags;
1077 			} tag_matching;
1078 		};
1079 	} ext;
1080 };
1081 
1082 struct ib_qp_cap {
1083 	u32	max_send_wr;
1084 	u32	max_recv_wr;
1085 	u32	max_send_sge;
1086 	u32	max_recv_sge;
1087 	u32	max_inline_data;
1088 
1089 	/*
1090 	 * Maximum number of rdma_rw_ctx structures in flight at a time.
1091 	 * ib_create_qp() will calculate the right number of WRs and MRs
1092 	 * needed, based on this value.
1093 	 */
1094 	u32	max_rdma_ctxs;
1095 };
1096 
1097 enum ib_sig_type {
1098 	IB_SIGNAL_ALL_WR,
1099 	IB_SIGNAL_REQ_WR
1100 };
1101 
1102 enum ib_qp_type {
1103 	/*
1104 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1105 	 * here (and in that order) since the MAD layer uses them as
1106 	 * indices into a 2-entry table.
1107 	 */
1108 	IB_QPT_SMI,
1109 	IB_QPT_GSI,
1110 
1111 	IB_QPT_RC,
1112 	IB_QPT_UC,
1113 	IB_QPT_UD,
1114 	IB_QPT_RAW_IPV6,
1115 	IB_QPT_RAW_ETHERTYPE,
1116 	IB_QPT_RAW_PACKET = 8,
1117 	IB_QPT_XRC_INI = 9,
1118 	IB_QPT_XRC_TGT,
1119 	IB_QPT_MAX,
1120 	IB_QPT_DRIVER = 0xFF,
1121 	/* Reserve a range for qp types internal to the low level driver.
1122 	 * These qp types will not be visible at the IB core layer, so the
1123 	 * IB_QPT_MAX usages should not be affected in the core layer
1124 	 */
1125 	IB_QPT_RESERVED1 = 0x1000,
1126 	IB_QPT_RESERVED2,
1127 	IB_QPT_RESERVED3,
1128 	IB_QPT_RESERVED4,
1129 	IB_QPT_RESERVED5,
1130 	IB_QPT_RESERVED6,
1131 	IB_QPT_RESERVED7,
1132 	IB_QPT_RESERVED8,
1133 	IB_QPT_RESERVED9,
1134 	IB_QPT_RESERVED10,
1135 };
1136 
1137 enum ib_qp_create_flags {
1138 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1139 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
1140 	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1141 	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1142 	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1143 	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1144 	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
1145 	/* FREE					= 1 << 7, */
1146 	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
1147 	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
1148 	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
1149 	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
1150 	/* reserve bits 26-31 for low level drivers' internal use */
1151 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1152 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1153 };
1154 
1155 /*
1156  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1157  * callback to destroy the passed in QP.
1158  */
1159 
1160 struct ib_qp_init_attr {
1161 	void                  (*event_handler)(struct ib_event *, void *);
1162 	void		       *qp_context;
1163 	struct ib_cq	       *send_cq;
1164 	struct ib_cq	       *recv_cq;
1165 	struct ib_srq	       *srq;
1166 	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1167 	struct ib_qp_cap	cap;
1168 	enum ib_sig_type	sq_sig_type;
1169 	enum ib_qp_type		qp_type;
1170 	enum ib_qp_create_flags	create_flags;
1171 
1172 	/*
1173 	 * Only needed for special QP types, or when using the RW API.
1174 	 */
1175 	u8			port_num;
1176 	struct ib_rwq_ind_table *rwq_ind_tbl;
1177 	u32			source_qpn;
1178 };
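
/*
 * Illustrative sketch: creating an RC QP.  ib_create_qp() is declared later
 * in this header; the capability values and "my_qp_event_handler" are
 * hypothetical.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler	= my_qp_event_handler,
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.cap		= {
 *			.max_send_wr	= 128,
 *			.max_recv_wr	= 128,
 *			.max_send_sge	= 2,
 *			.max_recv_sge	= 1,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */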
1179 
1180 struct ib_qp_open_attr {
1181 	void                  (*event_handler)(struct ib_event *, void *);
1182 	void		       *qp_context;
1183 	u32			qp_num;
1184 	enum ib_qp_type		qp_type;
1185 };
1186 
1187 enum ib_rnr_timeout {
1188 	IB_RNR_TIMER_655_36 =  0,
1189 	IB_RNR_TIMER_000_01 =  1,
1190 	IB_RNR_TIMER_000_02 =  2,
1191 	IB_RNR_TIMER_000_03 =  3,
1192 	IB_RNR_TIMER_000_04 =  4,
1193 	IB_RNR_TIMER_000_06 =  5,
1194 	IB_RNR_TIMER_000_08 =  6,
1195 	IB_RNR_TIMER_000_12 =  7,
1196 	IB_RNR_TIMER_000_16 =  8,
1197 	IB_RNR_TIMER_000_24 =  9,
1198 	IB_RNR_TIMER_000_32 = 10,
1199 	IB_RNR_TIMER_000_48 = 11,
1200 	IB_RNR_TIMER_000_64 = 12,
1201 	IB_RNR_TIMER_000_96 = 13,
1202 	IB_RNR_TIMER_001_28 = 14,
1203 	IB_RNR_TIMER_001_92 = 15,
1204 	IB_RNR_TIMER_002_56 = 16,
1205 	IB_RNR_TIMER_003_84 = 17,
1206 	IB_RNR_TIMER_005_12 = 18,
1207 	IB_RNR_TIMER_007_68 = 19,
1208 	IB_RNR_TIMER_010_24 = 20,
1209 	IB_RNR_TIMER_015_36 = 21,
1210 	IB_RNR_TIMER_020_48 = 22,
1211 	IB_RNR_TIMER_030_72 = 23,
1212 	IB_RNR_TIMER_040_96 = 24,
1213 	IB_RNR_TIMER_061_44 = 25,
1214 	IB_RNR_TIMER_081_92 = 26,
1215 	IB_RNR_TIMER_122_88 = 27,
1216 	IB_RNR_TIMER_163_84 = 28,
1217 	IB_RNR_TIMER_245_76 = 29,
1218 	IB_RNR_TIMER_327_68 = 30,
1219 	IB_RNR_TIMER_491_52 = 31
1220 };
1221 
1222 enum ib_qp_attr_mask {
1223 	IB_QP_STATE			= 1,
1224 	IB_QP_CUR_STATE			= (1<<1),
1225 	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1226 	IB_QP_ACCESS_FLAGS		= (1<<3),
1227 	IB_QP_PKEY_INDEX		= (1<<4),
1228 	IB_QP_PORT			= (1<<5),
1229 	IB_QP_QKEY			= (1<<6),
1230 	IB_QP_AV			= (1<<7),
1231 	IB_QP_PATH_MTU			= (1<<8),
1232 	IB_QP_TIMEOUT			= (1<<9),
1233 	IB_QP_RETRY_CNT			= (1<<10),
1234 	IB_QP_RNR_RETRY			= (1<<11),
1235 	IB_QP_RQ_PSN			= (1<<12),
1236 	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1237 	IB_QP_ALT_PATH			= (1<<14),
1238 	IB_QP_MIN_RNR_TIMER		= (1<<15),
1239 	IB_QP_SQ_PSN			= (1<<16),
1240 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1241 	IB_QP_PATH_MIG_STATE		= (1<<18),
1242 	IB_QP_CAP			= (1<<19),
1243 	IB_QP_DEST_QPN			= (1<<20),
1244 	IB_QP_RESERVED1			= (1<<21),
1245 	IB_QP_RESERVED2			= (1<<22),
1246 	IB_QP_RESERVED3			= (1<<23),
1247 	IB_QP_RESERVED4			= (1<<24),
1248 	IB_QP_RATE_LIMIT		= (1<<25),
1249 };
1250 
1251 enum ib_qp_state {
1252 	IB_QPS_RESET,
1253 	IB_QPS_INIT,
1254 	IB_QPS_RTR,
1255 	IB_QPS_RTS,
1256 	IB_QPS_SQD,
1257 	IB_QPS_SQE,
1258 	IB_QPS_ERR
1259 };
1260 
1261 enum ib_mig_state {
1262 	IB_MIG_MIGRATED,
1263 	IB_MIG_REARM,
1264 	IB_MIG_ARMED
1265 };
1266 
1267 enum ib_mw_type {
1268 	IB_MW_TYPE_1 = 1,
1269 	IB_MW_TYPE_2 = 2
1270 };
1271 
1272 struct ib_qp_attr {
1273 	enum ib_qp_state	qp_state;
1274 	enum ib_qp_state	cur_qp_state;
1275 	enum ib_mtu		path_mtu;
1276 	enum ib_mig_state	path_mig_state;
1277 	u32			qkey;
1278 	u32			rq_psn;
1279 	u32			sq_psn;
1280 	u32			dest_qp_num;
1281 	int			qp_access_flags;
1282 	struct ib_qp_cap	cap;
1283 	struct rdma_ah_attr	ah_attr;
1284 	struct rdma_ah_attr	alt_ah_attr;
1285 	u16			pkey_index;
1286 	u16			alt_pkey_index;
1287 	u8			en_sqd_async_notify;
1288 	u8			sq_draining;
1289 	u8			max_rd_atomic;
1290 	u8			max_dest_rd_atomic;
1291 	u8			min_rnr_timer;
1292 	u8			port_num;
1293 	u8			timeout;
1294 	u8			retry_cnt;
1295 	u8			rnr_retry;
1296 	u8			alt_port_num;
1297 	u8			alt_timeout;
1298 	u32			rate_limit;
1299 };
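
/*
 * Illustrative sketch: moving a newly created RC QP from RESET to INIT.
 * The mask selects which ib_qp_attr fields are valid for the call;
 * ib_modify_qp() is declared later in this header.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */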
1300 
1301 enum ib_wr_opcode {
1302 	IB_WR_RDMA_WRITE,
1303 	IB_WR_RDMA_WRITE_WITH_IMM,
1304 	IB_WR_SEND,
1305 	IB_WR_SEND_WITH_IMM,
1306 	IB_WR_RDMA_READ,
1307 	IB_WR_ATOMIC_CMP_AND_SWP,
1308 	IB_WR_ATOMIC_FETCH_AND_ADD,
1309 	IB_WR_LSO,
1310 	IB_WR_SEND_WITH_INV,
1311 	IB_WR_RDMA_READ_WITH_INV,
1312 	IB_WR_LOCAL_INV,
1313 	IB_WR_REG_MR,
1314 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1315 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1316 	IB_WR_REG_SIG_MR,
1317 	/* reserve values for low level drivers' internal use.
1318 	 * These values will not be used at all in the ib core layer.
1319 	 */
1320 	IB_WR_RESERVED1 = 0xf0,
1321 	IB_WR_RESERVED2,
1322 	IB_WR_RESERVED3,
1323 	IB_WR_RESERVED4,
1324 	IB_WR_RESERVED5,
1325 	IB_WR_RESERVED6,
1326 	IB_WR_RESERVED7,
1327 	IB_WR_RESERVED8,
1328 	IB_WR_RESERVED9,
1329 	IB_WR_RESERVED10,
1330 };
1331 
1332 enum ib_send_flags {
1333 	IB_SEND_FENCE		= 1,
1334 	IB_SEND_SIGNALED	= (1<<1),
1335 	IB_SEND_SOLICITED	= (1<<2),
1336 	IB_SEND_INLINE		= (1<<3),
1337 	IB_SEND_IP_CSUM		= (1<<4),
1338 
1339 	/* reserve bits 26-31 for low level drivers' internal use */
1340 	IB_SEND_RESERVED_START	= (1 << 26),
1341 	IB_SEND_RESERVED_END	= (1 << 31),
1342 };
1343 
1344 struct ib_sge {
1345 	u64	addr;
1346 	u32	length;
1347 	u32	lkey;
1348 };
1349 
1350 struct ib_cqe {
1351 	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1352 };
1353 
1354 struct ib_send_wr {
1355 	struct ib_send_wr      *next;
1356 	union {
1357 		u64		wr_id;
1358 		struct ib_cqe	*wr_cqe;
1359 	};
1360 	struct ib_sge	       *sg_list;
1361 	int			num_sge;
1362 	enum ib_wr_opcode	opcode;
1363 	int			send_flags;
1364 	union {
1365 		__be32		imm_data;
1366 		u32		invalidate_rkey;
1367 	} ex;
1368 };
1369 
1370 struct ib_rdma_wr {
1371 	struct ib_send_wr	wr;
1372 	u64			remote_addr;
1373 	u32			rkey;
1374 };
1375 
1376 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
1377 {
1378 	return container_of(wr, struct ib_rdma_wr, wr);
1379 }
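
/*
 * Illustrative sketch: posting an RDMA WRITE through the rdma_wr container.
 * ib_post_send() is declared later in this header; the DMA address, length,
 * remote address, rkey and "my_cqe" are hypothetical.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.wr_cqe		= &my_cqe,
 *			.opcode		= IB_WR_RDMA_WRITE,
 *			.send_flags	= IB_SEND_SIGNALED,
 *			.sg_list	= &sge,
 *			.num_sge	= 1,
 *		},
 *		.remote_addr	= remote_addr,
 *		.rkey		= rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */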
1380 
1381 struct ib_atomic_wr {
1382 	struct ib_send_wr	wr;
1383 	u64			remote_addr;
1384 	u64			compare_add;
1385 	u64			swap;
1386 	u64			compare_add_mask;
1387 	u64			swap_mask;
1388 	u32			rkey;
1389 };
1390 
1391 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
1392 {
1393 	return container_of(wr, struct ib_atomic_wr, wr);
1394 }
1395 
1396 struct ib_ud_wr {
1397 	struct ib_send_wr	wr;
1398 	struct ib_ah		*ah;
1399 	void			*header;
1400 	int			hlen;
1401 	int			mss;
1402 	u32			remote_qpn;
1403 	u32			remote_qkey;
1404 	u16			pkey_index; /* valid for GSI only */
1405 	u8			port_num;   /* valid for DR SMPs on switch only */
1406 };
1407 
1408 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
1409 {
1410 	return container_of(wr, struct ib_ud_wr, wr);
1411 }
1412 
1413 struct ib_reg_wr {
1414 	struct ib_send_wr	wr;
1415 	struct ib_mr		*mr;
1416 	u32			key;
1417 	int			access;
1418 };
1419 
1420 static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
1421 {
1422 	return container_of(wr, struct ib_reg_wr, wr);
1423 }
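
/*
 * Illustrative sketch: fast registration maps a scatterlist into an MR with
 * ib_map_mr_sg() (declared later in this header) and then posts an
 * IB_WR_REG_MR work request built through the reg_wr container.  "sg",
 * "nents" and the access flags are hypothetical.
 *
 *	int n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *
 *	if (n < nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	struct ib_reg_wr wr = {
 *		.wr.opcode	= IB_WR_REG_MR,
 *		.wr.send_flags	= IB_SEND_SIGNALED,
 *		.mr		= mr,
 *		.key		= mr->rkey,
 *		.access		= IB_ACCESS_LOCAL_WRITE |
 *				  IB_ACCESS_REMOTE_READ,
 *	};
 */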
1424 
1425 struct ib_sig_handover_wr {
1426 	struct ib_send_wr	wr;
1427 	struct ib_sig_attrs    *sig_attrs;
1428 	struct ib_mr	       *sig_mr;
1429 	int			access_flags;
1430 	struct ib_sge	       *prot;
1431 };
1432 
1433 static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1434 {
1435 	return container_of(wr, struct ib_sig_handover_wr, wr);
1436 }
1437 
1438 struct ib_recv_wr {
1439 	struct ib_recv_wr      *next;
1440 	union {
1441 		u64		wr_id;
1442 		struct ib_cqe	*wr_cqe;
1443 	};
1444 	struct ib_sge	       *sg_list;
1445 	int			num_sge;
1446 };
1447 
1448 enum ib_access_flags {
1449 	IB_ACCESS_LOCAL_WRITE	= 1,
1450 	IB_ACCESS_REMOTE_WRITE	= (1<<1),
1451 	IB_ACCESS_REMOTE_READ	= (1<<2),
1452 	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
1453 	IB_ACCESS_MW_BIND	= (1<<4),
1454 	IB_ZERO_BASED		= (1<<5),
1455 	IB_ACCESS_ON_DEMAND     = (1<<6),
1456 	IB_ACCESS_HUGETLB	= (1<<7),
1457 };
1458 
1459 /*
1460  * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1461  * are hidden here instead of a uapi header!
1462  */
1463 enum ib_mr_rereg_flags {
1464 	IB_MR_REREG_TRANS	= 1,
1465 	IB_MR_REREG_PD		= (1<<1),
1466 	IB_MR_REREG_ACCESS	= (1<<2),
1467 	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1468 };
1469 
1470 struct ib_fmr_attr {
1471 	int	max_pages;
1472 	int	max_maps;
1473 	u8	page_shift;
1474 };
1475 
1476 struct ib_umem;
1477 
1478 enum rdma_remove_reason {
1479 	/* Userspace requested uobject deletion. Call could fail */
1480 	RDMA_REMOVE_DESTROY,
1481 	/* Context deletion. This call should delete the actual object itself */
1482 	RDMA_REMOVE_CLOSE,
1483 	/* Driver is being hot-unplugged. This call should delete the actual object itself */
1484 	RDMA_REMOVE_DRIVER_REMOVE,
1485 	/* Context is being cleaned-up, but commit was just completed */
1486 	RDMA_REMOVE_DURING_CLEANUP,
1487 };
1488 
1489 struct ib_rdmacg_object {
1490 #ifdef CONFIG_CGROUP_RDMA
1491 	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
1492 #endif
1493 };
1494 
1495 struct ib_ucontext {
1496 	struct ib_device       *device;
1497 	struct ib_uverbs_file  *ufile;
1498 	int			closing;
1499 
1500 	/* locking the uobjects_list */
1501 	struct mutex		uobjects_lock;
1502 	struct list_head	uobjects;
1503 	/* protects cleanup process from other actions */
1504 	struct rw_semaphore	cleanup_rwsem;
1505 	enum rdma_remove_reason cleanup_reason;
1506 
1507 	struct pid             *tgid;
1508 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1509 	struct rb_root_cached   umem_tree;
1510 	/*
1511 	 * Protects umem_tree, as well as odp_mrs_count and
1512 	 * mmu notifiers registration.
1513 	 */
1514 	struct rw_semaphore	umem_rwsem;
1515 	void (*invalidate_range)(struct ib_umem *umem,
1516 				 unsigned long start, unsigned long end);
1517 
1518 	struct mmu_notifier	mn;
1519 	atomic_t		notifier_count;
1520 	/* A list of umems that don't have private mmu notifier counters yet. */
1521 	struct list_head	no_private_counters;
1522 	int                     odp_mrs_count;
1523 #endif
1524 
1525 	struct ib_rdmacg_object	cg_obj;
1526 };
1527 
1528 struct ib_uobject {
1529 	u64			user_handle;	/* handle given to us by userspace */
1530 	struct ib_ucontext     *context;	/* associated user context */
1531 	void		       *object;		/* containing object */
1532 	struct list_head	list;		/* link to context's list */
1533 	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
1534 	int			id;		/* index into kernel idr */
1535 	struct kref		ref;
1536 	atomic_t		usecnt;		/* protects exclusive access */
1537 	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1538 
1539 	const struct uverbs_obj_type *type;
1540 };
1541 
1542 struct ib_uobject_file {
1543 	struct ib_uobject	uobj;
1544 	/* ufile contains the lock between context release and file close */
1545 	struct ib_uverbs_file	*ufile;
1546 };
1547 
1548 struct ib_udata {
1549 	const void __user *inbuf;
1550 	void __user *outbuf;
1551 	size_t       inlen;
1552 	size_t       outlen;
1553 };
1554 
1555 struct ib_pd {
1556 	u32			local_dma_lkey;
1557 	u32			flags;
1558 	struct ib_device       *device;
1559 	struct ib_uobject      *uobject;
1560 	atomic_t          	usecnt; /* count all resources */
1561 
1562 	u32			unsafe_global_rkey;
1563 
1564 	/*
1565 	 * Implementation details of the RDMA core, don't use in drivers:
1566 	 */
1567 	struct ib_mr	       *__internal_mr;
1568 	struct rdma_restrack_entry res;
1569 };
1570 
1571 struct ib_xrcd {
1572 	struct ib_device       *device;
1573 	atomic_t		usecnt; /* count all exposed resources */
1574 	struct inode	       *inode;
1575 
1576 	struct mutex		tgt_qp_mutex;
1577 	struct list_head	tgt_qp_list;
1578 };
1579 
1580 struct ib_ah {
1581 	struct ib_device	*device;
1582 	struct ib_pd		*pd;
1583 	struct ib_uobject	*uobject;
1584 	const struct ib_gid_attr *sgid_attr;
1585 	enum rdma_ah_attr_type	type;
1586 };
1587 
1588 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1589 
1590 enum ib_poll_context {
1591 	IB_POLL_DIRECT,		/* caller context, no hw completions */
1592 	IB_POLL_SOFTIRQ,	/* poll from softirq context */
1593 	IB_POLL_WORKQUEUE,	/* poll from workqueue */
1594 };
1595 
1596 struct ib_cq {
1597 	struct ib_device       *device;
1598 	struct ib_uobject      *uobject;
1599 	ib_comp_handler   	comp_handler;
1600 	void                  (*event_handler)(struct ib_event *, void *);
1601 	void                   *cq_context;
1602 	int               	cqe;
1603 	atomic_t          	usecnt; /* count number of work queues */
1604 	enum ib_poll_context	poll_ctx;
1605 	struct ib_wc		*wc;
1606 	union {
1607 		struct irq_poll		iop;
1608 		struct work_struct	work;
1609 	};
1610 	/*
1611 	 * Implementation details of the RDMA core, don't use in drivers:
1612 	 */
1613 	struct rdma_restrack_entry res;
1614 };
1615 
1616 struct ib_srq {
1617 	struct ib_device       *device;
1618 	struct ib_pd	       *pd;
1619 	struct ib_uobject      *uobject;
1620 	void		      (*event_handler)(struct ib_event *, void *);
1621 	void		       *srq_context;
1622 	enum ib_srq_type	srq_type;
1623 	atomic_t		usecnt;
1624 
1625 	struct {
1626 		struct ib_cq   *cq;
1627 		union {
1628 			struct {
1629 				struct ib_xrcd *xrcd;
1630 				u32		srq_num;
1631 			} xrc;
1632 		};
1633 	} ext;
1634 };
1635 
1636 enum ib_raw_packet_caps {
1637 	/* Stripping the cvlan from an incoming packet and reporting it in the
1638 	 * matching work completion is supported.
1639 	 */
1640 	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
1641 	/* Scattering the FCS field of an incoming packet to host memory is supported.
1642 	 */
1643 	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
1644 	/* Checksum offloads are supported (for both send and receive). */
1645 	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
1646 	/* When a packet is received for an RQ with no receive WQEs, the
1647 	 * packet processing is delayed.
1648 	 */
1649 	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
1650 };
1651 
1652 enum ib_wq_type {
1653 	IB_WQT_RQ
1654 };
1655 
1656 enum ib_wq_state {
1657 	IB_WQS_RESET,
1658 	IB_WQS_RDY,
1659 	IB_WQS_ERR
1660 };
1661 
1662 struct ib_wq {
1663 	struct ib_device       *device;
1664 	struct ib_uobject      *uobject;
1665 	void		    *wq_context;
1666 	void		    (*event_handler)(struct ib_event *, void *);
1667 	struct ib_pd	       *pd;
1668 	struct ib_cq	       *cq;
1669 	u32		wq_num;
1670 	enum ib_wq_state       state;
1671 	enum ib_wq_type	wq_type;
1672 	atomic_t		usecnt;
1673 };
1674 
1675 enum ib_wq_flags {
1676 	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
1677 	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
1678 	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
1679 	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1680 };
1681 
1682 struct ib_wq_init_attr {
1683 	void		       *wq_context;
1684 	enum ib_wq_type	wq_type;
1685 	u32		max_wr;
1686 	u32		max_sge;
1687 	struct	ib_cq	       *cq;
1688 	void		    (*event_handler)(struct ib_event *, void *);
1689 	u32		create_flags; /* Use enum ib_wq_flags */
1690 };
1691 
1692 enum ib_wq_attr_mask {
1693 	IB_WQ_STATE		= 1 << 0,
1694 	IB_WQ_CUR_STATE		= 1 << 1,
1695 	IB_WQ_FLAGS		= 1 << 2,
1696 };
1697 
1698 struct ib_wq_attr {
1699 	enum	ib_wq_state	wq_state;
1700 	enum	ib_wq_state	curr_wq_state;
1701 	u32			flags; /* Use enum ib_wq_flags */
1702 	u32			flags_mask; /* Use enum ib_wq_flags */
1703 };
1704 
1705 struct ib_rwq_ind_table {
1706 	struct ib_device	*device;
1707 	struct ib_uobject      *uobject;
1708 	atomic_t		usecnt;
1709 	u32		ind_tbl_num;
1710 	u32		log_ind_tbl_size;
1711 	struct ib_wq	**ind_tbl;
1712 };
1713 
1714 struct ib_rwq_ind_table_init_attr {
1715 	u32		log_ind_tbl_size;
1716 	/* Each entry is a pointer to a Receive Work Queue */
1717 	struct ib_wq	**ind_tbl;
1718 };
1719 
1720 enum port_pkey_state {
1721 	IB_PORT_PKEY_NOT_VALID = 0,
1722 	IB_PORT_PKEY_VALID = 1,
1723 	IB_PORT_PKEY_LISTED = 2,
1724 };
1725 
1726 struct ib_qp_security;
1727 
1728 struct ib_port_pkey {
1729 	enum port_pkey_state	state;
1730 	u16			pkey_index;
1731 	u8			port_num;
1732 	struct list_head	qp_list;
1733 	struct list_head	to_error_list;
1734 	struct ib_qp_security  *sec;
1735 };
1736 
1737 struct ib_ports_pkeys {
1738 	struct ib_port_pkey	main;
1739 	struct ib_port_pkey	alt;
1740 };
1741 
1742 struct ib_qp_security {
1743 	struct ib_qp	       *qp;
1744 	struct ib_device       *dev;
1745 	/* Hold this mutex when changing port and pkey settings. */
1746 	struct mutex		mutex;
1747 	struct ib_ports_pkeys  *ports_pkeys;
1748 	/* A list of all open shared QP handles.  Required to enforce security
1749 	 * properly for all users of a shared QP.
1750 	 */
1751 	struct list_head        shared_qp_list;
1752 	void                   *security;
1753 	bool			destroying;
1754 	atomic_t		error_list_count;
1755 	struct completion	error_complete;
1756 	int			error_comps_pending;
1757 };
1758 
1759 /*
1760  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1761  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1762  */
1763 struct ib_qp {
1764 	struct ib_device       *device;
1765 	struct ib_pd	       *pd;
1766 	struct ib_cq	       *send_cq;
1767 	struct ib_cq	       *recv_cq;
1768 	spinlock_t		mr_lock;
1769 	int			mrs_used;
1770 	struct list_head	rdma_mrs;
1771 	struct list_head	sig_mrs;
1772 	struct ib_srq	       *srq;
1773 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1774 	struct list_head	xrcd_list;
1775 
1776 	/* count times opened, mcast attaches, flow attaches */
1777 	atomic_t		usecnt;
1778 	struct list_head	open_list;
1779 	struct ib_qp           *real_qp;
1780 	struct ib_uobject      *uobject;
1781 	void                  (*event_handler)(struct ib_event *, void *);
1782 	void		       *qp_context;
1783 	/* sgid_attrs associated with the AVs */
1784 	const struct ib_gid_attr *av_sgid_attr;
1785 	const struct ib_gid_attr *alt_path_sgid_attr;
1786 	u32			qp_num;
1787 	u32			max_write_sge;
1788 	u32			max_read_sge;
1789 	enum ib_qp_type		qp_type;
1790 	struct ib_rwq_ind_table *rwq_ind_tbl;
1791 	struct ib_qp_security  *qp_sec;
1792 	u8			port;
1793 
1794 	/*
1795 	 * Implementation details of the RDMA core, don't use in drivers:
1796 	 */
1797 	struct rdma_restrack_entry     res;
1798 };
1799 
1800 struct ib_dm {
1801 	struct ib_device  *device;
1802 	u32		   length;
1803 	u32		   flags;
1804 	struct ib_uobject *uobject;
1805 	atomic_t	   usecnt;
1806 };
1807 
1808 struct ib_mr {
1809 	struct ib_device  *device;
1810 	struct ib_pd	  *pd;
1811 	u32		   lkey;
1812 	u32		   rkey;
1813 	u64		   iova;
1814 	u64		   length;
1815 	unsigned int	   page_size;
1816 	bool		   need_inval;
1817 	union {
1818 		struct ib_uobject	*uobject;	/* user */
1819 		struct list_head	qp_entry;	/* FR */
1820 	};
1821 
1822 	struct ib_dm      *dm;
1823 
1824 	/*
1825 	 * Implementation details of the RDMA core, don't use in drivers:
1826 	 */
1827 	struct rdma_restrack_entry res;
1828 };
1829 
1830 struct ib_mw {
1831 	struct ib_device	*device;
1832 	struct ib_pd		*pd;
1833 	struct ib_uobject	*uobject;
1834 	u32			rkey;
1835 	enum ib_mw_type         type;
1836 };
1837 
1838 struct ib_fmr {
1839 	struct ib_device	*device;
1840 	struct ib_pd		*pd;
1841 	struct list_head	list;
1842 	u32			lkey;
1843 	u32			rkey;
1844 };
1845 
1846 /* Supported steering options */
1847 enum ib_flow_attr_type {
1848 	/* steering according to rule specifications */
1849 	IB_FLOW_ATTR_NORMAL		= 0x0,
1850 	/* default unicast and multicast rule -
1851 	 * receive all Eth traffic which isn't steered to any QP
1852 	 */
1853 	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1854 	/* default multicast rule -
1855 	 * receive all Eth multicast traffic which isn't steered to any QP
1856 	 */
1857 	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1858 	/* sniffer rule - receive all port traffic */
1859 	IB_FLOW_ATTR_SNIFFER		= 0x3
1860 };
1861 
1862 /* Supported steering header types */
1863 enum ib_flow_spec_type {
1864 	/* L2 headers*/
1865 	IB_FLOW_SPEC_ETH		= 0x20,
1866 	IB_FLOW_SPEC_IB			= 0x22,
1867 	/* L3 header*/
1868 	IB_FLOW_SPEC_IPV4		= 0x30,
1869 	IB_FLOW_SPEC_IPV6		= 0x31,
1870 	IB_FLOW_SPEC_ESP                = 0x34,
1871 	/* L4 headers*/
1872 	IB_FLOW_SPEC_TCP		= 0x40,
1873 	IB_FLOW_SPEC_UDP		= 0x41,
1874 	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1875 	IB_FLOW_SPEC_GRE		= 0x51,
1876 	IB_FLOW_SPEC_MPLS		= 0x60,
1877 	IB_FLOW_SPEC_INNER		= 0x100,
1878 	/* Actions */
1879 	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1880 	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1881 	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
1882 	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1883 };
1884 #define IB_FLOW_SPEC_LAYER_MASK	0xF0
1885 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1886 
1887 /* Flow steering rule priority is set according to its domain.
1888  * Lower domain value means higher priority.
1889  */
1890 enum ib_flow_domain {
1891 	IB_FLOW_DOMAIN_USER,
1892 	IB_FLOW_DOMAIN_ETHTOOL,
1893 	IB_FLOW_DOMAIN_RFS,
1894 	IB_FLOW_DOMAIN_NIC,
1895 	IB_FLOW_DOMAIN_NUM /* Must be last */
1896 };
1897 
1898 enum ib_flow_flags {
1899 	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1900 	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1901 	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1902 };
1903 
1904 struct ib_flow_eth_filter {
1905 	u8	dst_mac[6];
1906 	u8	src_mac[6];
1907 	__be16	ether_type;
1908 	__be16	vlan_tag;
1909 	/* Must be last */
1910 	u8	real_sz[0];
1911 };
1912 
1913 struct ib_flow_spec_eth {
1914 	u32			  type;
1915 	u16			  size;
1916 	struct ib_flow_eth_filter val;
1917 	struct ib_flow_eth_filter mask;
1918 };
1919 
1920 struct ib_flow_ib_filter {
1921 	__be16 dlid;
1922 	__u8   sl;
1923 	/* Must be last */
1924 	u8	real_sz[0];
1925 };
1926 
1927 struct ib_flow_spec_ib {
1928 	u32			 type;
1929 	u16			 size;
1930 	struct ib_flow_ib_filter val;
1931 	struct ib_flow_ib_filter mask;
1932 };
1933 
1934 /* IPv4 header flags */
1935 enum ib_ipv4_flags {
1936 	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1937 	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1938 				    last have this flag set */
1939 };
1940 
1941 struct ib_flow_ipv4_filter {
1942 	__be32	src_ip;
1943 	__be32	dst_ip;
1944 	u8	proto;
1945 	u8	tos;
1946 	u8	ttl;
1947 	u8	flags;
1948 	/* Must be last */
1949 	u8	real_sz[0];
1950 };
1951 
1952 struct ib_flow_spec_ipv4 {
1953 	u32			   type;
1954 	u16			   size;
1955 	struct ib_flow_ipv4_filter val;
1956 	struct ib_flow_ipv4_filter mask;
1957 };
1958 
1959 struct ib_flow_ipv6_filter {
1960 	u8	src_ip[16];
1961 	u8	dst_ip[16];
1962 	__be32	flow_label;
1963 	u8	next_hdr;
1964 	u8	traffic_class;
1965 	u8	hop_limit;
1966 	/* Must be last */
1967 	u8	real_sz[0];
1968 };
1969 
1970 struct ib_flow_spec_ipv6 {
1971 	u32			   type;
1972 	u16			   size;
1973 	struct ib_flow_ipv6_filter val;
1974 	struct ib_flow_ipv6_filter mask;
1975 };
1976 
1977 struct ib_flow_tcp_udp_filter {
1978 	__be16	dst_port;
1979 	__be16	src_port;
1980 	/* Must be last */
1981 	u8	real_sz[0];
1982 };
1983 
1984 struct ib_flow_spec_tcp_udp {
1985 	u32			      type;
1986 	u16			      size;
1987 	struct ib_flow_tcp_udp_filter val;
1988 	struct ib_flow_tcp_udp_filter mask;
1989 };
1990 
1991 struct ib_flow_tunnel_filter {
1992 	__be32	tunnel_id;
1993 	u8	real_sz[0];
1994 };
1995 
1996 /* ib_flow_spec_tunnel describes a VXLAN tunnel;
1997  * the tunnel_id field of val carries the VNI value
1998  */
1999 struct ib_flow_spec_tunnel {
2000 	u32			      type;
2001 	u16			      size;
2002 	struct ib_flow_tunnel_filter  val;
2003 	struct ib_flow_tunnel_filter  mask;
2004 };
2005 
2006 struct ib_flow_esp_filter {
2007 	__be32	spi;
2008 	__be32  seq;
2009 	/* Must be last */
2010 	u8	real_sz[0];
2011 };
2012 
2013 struct ib_flow_spec_esp {
2014 	u32                           type;
2015 	u16			      size;
2016 	struct ib_flow_esp_filter     val;
2017 	struct ib_flow_esp_filter     mask;
2018 };
2019 
2020 struct ib_flow_gre_filter {
2021 	__be16 c_ks_res0_ver;
2022 	__be16 protocol;
2023 	__be32 key;
2024 	/* Must be last */
2025 	u8	real_sz[0];
2026 };
2027 
2028 struct ib_flow_spec_gre {
2029 	u32                           type;
2030 	u16			      size;
2031 	struct ib_flow_gre_filter     val;
2032 	struct ib_flow_gre_filter     mask;
2033 };
2034 
2035 struct ib_flow_mpls_filter {
2036 	__be32 tag;
2037 	/* Must be last */
2038 	u8	real_sz[0];
2039 };
2040 
2041 struct ib_flow_spec_mpls {
2042 	u32                           type;
2043 	u16			      size;
2044 	struct ib_flow_mpls_filter     val;
2045 	struct ib_flow_mpls_filter     mask;
2046 };
2047 
2048 struct ib_flow_spec_action_tag {
2049 	enum ib_flow_spec_type	      type;
2050 	u16			      size;
2051 	u32                           tag_id;
2052 };
2053 
2054 struct ib_flow_spec_action_drop {
2055 	enum ib_flow_spec_type	      type;
2056 	u16			      size;
2057 };
2058 
2059 struct ib_flow_spec_action_handle {
2060 	enum ib_flow_spec_type	      type;
2061 	u16			      size;
2062 	struct ib_flow_action	     *act;
2063 };
2064 
2065 enum ib_counters_description {
2066 	IB_COUNTER_PACKETS,
2067 	IB_COUNTER_BYTES,
2068 };
2069 
2070 struct ib_flow_spec_action_count {
2071 	enum ib_flow_spec_type type;
2072 	u16 size;
2073 	struct ib_counters *counters;
2074 };
2075 
2076 union ib_flow_spec {
2077 	struct {
2078 		u32			type;
2079 		u16			size;
2080 	};
2081 	struct ib_flow_spec_eth		eth;
2082 	struct ib_flow_spec_ib		ib;
2083 	struct ib_flow_spec_ipv4        ipv4;
2084 	struct ib_flow_spec_tcp_udp	tcp_udp;
2085 	struct ib_flow_spec_ipv6        ipv6;
2086 	struct ib_flow_spec_tunnel      tunnel;
2087 	struct ib_flow_spec_esp		esp;
2088 	struct ib_flow_spec_gre		gre;
2089 	struct ib_flow_spec_mpls	mpls;
2090 	struct ib_flow_spec_action_tag  flow_tag;
2091 	struct ib_flow_spec_action_drop drop;
2092 	struct ib_flow_spec_action_handle action;
2093 	struct ib_flow_spec_action_count flow_count;
2094 };
2095 
2096 struct ib_flow_attr {
2097 	enum ib_flow_attr_type type;
2098 	u16	     size;
2099 	u16	     priority;
2100 	u32	     flags;
2101 	u8	     num_of_specs;
2102 	u8	     port;
2103 	union ib_flow_spec flows[];
2104 };
2105 
2106 struct ib_flow {
2107 	struct ib_qp		*qp;
2108 	struct ib_uobject	*uobject;
2109 };
2110 
2111 enum ib_flow_action_type {
2112 	IB_FLOW_ACTION_UNSPECIFIED,
2113 	IB_FLOW_ACTION_ESP = 1,
2114 };
2115 
2116 struct ib_flow_action_attrs_esp_keymats {
2117 	enum ib_uverbs_flow_action_esp_keymat			protocol;
2118 	union {
2119 		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2120 	} keymat;
2121 };
2122 
2123 struct ib_flow_action_attrs_esp_replays {
2124 	enum ib_uverbs_flow_action_esp_replay			protocol;
2125 	union {
2126 		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2127 	} replay;
2128 };
2129 
2130 enum ib_flow_action_attrs_esp_flags {
2131 	/* All user-space flags at the top: use enum ib_uverbs_flow_action_esp_flags.
2132 	 * This is done in order to share the same flags between user space and
2133 	 * the kernel and to avoid an unnecessary translation.
2134 	 */
2135 
2136 	/* Kernel flags */
2137 	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2138 	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2139 };
2140 
2141 struct ib_flow_spec_list {
2142 	struct ib_flow_spec_list	*next;
2143 	union ib_flow_spec		spec;
2144 };
2145 
2146 struct ib_flow_action_attrs_esp {
2147 	struct ib_flow_action_attrs_esp_keymats		*keymat;
2148 	struct ib_flow_action_attrs_esp_replays		*replay;
2149 	struct ib_flow_spec_list			*encap;
2150 	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2151 	 * Value of 0 is a valid value.
2152 	 */
2153 	u32						esn;
2154 	u32						spi;
2155 	u32						seq;
2156 	u32						tfc_pad;
2157 	/* Use enum ib_flow_action_attrs_esp_flags */
2158 	u64						flags;
2159 	u64						hard_limit_pkts;
2160 };
2161 
2162 struct ib_flow_action {
2163 	struct ib_device		*device;
2164 	struct ib_uobject		*uobject;
2165 	enum ib_flow_action_type	type;
2166 	atomic_t			usecnt;
2167 };
2168 
2169 struct ib_mad_hdr;
2170 struct ib_grh;
2171 
2172 enum ib_process_mad_flags {
2173 	IB_MAD_IGNORE_MKEY	= 1,
2174 	IB_MAD_IGNORE_BKEY	= 2,
2175 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2176 };
2177 
2178 enum ib_mad_result {
2179 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2180 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2181 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2182 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2183 };
2184 
2185 struct ib_port_cache {
2186 	u64		      subnet_prefix;
2187 	struct ib_pkey_cache  *pkey;
2188 	struct ib_gid_table   *gid;
2189 	u8                     lmc;
2190 	enum ib_port_state     port_state;
2191 };
2192 
2193 struct ib_cache {
2194 	rwlock_t                lock;
2195 	struct ib_event_handler event_handler;
2196 	struct ib_port_cache   *ports;
2197 };
2198 
2199 struct iw_cm_verbs;
2200 
2201 struct ib_port_immutable {
2202 	int                           pkey_tbl_len;
2203 	int                           gid_tbl_len;
2204 	u32                           core_cap_flags;
2205 	u32                           max_mad_size;
2206 };
2207 
2208 /* rdma netdev type - specifies protocol type */
2209 enum rdma_netdev_t {
2210 	RDMA_NETDEV_OPA_VNIC,
2211 	RDMA_NETDEV_IPOIB,
2212 };
2213 
2214 /**
2215  * struct rdma_netdev - rdma netdev
2216  * For cases where netstack interfacing is required.
2217  */
2218 struct rdma_netdev {
2219 	void              *clnt_priv;
2220 	struct ib_device  *hca;
2221 	u8                 port_num;
2222 
2223 	/* cleanup function must be specified */
2224 	void (*free_rdma_netdev)(struct net_device *netdev);
2225 
2226 	/* control functions */
2227 	void (*set_id)(struct net_device *netdev, int id);
2228 	/* send packet */
2229 	int (*send)(struct net_device *dev, struct sk_buff *skb,
2230 		    struct ib_ah *address, u32 dqpn);
2231 	/* multicast */
2232 	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2233 			    union ib_gid *gid, u16 mlid,
2234 			    int set_qkey, u32 qkey);
2235 	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2236 			    union ib_gid *gid, u16 mlid);
2237 };
2238 
2239 struct ib_port_pkey_list {
2240 	/* Lock to hold while modifying the list. */
2241 	spinlock_t                    list_lock;
2242 	struct list_head              pkey_list;
2243 };
2244 
2245 struct ib_counters {
2246 	struct ib_device	*device;
2247 	struct ib_uobject	*uobject;
2248 	/* num of objects attached */
2249 	atomic_t	usecnt;
2250 };
2251 
2252 enum ib_read_counters_flags {
2253 	/* prefer read values from driver cache */
2254 	IB_READ_COUNTERS_ATTR_PREFER_CACHED = 1 << 0,
2255 };
2256 
2257 struct ib_counters_read_attr {
2258 	u64	*counters_buff;
2259 	u32	ncounters;
2260 	u32	flags; /* use enum ib_read_counters_flags */
2261 };
2262 
2263 struct uverbs_attr_bundle;
2264 
2265 struct ib_device {
2266 	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2267 	struct device                *dma_device;
2268 
2269 	char                          name[IB_DEVICE_NAME_MAX];
2270 
2271 	struct list_head              event_handler_list;
2272 	spinlock_t                    event_handler_lock;
2273 
2274 	spinlock_t                    client_data_lock;
2275 	struct list_head              core_list;
2276 	/* Access to the client_data_list is protected by the client_data_lock
2277 	 * spinlock and the lists_rwsem read-write semaphore */
2278 	struct list_head              client_data_list;
2279 
2280 	struct ib_cache               cache;
2281 	/**
2282 	 * port_immutable is indexed by port number
2283 	 */
2284 	struct ib_port_immutable     *port_immutable;
2285 
2286 	int			      num_comp_vectors;
2287 
2288 	struct ib_port_pkey_list     *port_pkey_list;
2289 
2290 	struct iw_cm_verbs	     *iwcm;
2291 
2292 	/**
2293 	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2294 	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
2295 	 *   core when the device is removed.  A lifespan of -1 in the return
2296 	 *   struct tells the core to set a default lifespan.
2297 	 */
2298 	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
2299 						     u8 port_num);
2300 	/**
2301 	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2302 	 * @index - The index in the value array we wish to have updated, or
2303 	 *   num_counters if we want all stats updated
2304 	 * Return codes -
2305 	 *   < 0 - Error, no counters updated
2306 	 *   index - Updated the single counter pointed to by index
2307 	 *   num_counters - Updated all counters (will reset the timestamp
2308 	 *     and prevent further calls for lifespan milliseconds)
2309 	 * Drivers are allowed to update all counters in lieu of just the
2310 	 *   one given in index at their option
2311 	 */
2312 	int		           (*get_hw_stats)(struct ib_device *device,
2313 						   struct rdma_hw_stats *stats,
2314 						   u8 port, int index);
2315 	int		           (*query_device)(struct ib_device *device,
2316 						   struct ib_device_attr *device_attr,
2317 						   struct ib_udata *udata);
2318 	int		           (*query_port)(struct ib_device *device,
2319 						 u8 port_num,
2320 						 struct ib_port_attr *port_attr);
2321 	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
2322 						     u8 port_num);
2323 	/* When calling get_netdev, the HW vendor's driver should return the
2324 	 * net device of device @device at port @port_num or NULL if such
2325 	 * a net device doesn't exist. The vendor driver should call dev_hold
2326 	 * on this net device. The HW vendor's device driver must guarantee
2327 	 * that this function returns NULL before the net device has finished
2328 	 * the NETDEV_UNREGISTER state.
2329 	 */
2330 	struct net_device	  *(*get_netdev)(struct ib_device *device,
2331 						 u8 port_num);
2332 	/* query_gid should return the GID value for @device when the @port_num
2333 	 * link layer is either IB or iWARP. It is a no-op if the @port_num
2334 	 * port uses the RoCE link layer.
2335 	 */
2336 	int		           (*query_gid)(struct ib_device *device,
2337 						u8 port_num, int index,
2338 						union ib_gid *gid);
2339 	/* When calling add_gid, the HW vendor's driver should add the GID
2340 	 * of the device port at the GID index described by @attr. Meta-info of
2341 	 * that gid (for example, the network device related to this gid) is
2342 	 * available at @attr. @context allows the HW vendor driver to store
2343 	 * extra information together with a GID entry. The HW vendor driver may
2344 	 * allocate memory to contain this information and store it in @context
2345 	 * when a new GID entry is written to. Params are consistent until the
2346 	 * next call of add_gid or delete_gid. The function should return 0 on
2347 	 * success or error otherwise. The function could be called
2348 	 * concurrently for different ports. This function is only called when
2349 	 * roce_gid_table is used.
2350 	 */
2351 	int		           (*add_gid)(const struct ib_gid_attr *attr,
2352 					      void **context);
2353 	/* When calling del_gid, the HW vendor's driver should delete the
2354 	 * GID of device @device at the GID index and port number
2355 	 * available in @attr.
2356 	 * Upon the deletion of a GID entry, the HW vendor must free any
2357 	 * allocated memory. The caller will clear @context afterwards.
2358 	 * This function is only called when roce_gid_table is used.
2359 	 */
2360 	int		           (*del_gid)(const struct ib_gid_attr *attr,
2361 					      void **context);
2362 	int		           (*query_pkey)(struct ib_device *device,
2363 						 u8 port_num, u16 index, u16 *pkey);
2364 	int		           (*modify_device)(struct ib_device *device,
2365 						    int device_modify_mask,
2366 						    struct ib_device_modify *device_modify);
2367 	int		           (*modify_port)(struct ib_device *device,
2368 						  u8 port_num, int port_modify_mask,
2369 						  struct ib_port_modify *port_modify);
2370 	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
2371 						     struct ib_udata *udata);
2372 	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
2373 	int                        (*mmap)(struct ib_ucontext *context,
2374 					   struct vm_area_struct *vma);
2375 	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
2376 					       struct ib_ucontext *context,
2377 					       struct ib_udata *udata);
2378 	int                        (*dealloc_pd)(struct ib_pd *pd);
2379 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
2380 						struct rdma_ah_attr *ah_attr,
2381 						struct ib_udata *udata);
2382 	int                        (*modify_ah)(struct ib_ah *ah,
2383 						struct rdma_ah_attr *ah_attr);
2384 	int                        (*query_ah)(struct ib_ah *ah,
2385 					       struct rdma_ah_attr *ah_attr);
2386 	int                        (*destroy_ah)(struct ib_ah *ah);
2387 	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
2388 						 struct ib_srq_init_attr *srq_init_attr,
2389 						 struct ib_udata *udata);
2390 	int                        (*modify_srq)(struct ib_srq *srq,
2391 						 struct ib_srq_attr *srq_attr,
2392 						 enum ib_srq_attr_mask srq_attr_mask,
2393 						 struct ib_udata *udata);
2394 	int                        (*query_srq)(struct ib_srq *srq,
2395 						struct ib_srq_attr *srq_attr);
2396 	int                        (*destroy_srq)(struct ib_srq *srq);
2397 	int                        (*post_srq_recv)(struct ib_srq *srq,
2398 						    struct ib_recv_wr *recv_wr,
2399 						    struct ib_recv_wr **bad_recv_wr);
2400 	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
2401 						struct ib_qp_init_attr *qp_init_attr,
2402 						struct ib_udata *udata);
2403 	int                        (*modify_qp)(struct ib_qp *qp,
2404 						struct ib_qp_attr *qp_attr,
2405 						int qp_attr_mask,
2406 						struct ib_udata *udata);
2407 	int                        (*query_qp)(struct ib_qp *qp,
2408 					       struct ib_qp_attr *qp_attr,
2409 					       int qp_attr_mask,
2410 					       struct ib_qp_init_attr *qp_init_attr);
2411 	int                        (*destroy_qp)(struct ib_qp *qp);
2412 	int                        (*post_send)(struct ib_qp *qp,
2413 						struct ib_send_wr *send_wr,
2414 						struct ib_send_wr **bad_send_wr);
2415 	int                        (*post_recv)(struct ib_qp *qp,
2416 						struct ib_recv_wr *recv_wr,
2417 						struct ib_recv_wr **bad_recv_wr);
2418 	struct ib_cq *             (*create_cq)(struct ib_device *device,
2419 						const struct ib_cq_init_attr *attr,
2420 						struct ib_ucontext *context,
2421 						struct ib_udata *udata);
2422 	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2423 						u16 cq_period);
2424 	int                        (*destroy_cq)(struct ib_cq *cq);
2425 	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
2426 						struct ib_udata *udata);
2427 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
2428 					      struct ib_wc *wc);
2429 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2430 	int                        (*req_notify_cq)(struct ib_cq *cq,
2431 						    enum ib_cq_notify_flags flags);
2432 	int                        (*req_ncomp_notif)(struct ib_cq *cq,
2433 						      int wc_cnt);
2434 	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2435 						 int mr_access_flags);
2436 	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2437 						  u64 start, u64 length,
2438 						  u64 virt_addr,
2439 						  int mr_access_flags,
2440 						  struct ib_udata *udata);
2441 	int			   (*rereg_user_mr)(struct ib_mr *mr,
2442 						    int flags,
2443 						    u64 start, u64 length,
2444 						    u64 virt_addr,
2445 						    int mr_access_flags,
2446 						    struct ib_pd *pd,
2447 						    struct ib_udata *udata);
2448 	int                        (*dereg_mr)(struct ib_mr *mr);
2449 	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
2450 					       enum ib_mr_type mr_type,
2451 					       u32 max_num_sg);
2452 	int                        (*map_mr_sg)(struct ib_mr *mr,
2453 						struct scatterlist *sg,
2454 						int sg_nents,
2455 						unsigned int *sg_offset);
2456 	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2457 					       enum ib_mw_type type,
2458 					       struct ib_udata *udata);
2459 	int                        (*dealloc_mw)(struct ib_mw *mw);
2460 	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
2461 						int mr_access_flags,
2462 						struct ib_fmr_attr *fmr_attr);
2463 	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
2464 						   u64 *page_list, int list_len,
2465 						   u64 iova);
2466 	int		           (*unmap_fmr)(struct list_head *fmr_list);
2467 	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
2468 	int                        (*attach_mcast)(struct ib_qp *qp,
2469 						   union ib_gid *gid,
2470 						   u16 lid);
2471 	int                        (*detach_mcast)(struct ib_qp *qp,
2472 						   union ib_gid *gid,
2473 						   u16 lid);
2474 	int                        (*process_mad)(struct ib_device *device,
2475 						  int process_mad_flags,
2476 						  u8 port_num,
2477 						  const struct ib_wc *in_wc,
2478 						  const struct ib_grh *in_grh,
2479 						  const struct ib_mad_hdr *in_mad,
2480 						  size_t in_mad_size,
2481 						  struct ib_mad_hdr *out_mad,
2482 						  size_t *out_mad_size,
2483 						  u16 *out_mad_pkey_index);
2484 	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
2485 						 struct ib_ucontext *ucontext,
2486 						 struct ib_udata *udata);
2487 	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2488 	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
2489 						  struct ib_flow_attr
2490 						  *flow_attr,
2491 						  int domain,
2492 						  struct ib_udata *udata);
2493 	int			   (*destroy_flow)(struct ib_flow *flow_id);
2494 	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2495 						      struct ib_mr_status *mr_status);
2496 	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2497 	void			   (*drain_rq)(struct ib_qp *qp);
2498 	void			   (*drain_sq)(struct ib_qp *qp);
2499 	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2500 							int state);
2501 	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2502 						   struct ifla_vf_info *ivf);
2503 	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2504 						   struct ifla_vf_stats *stats);
2505 	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2506 						  int type);
2507 	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
2508 						struct ib_wq_init_attr *init_attr,
2509 						struct ib_udata *udata);
2510 	int			   (*destroy_wq)(struct ib_wq *wq);
2511 	int			   (*modify_wq)(struct ib_wq *wq,
2512 						struct ib_wq_attr *attr,
2513 						u32 wq_attr_mask,
2514 						struct ib_udata *udata);
2515 	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2516 							   struct ib_rwq_ind_table_init_attr *init_attr,
2517 							   struct ib_udata *udata);
2518 	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2519 	struct ib_flow_action *	   (*create_flow_action_esp)(struct ib_device *device,
2520 							     const struct ib_flow_action_attrs_esp *attr,
2521 							     struct uverbs_attr_bundle *attrs);
2522 	int			   (*destroy_flow_action)(struct ib_flow_action *action);
2523 	int			   (*modify_flow_action_esp)(struct ib_flow_action *action,
2524 							     const struct ib_flow_action_attrs_esp *attr,
2525 							     struct uverbs_attr_bundle *attrs);
2526 	struct ib_dm *             (*alloc_dm)(struct ib_device *device,
2527 					       struct ib_ucontext *context,
2528 					       struct ib_dm_alloc_attr *attr,
2529 					       struct uverbs_attr_bundle *attrs);
2530 	int                        (*dealloc_dm)(struct ib_dm *dm);
2531 	struct ib_mr *             (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2532 						struct ib_dm_mr_attr *attr,
2533 						struct uverbs_attr_bundle *attrs);
2534 	struct ib_counters *	(*create_counters)(struct ib_device *device,
2535 						   struct uverbs_attr_bundle *attrs);
2536 	int	(*destroy_counters)(struct ib_counters	*counters);
2537 	int	(*read_counters)(struct ib_counters *counters,
2538 				 struct ib_counters_read_attr *counters_read_attr,
2539 				 struct uverbs_attr_bundle *attrs);
2540 
2541 	/**
2542 	 * rdma netdev operation
2543 	 *
2544 	 * A driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
2545 	 * doesn't support the specified rdma netdev type.
2546 	 */
2547 	struct net_device *(*alloc_rdma_netdev)(
2548 					struct ib_device *device,
2549 					u8 port_num,
2550 					enum rdma_netdev_t type,
2551 					const char *name,
2552 					unsigned char name_assign_type,
2553 					void (*setup)(struct net_device *));
2554 
2555 	struct module               *owner;
2556 	struct device                dev;
2557 	struct kobject               *ports_parent;
2558 	struct list_head             port_list;
2559 
2560 	enum {
2561 		IB_DEV_UNINITIALIZED,
2562 		IB_DEV_REGISTERED,
2563 		IB_DEV_UNREGISTERED
2564 	}                            reg_state;
2565 
2566 	int			     uverbs_abi_ver;
2567 	u64			     uverbs_cmd_mask;
2568 	u64			     uverbs_ex_cmd_mask;
2569 
2570 	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2571 	__be64			     node_guid;
2572 	u32			     local_dma_lkey;
2573 	u16                          is_switch:1;
2574 	u8                           node_type;
2575 	u8                           phys_port_cnt;
2576 	struct ib_device_attr        attrs;
2577 	struct attribute_group	     *hw_stats_ag;
2578 	struct rdma_hw_stats         *hw_stats;
2579 
2580 #ifdef CONFIG_CGROUP_RDMA
2581 	struct rdmacg_device         cg_device;
2582 #endif
2583 
2584 	u32                          index;
2585 	/*
2586 	 * Implementation details of the RDMA core, don't use in drivers
2587 	 */
2588 	struct rdma_restrack_root     res;
2589 
2590 	/**
2591 	 * The following mandatory functions are used only at device
2592 	 * registration.  Keep functions such as these at the end of this
2593 	 * structure to avoid cache line misses when accessing struct ib_device
2594 	 * in fast paths.
2595 	 */
2596 	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2597 	void (*get_dev_fw_str)(struct ib_device *, char *str);
2598 	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2599 						     int comp_vector);
2600 
2601 	struct uverbs_root_spec		*specs_root;
2602 	enum rdma_driver_id		driver_id;
2603 };
2604 
2605 struct ib_client {
2606 	char  *name;
2607 	void (*add)   (struct ib_device *);
2608 	void (*remove)(struct ib_device *, void *client_data);
2609 
2610 	/* Returns the net_dev belonging to this ib_client and matching the
2611 	 * given parameters.
2612 	 * @dev:	 An RDMA device that the net_dev uses for communication.
2613 	 * @port:	 A physical port number on the RDMA device.
2614 	 * @pkey:	 P_Key that the net_dev uses if applicable.
2615 	 * @gid:	 A GID that the net_dev uses to communicate.
2616 	 * @addr:	 An IP address the net_dev is configured with.
2617 	 * @client_data: The device's client data set by ib_set_client_data().
2618 	 *
2619 	 * An ib_client that implements a net_dev on top of RDMA devices
2620 	 * (such as IP over IB) should implement this callback, allowing the
2621 	 * rdma_cm module to find the right net_dev for a given request.
2622 	 *
2623 	 * The caller is responsible for calling dev_put on the returned
2624 	 * netdev. */
2625 	struct net_device *(*get_net_dev_by_params)(
2626 			struct ib_device *dev,
2627 			u8 port,
2628 			u16 pkey,
2629 			const union ib_gid *gid,
2630 			const struct sockaddr *addr,
2631 			void *client_data);
2632 	struct list_head list;
2633 };
2634 
2635 struct ib_device *ib_alloc_device(size_t size);
2636 void ib_dealloc_device(struct ib_device *device);
2637 
2638 void ib_get_device_fw_str(struct ib_device *device, char *str);
2639 
2640 int ib_register_device(struct ib_device *device,
2641 		       int (*port_callback)(struct ib_device *,
2642 					    u8, struct kobject *));
2643 void ib_unregister_device(struct ib_device *device);
2644 
2645 int ib_register_client   (struct ib_client *client);
2646 void ib_unregister_client(struct ib_client *client);
2647 
2648 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2649 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2650 			 void *data);
2651 
2652 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2653 {
2654 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2655 }
2656 
2657 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2658 {
2659 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2660 }
2661 
2662 static inline bool ib_is_buffer_cleared(const void __user *p,
2663 					size_t len)
2664 {
2665 	bool ret;
2666 	u8 *buf;
2667 
2668 	if (len > USHRT_MAX)
2669 		return false;
2670 
2671 	buf = memdup_user(p, len);
2672 	if (IS_ERR(buf))
2673 		return false;
2674 
2675 	ret = !memchr_inv(buf, 0, len);
2676 	kfree(buf);
2677 	return ret;
2678 }
2679 
2680 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2681 				       size_t offset,
2682 				       size_t len)
2683 {
2684 	return ib_is_buffer_cleared(udata->inbuf + offset, len);
2685 }
2686 
2687 /**
2688  * ib_modify_qp_is_ok - Check that the supplied attribute mask
2689  * contains all required attributes and no attributes not allowed for
2690  * the given QP state transition.
2691  * @cur_state: Current QP state
2692  * @next_state: Next QP state
2693  * @type: QP type
2694  * @mask: Mask of supplied QP attributes
2695  * @ll : link layer of port
2696  *
2697  * This function is a helper function that a low-level driver's
2698  * modify_qp method can use to validate the consumer's input.  It
2699  * checks that cur_state and next_state are valid QP states, that a
2700  * transition from cur_state to next_state is allowed by the IB spec,
2701  * and that the attribute mask supplied is allowed for the transition.
2702  */
2703 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2704 			enum ib_qp_type type, enum ib_qp_attr_mask mask,
2705 			enum rdma_link_layer ll);
2706 
2707 void ib_register_event_handler(struct ib_event_handler *event_handler);
2708 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2709 void ib_dispatch_event(struct ib_event *event);
2710 
2711 int ib_query_port(struct ib_device *device,
2712 		  u8 port_num, struct ib_port_attr *port_attr);
2713 
2714 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2715 					       u8 port_num);
2716 
2717 /**
2718  * rdma_cap_ib_switch - Check if the device is IB switch
2719  * @device: Device to check
2720  *
2721  * The device driver is responsible for setting the is_switch bit
2722  * in the ib_device structure at init time.
2723  *
2724  * Return: true if the device is IB switch.
2725  */
2726 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2727 {
2728 	return device->is_switch;
2729 }
2730 
2731 /**
2732  * rdma_start_port - Return the first valid port number for the device
2733  * specified
2734  *
2735  * @device: Device to be checked
2736  *
2737  * Return start port number
2738  */
2739 static inline u8 rdma_start_port(const struct ib_device *device)
2740 {
2741 	return rdma_cap_ib_switch(device) ? 0 : 1;
2742 }
2743 
2744 /**
2745  * rdma_end_port - Return the last valid port number for the device
2746  * specified
2747  *
2748  * @device: Device to be checked
2749  *
2750  * Return last port number
2751  */
2752 static inline u8 rdma_end_port(const struct ib_device *device)
2753 {
2754 	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2755 }
2756 
2757 static inline int rdma_is_port_valid(const struct ib_device *device,
2758 				     unsigned int port)
2759 {
2760 	return (port >= rdma_start_port(device) &&
2761 		port <= rdma_end_port(device));
2762 }
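
/*
 * Example (illustrative sketch): core code typically walks every port of a
 * device with the two helpers above, e.g. together with one of the
 * rdma_protocol_*() checks declared below:
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device); port <= rdma_end_port(device);
 *	     port++) {
 *		if (!rdma_protocol_ib(device, port))
 *			continue;
 *		...
 *	}
 */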
2763 
2764 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2765 {
2766 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2767 }
2768 
2769 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2770 {
2771 	return device->port_immutable[port_num].core_cap_flags &
2772 		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2773 }
2774 
2775 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2776 {
2777 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2778 }
2779 
2780 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2781 {
2782 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2783 }
2784 
2785 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2786 {
2787 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2788 }
2789 
2790 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2791 {
2792 	return rdma_protocol_ib(device, port_num) ||
2793 		rdma_protocol_roce(device, port_num);
2794 }
2795 
2796 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2797 {
2798 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2799 }
2800 
2801 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2802 {
2803 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2804 }
2805 
2806 /**
2807  * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2808  * Management Datagrams.
2809  * @device: Device to check
2810  * @port_num: Port number to check
2811  *
2812  * Management Datagrams (MAD) are a required part of the InfiniBand
2813  * specification and are supported on all InfiniBand devices.  A slightly
2814  * extended version is also supported on OPA interfaces.
2815  *
2816  * Return: true if the port supports sending/receiving of MAD packets.
2817  */
2818 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2819 {
2820 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2821 }
2822 
2823 /**
2824  * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2825  * Management Datagrams.
2826  * @device: Device to check
2827  * @port_num: Port number to check
2828  *
2829  * Intel OmniPath devices extend and/or replace the InfiniBand Management
2830  * datagrams with their own versions.  These OPA MADs share many but not all of
2831  * the characteristics of InfiniBand MADs.
2832  *
2833  * OPA MADs differ in the following ways:
2834  *
2835  *    1) MADs are variable size up to 2K
2836  *       IBTA defined MADs remain fixed at 256 bytes
2837  *    2) OPA SMPs must carry valid PKeys
2838  *    3) OPA SMP packets are a different format
2839  *
2840  * Return: true if the port supports OPA MAD packet formats.
2841  */
2842 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2843 {
2844 	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2845 		== RDMA_CORE_CAP_OPA_MAD;
2846 }
2847 
2848 /**
2849  * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
2850  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2851  * @device: Device to check
2852  * @port_num: Port number to check
2853  *
2854  * Each InfiniBand node is required to provide a Subnet Management Agent
2855  * that the subnet manager can access.  Prior to the fabric being fully
2856  * configured by the subnet manager, the SMA is accessed via a well known
2857  * interface called the Subnet Management Interface (SMI).  This interface
2858  * uses directed route packets to communicate with the SM to get around the
2859  * chicken and egg problem of the SM needing to know what's on the fabric
2860  * in order to configure the fabric, and needing to configure the fabric in
2861  * order to send packets to the devices on the fabric.  These directed
2862  * route packets do not need the fabric fully configured in order to reach
2863  * their destination.  The SMI is the only method allowed to send
2864  * directed route packets on an InfiniBand fabric.
2865  *
2866  * Return: true if the port provides an SMI.
2867  */
2868 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2869 {
2870 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2871 }
2872 
2873 /**
2874  * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
2875  * Communication Manager.
2876  * @device: Device to check
2877  * @port_num: Port number to check
2878  *
2879  * The InfiniBand Communication Manager is one of many pre-defined General
2880  * Service Agents (GSA) that are accessed via the General Service
2881  * Interface (GSI).  Its role is to facilitate establishment of connections
2882  * between nodes as well as other management related tasks for established
2883  * connections.
2884  *
2885  * Return: true if the port supports an IB CM (this does not guarantee that
2886  * a CM is actually running however).
2887  */
2888 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2889 {
2890 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2891 }
2892 
2893 /**
2894  * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
2895  * Communication Manager.
2896  * @device: Device to check
2897  * @port_num: Port number to check
2898  *
2899  * Similar to above, but specific to iWARP connections which have a different
2900  * management protocol than InfiniBand.
2901  *
2902  * Return: true if the port supports an iWARP CM (this does not guarantee that
2903  * a CM is actually running however).
2904  */
2905 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2906 {
2907 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2908 }
2909 
2910 /**
2911  * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
2912  * Subnet Administration.
2913  * @device: Device to check
2914  * @port_num: Port number to check
2915  *
2916  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2917  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2918  * fabrics, devices should resolve routes to other hosts by contacting the
2919  * SA to query the proper route.
2920  *
2921  * Return: true if the port should act as a client to the fabric Subnet
2922  * Administration interface.  This does not imply that the SA service is
2923  * running locally.
2924  */
2925 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2926 {
2927 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2928 }
2929 
2930 /**
2931  * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
2932  * Multicast.
2933  * @device: Device to check
2934  * @port_num: Port number to check
2935  *
2936  * InfiniBand multicast registration is more complex than normal IPv4 or
2937  * IPv6 multicast registration.  Each Host Channel Adapter must register
2938  * with the Subnet Manager when it wishes to join a multicast group.  It
2939  * should do so only once regardless of how many queue pairs it subscribes
2940  * to this group.  And it should leave the group only after all queue pairs
2941  * attached to the group have been detached.
2942  *
2943  * Return: true if the port must undertake the additional administrative
2944  * overhead of registering/unregistering with the SM and tracking of the
2945  * total number of queue pairs attached to the multicast group.
2946  */
2947 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2948 {
2949 	return rdma_cap_ib_sa(device, port_num);
2950 }
2951 
2952 /**
2953  * rdma_cap_af_ib - Check if the port of device has the capability
2954  * Native Infiniband Address.
2955  * @device: Device to check
2956  * @port_num: Port number to check
2957  *
2958  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2959  * GID.  RoCE uses a different mechanism, but still generates a GID via
2960  * a prescribed mechanism and port specific data.
2961  *
2962  * Return: true if the port uses a GID address to identify devices on the
2963  * network.
2964  */
2965 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2966 {
2967 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2968 }
2969 
2970 /**
2971  * rdma_cap_eth_ah - Check if the port of device has the capability
2972  * Ethernet Address Handle.
2973  * @device: Device to check
2974  * @port_num: Port number to check
2975  *
2976  * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2977  * to fabricate GIDs over Ethernet/IP specific addresses native to the
2978  * port.  Normally, packet headers are generated by the sending host
2979  * adapter, but when sending connectionless datagrams, we must manually
2980  * inject the proper headers for the fabric we are communicating over.
2981  *
2982  * Return: true if we are running as a RoCE port and must force the
2983  * addition of a Global Route Header built from our Ethernet Address
2984  * Handle into our header list for connectionless packets.
2985  */
2986 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2987 {
2988 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2989 }
2990 
2991 /**
2992  * rdma_cap_opa_ah - Check if the port of device supports
2993  * OPA Address handles
2994  * @device: Device to check
2995  * @port_num: Port number to check
2996  *
2997  * Return: true if we are running on an OPA device which supports
2998  * the extended OPA addressing.
2999  */
3000 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3001 {
3002 	return (device->port_immutable[port_num].core_cap_flags &
3003 		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3004 }
3005 
3006 /**
3007  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3008  *
3009  * @device: Device
3010  * @port_num: Port number
3011  *
3012  * This MAD size includes the MAD headers and MAD payload.  No other headers
3013  * are included.
3014  *
3015  * Return the max MAD size required by the Port.  Will return 0 if the port
3016  * does not support MADs.
3017  */
3018 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3019 {
3020 	return device->port_immutable[port_num].max_mad_size;
3021 }
3022 
3023 /**
3024  * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3025  * @device: Device to check
3026  * @port_num: Port number to check
3027  *
3028  * RoCE GID table mechanism manages the various GIDs for a device.
3029  *
3030  * NOTE: if allocating the port's GID table has failed, this call will still
3031  * return true, but any RoCE GID table API will fail.
3032  *
3033  * Return: true if the port uses RoCE GID table mechanism in order to manage
3034  * its GIDs.
3035  */
3036 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3037 					   u8 port_num)
3038 {
3039 	return rdma_protocol_roce(device, port_num) &&
3040 		device->add_gid && device->del_gid;
3041 }
3042 
3043 /*
3044  * Check if the device supports READ W/ INVALIDATE.
3045  */
3046 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3047 {
3048 	/*
3049 	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3050 	 * has support for it yet.
3051 	 */
3052 	return rdma_protocol_iwarp(dev, port_num);
3053 }
3054 
3055 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3056 			 int state);
3057 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3058 		     struct ifla_vf_info *info);
3059 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3060 		    struct ifla_vf_stats *stats);
3061 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3062 		   int type);
3063 
3064 int ib_query_pkey(struct ib_device *device,
3065 		  u8 port_num, u16 index, u16 *pkey);
3066 
3067 int ib_modify_device(struct ib_device *device,
3068 		     int device_modify_mask,
3069 		     struct ib_device_modify *device_modify);
3070 
3071 int ib_modify_port(struct ib_device *device,
3072 		   u8 port_num, int port_modify_mask,
3073 		   struct ib_port_modify *port_modify);
3074 
3075 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3076 		u8 *port_num, u16 *index);
3077 
3078 int ib_find_pkey(struct ib_device *device,
3079 		 u8 port_num, u16 pkey, u16 *index);
3080 
3081 enum ib_pd_flags {
3082 	/*
3083 	 * Create a memory registration for all memory in the system and place
3084 	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3085 	 * ULPs to avoid the overhead of dynamic MRs.
3086 	 *
3087 	 * This flag is generally considered unsafe and must only be used in
3088 	 * extremely trusted environments.  Every use of it will log a warning
3089 	 * in the kernel log.
3090 	 */
3091 	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3092 };
3093 
3094 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3095 		const char *caller);
3096 #define ib_alloc_pd(device, flags) \
3097 	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3098 void ib_dealloc_pd(struct ib_pd *pd);
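
/*
 * Example (illustrative sketch): a ULP normally allocates a PD without the
 * unsafe flag and releases it again once its other resources are gone:
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */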
3099 
3100 /**
3101  * rdma_create_ah - Creates an address handle for the given address vector.
3102  * @pd: The protection domain associated with the address handle.
3103  * @ah_attr: The attributes of the address vector.
3104  *
3105  * The address handle is used to reference a local or global destination
3106  * in all UD QP post sends.
3107  */
3108 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
3109 
3110 /**
3111  * rdma_create_user_ah - Creates an address handle for the given address vector.
3112  * It resolves the destination MAC address for ah attributes of RoCE type.
3113  * @pd: The protection domain associated with the address handle.
3114  * @ah_attr: The attributes of the address vector.
3115  * @udata: pointer to the user's input/output buffer information needed by
3116  *         the provider driver.
3117  *
3118  * It returns a valid address handle on success and an error pointer on error.
3119  * The address handle is used to reference a local or global destination
3120  * in all UD QP post sends.
3121  */
3122 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3123 				  struct rdma_ah_attr *ah_attr,
3124 				  struct ib_udata *udata);
3125 /**
3126  * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3127  *   work completion.
3128  * @hdr: the L3 header to parse
3129  * @net_type: type of header to parse
3130  * @sgid: place to store source gid
3131  * @dgid: place to store destination gid
3132  */
3133 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3134 			      enum rdma_network_type net_type,
3135 			      union ib_gid *sgid, union ib_gid *dgid);
3136 
3137 /**
3138  * ib_get_rdma_header_version - Get the header version
3139  * @hdr: the L3 header to parse
3140  */
3141 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3142 
3143 /**
3144  * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3145  *   work completion.
3146  * @device: Device on which the received message arrived.
3147  * @port_num: Port on which the received message arrived.
3148  * @wc: Work completion associated with the received message.
3149  * @grh: References the received global route header.  This parameter is
3150  *   ignored unless the work completion indicates that the GRH is valid.
3151  * @ah_attr: Returned attributes that can be used when creating an address
3152  *   handle for replying to the message.
3153  */
3154 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3155 			    const struct ib_wc *wc, const struct ib_grh *grh,
3156 			    struct rdma_ah_attr *ah_attr);
3157 
3158 /**
3159  * ib_create_ah_from_wc - Creates an address handle associated with the
3160  *   sender of the specified work completion.
3161  * @pd: The protection domain associated with the address handle.
3162  * @wc: Work completion information associated with a received message.
3163  * @grh: References the received global route header.  This parameter is
3164  *   ignored unless the work completion indicates that the GRH is valid.
3165  * @port_num: The outbound port number to associate with the address.
3166  *
3167  * The address handle is used to reference a local or global destination
3168  * in all UD QP post sends.
3169  */
3170 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3171 				   const struct ib_grh *grh, u8 port_num);
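
/*
 * Example (illustrative sketch): a UD service replying to a received
 * datagram can build the reply address handle straight from the work
 * completion, assuming "wc" and "grh" describe the received message:
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...
 *	rdma_destroy_ah(ah);
 */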
3172 
3173 /**
3174  * rdma_modify_ah - Modifies the address vector associated with an address
3175  *   handle.
3176  * @ah: The address handle to modify.
3177  * @ah_attr: The new address vector attributes to associate with the
3178  *   address handle.
3179  */
3180 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3181 
3182 /**
3183  * rdma_query_ah - Queries the address vector associated with an address
3184  *   handle.
3185  * @ah: The address handle to query.
3186  * @ah_attr: The address vector attributes associated with the address
3187  *   handle.
3188  */
3189 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3190 
3191 /**
3192  * rdma_destroy_ah - Destroys an address handle.
3193  * @ah: The address handle to destroy.
3194  */
3195 int rdma_destroy_ah(struct ib_ah *ah);
3196 
3197 /**
3198  * ib_create_srq - Creates a SRQ associated with the specified protection
3199  *   domain.
3200  * @pd: The protection domain associated with the SRQ.
3201  * @srq_init_attr: A list of initial attributes required to create the
3202  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
3203  *   the actual capabilities of the created SRQ.
3204  *
3205  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
3206  * requested size of the SRQ, and set to the actual values allocated
3207  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
3208  * will always be at least as large as the requested values.
3209  */
3210 struct ib_srq *ib_create_srq(struct ib_pd *pd,
3211 			     struct ib_srq_init_attr *srq_init_attr);
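
/*
 * Example (illustrative sketch): the init attributes are both input and
 * output, so a caller can request a size and read back what was granted;
 * the sizes below are placeholders:
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 128, .max_sge = 2 },
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	(srq_attr.attr.max_wr and .max_sge now hold the granted values)
 */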
3212 
3213 /**
3214  * ib_modify_srq - Modifies the attributes for the specified SRQ.
3215  * @srq: The SRQ to modify.
3216  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3217  *   the current values of selected SRQ attributes are returned.
3218  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3219  *   are being modified.
3220  *
3221  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3222  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3223  * the number of receives queued drops below the limit.
3224  */
3225 int ib_modify_srq(struct ib_srq *srq,
3226 		  struct ib_srq_attr *srq_attr,
3227 		  enum ib_srq_attr_mask srq_attr_mask);
3228 
3229 /**
3230  * ib_query_srq - Returns the attribute list and current values for the
3231  *   specified SRQ.
3232  * @srq: The SRQ to query.
3233  * @srq_attr: The attributes of the specified SRQ.
3234  */
3235 int ib_query_srq(struct ib_srq *srq,
3236 		 struct ib_srq_attr *srq_attr);
3237 
3238 /**
3239  * ib_destroy_srq - Destroys the specified SRQ.
3240  * @srq: The SRQ to destroy.
3241  */
3242 int ib_destroy_srq(struct ib_srq *srq);
3243 
3244 /**
3245  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3246  * @srq: The SRQ to post the work request on.
3247  * @recv_wr: A list of work requests to post on the receive queue.
3248  * @bad_recv_wr: On an immediate failure, this parameter will reference
3249  *   the work request that failed to be posted on the QP.
3250  */
3251 static inline int ib_post_srq_recv(struct ib_srq *srq,
3252 				   struct ib_recv_wr *recv_wr,
3253 				   struct ib_recv_wr **bad_recv_wr)
3254 {
3255 	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
3256 }
3257 
3258 /**
3259  * ib_create_qp - Creates a QP associated with the specified protection
3260  *   domain.
3261  * @pd: The protection domain associated with the QP.
3262  * @qp_init_attr: A list of initial attributes required to create the
3263  *   QP.  If QP creation succeeds, then the attributes are updated to
3264  *   the actual capabilities of the created QP.
3265  */
3266 struct ib_qp *ib_create_qp(struct ib_pd *pd,
3267 			   struct ib_qp_init_attr *qp_init_attr);
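
/*
 * Example (illustrative sketch): creating an RC QP attached to existing
 * send/receive CQs; the capability values are placeholders:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.qp_type	= IB_QPT_RC,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.cap		= {
 *			.max_send_wr	= 64,
 *			.max_recv_wr	= 64,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */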
3268 
3269 /**
3270  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3271  * @qp: The QP to modify.
3272  * @attr: On input, specifies the QP attributes to modify.  On output,
3273  *   the current values of selected QP attributes are returned.
3274  * @attr_mask: A bit-mask used to specify which attributes of the QP
3275  *   are being modified.
3276  * @udata: pointer to the user's input/output buffer information.
3278  * It returns 0 on success and returns appropriate error code on error.
3279  */
3280 int ib_modify_qp_with_udata(struct ib_qp *qp,
3281 			    struct ib_qp_attr *attr,
3282 			    int attr_mask,
3283 			    struct ib_udata *udata);
3284 
3285 /**
3286  * ib_modify_qp - Modifies the attributes for the specified QP and then
3287  *   transitions the QP to the given state.
3288  * @qp: The QP to modify.
3289  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3290  *   the current values of selected QP attributes are returned.
3291  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3292  *   are being modified.
3293  */
3294 int ib_modify_qp(struct ib_qp *qp,
3295 		 struct ib_qp_attr *qp_attr,
3296 		 int qp_attr_mask);
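
/*
 * Example (illustrative sketch): moving a freshly created RC QP from RESET
 * to INIT; the pkey index, port and access flags below are placeholders,
 * and the mask carries the attributes required for this transition:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			   IB_QP_ACCESS_FLAGS);
 */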
3297 
3298 /**
3299  * ib_query_qp - Returns the attribute list and current values for the
3300  *   specified QP.
3301  * @qp: The QP to query.
3302  * @qp_attr: The attributes of the specified QP.
3303  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3304  * @qp_init_attr: Additional attributes of the selected QP.
3305  *
3306  * The qp_attr_mask may be used to limit the query to gathering only the
3307  * selected attributes.
3308  */
3309 int ib_query_qp(struct ib_qp *qp,
3310 		struct ib_qp_attr *qp_attr,
3311 		int qp_attr_mask,
3312 		struct ib_qp_init_attr *qp_init_attr);
3313 
3314 /**
3315  * ib_destroy_qp - Destroys the specified QP.
3316  * @qp: The QP to destroy.
3317  */
3318 int ib_destroy_qp(struct ib_qp *qp);
3319 
3320 /**
3321  * ib_open_qp - Obtain a reference to an existing sharable QP.
3322  * @xrcd - XRC domain
3323  * @qp_open_attr: Attributes identifying the QP to open.
3324  *
3325  * Returns a reference to a sharable QP.
3326  */
3327 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3328 			 struct ib_qp_open_attr *qp_open_attr);
3329 
3330 /**
3331  * ib_close_qp - Release an external reference to a QP.
3332  * @qp: The QP handle to release
3333  *
3334  * The opened QP handle is released by the caller.  The underlying
3335  * shared QP is not destroyed until all internal references are released.
3336  */
3337 int ib_close_qp(struct ib_qp *qp);
3338 
3339 /**
3340  * ib_post_send - Posts a list of work requests to the send queue of
3341  *   the specified QP.
3342  * @qp: The QP to post the work request on.
3343  * @send_wr: A list of work requests to post on the send queue.
3344  * @bad_send_wr: On an immediate failure, this parameter will reference
3345  *   the work request that failed to be posted on the QP.
3346  *
3347  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3348  * error is returned, the QP state shall not be affected,
3349  * ib_post_send() will return an immediate error after queueing any
3350  * earlier work requests in the list.
3351  */
3352 static inline int ib_post_send(struct ib_qp *qp,
3353 			       struct ib_send_wr *send_wr,
3354 			       struct ib_send_wr **bad_send_wr)
3355 {
3356 	return qp->device->post_send(qp, send_wr, bad_send_wr);
3357 }
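
/*
 * Example (illustrative sketch): posting a single signalled SEND of one
 * buffer; "dma_addr", "len" and "lkey" come from an earlier DMA mapping
 * and MR setup, and MY_SEND_WRID is a hypothetical consumer cookie:
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id		= MY_SEND_WRID,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.opcode		= IB_WR_SEND,
 *		.send_flags	= IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *	int ret;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */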
3358 
3359 /**
3360  * ib_post_recv - Posts a list of work requests to the receive queue of
3361  *   the specified QP.
3362  * @qp: The QP to post the work request on.
3363  * @recv_wr: A list of work requests to post on the receive queue.
3364  * @bad_recv_wr: On an immediate failure, this parameter will reference
3365  *   the work request that failed to be posted on the QP.
3366  */
3367 static inline int ib_post_recv(struct ib_qp *qp,
3368 			       struct ib_recv_wr *recv_wr,
3369 			       struct ib_recv_wr **bad_recv_wr)
3370 {
3371 	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
3372 }
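
/*
 * Example (illustrative sketch): posting one receive buffer; as on the send
 * side, the sge describes a previously DMA-mapped, registered buffer and
 * MY_RECV_WRID is a hypothetical consumer cookie:
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id	 = MY_RECV_WRID,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	}, *bad_wr;
 *	int ret;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */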
3373 
3374 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
3375 			    int nr_cqe, int comp_vector,
3376 			    enum ib_poll_context poll_ctx, const char *caller);
3377 #define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
3378 	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
3379 
3380 void ib_free_cq(struct ib_cq *cq);
3381 int ib_process_cq_direct(struct ib_cq *cq, int budget);
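
/*
 * Example (illustrative sketch): allocating a CQ with this completion API,
 * polled from softirq context; the CQE count and vector are placeholders:
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, NULL, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */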
3382 
3383 /**
3384  * ib_create_cq - Creates a CQ on the specified device.
3385  * @device: The device on which to create the CQ.
3386  * @comp_handler: A user-specified callback that is invoked when a
3387  *   completion event occurs on the CQ.
3388  * @event_handler: A user-specified callback that is invoked when an
3389  *   asynchronous event not associated with a completion occurs on the CQ.
3390  * @cq_context: Context associated with the CQ returned to the user via
3391  *   the associated completion and event handlers.
3392  * @cq_attr: The attributes the CQ should be created upon.
3393  *
3394  * Users can examine the cq structure to determine the actual CQ size.
3395  */
3396 struct ib_cq *ib_create_cq(struct ib_device *device,
3397 			   ib_comp_handler comp_handler,
3398 			   void (*event_handler)(struct ib_event *, void *),
3399 			   void *cq_context,
3400 			   const struct ib_cq_init_attr *cq_attr);
3401 
3402 /**
3403  * ib_resize_cq - Modifies the capacity of the CQ.
3404  * @cq: The CQ to resize.
3405  * @cqe: The minimum size of the CQ.
3406  *
3407  * Users can examine the cq structure to determine the actual CQ size.
3408  */
3409 int ib_resize_cq(struct ib_cq *cq, int cqe);
3410 
3411 /**
3412  * rdma_set_cq_moderation - Modifies moderation params of the CQ
3413  * @cq: The CQ to modify.
3414  * @cq_count: number of CQEs that will trigger an event
3415  * @cq_period: max period of time in usec before triggering an event
3416  *
3417  */
3418 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3419 
3420 /**
3421  * ib_destroy_cq - Destroys the specified CQ.
3422  * @cq: The CQ to destroy.
3423  */
3424 int ib_destroy_cq(struct ib_cq *cq);
3425 
3426 /**
3427  * ib_poll_cq - poll a CQ for completion(s)
3428  * @cq: the CQ being polled
3429  * @num_entries: maximum number of completions to return
3430  * @wc: array of at least @num_entries &struct ib_wc where completions
3431  *   will be returned
3432  *
3433  * Poll a CQ for (possibly multiple) completions.  If the return value
3434  * is < 0, an error occurred.  If the return value is >= 0, it is the
3435  * number of completions returned.  If the return value is
3436  * non-negative and < num_entries, then the CQ was emptied.
3437  */
3438 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3439 			     struct ib_wc *wc)
3440 {
3441 	return cq->device->poll_cq(cq, num_entries, wc);
3442 }
3443 
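/*
 * Illustrative sketch (not part of this header): drain up to 16 completions
 * at a time from a CQ that the caller polls directly.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_err("wr %llu failed: %s\n",
 *				       (unsigned long long)wc[i].wr_id,
 *				       ib_wc_status_msg(wc[i].status));
 *		}
 *	}
 */
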
3444 /**
3445  * ib_req_notify_cq - Request completion notification on a CQ.
3446  * @cq: The CQ to generate an event for.
3447  * @flags:
3448  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3449  *   to request an event on the next solicited event or next work
3450  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3451  *   may also be |ed in to request a hint about missed events, as
3452  *   described below.
3453  *
3454  * Return Value:
3455  *    < 0 means an error occurred while requesting notification
3456  *   == 0 means notification was requested successfully, and if
3457  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3458  *        were missed and it is safe to wait for another event.  In
3459  *        this case it is guaranteed that any work completions added
3460  *        to the CQ since the last CQ poll will trigger a completion
3461  *        notification event.
3462  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3463  *        in.  It means that the consumer must poll the CQ again to
3464  *        make sure it is empty to avoid missing an event because of a
3465  *        race between requesting notification and an entry being
3466  *        added to the CQ.  This return value means it is possible
3467  *        (but not guaranteed) that a work completion has been added
3468  *        to the CQ since the last poll without triggering a
3469  *        completion notification event.
3470  */
3471 static inline int ib_req_notify_cq(struct ib_cq *cq,
3472 				   enum ib_cq_notify_flags flags)
3473 {
3474 	return cq->device->req_notify_cq(cq, flags);
3475 }
3476 
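/*
 * Illustrative sketch (not part of this header) of the re-arm/re-poll
 * pattern: keep emptying the CQ until a notification request reports that no
 * completions could have been missed.  Completions are merely counted here
 * for brevity.
 *
 *	struct ib_wc wc;
 *	int reaped;
 *
 *	do {
 *		reaped = 0;
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			reaped++;
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
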
3477 /**
3478  * ib_req_ncomp_notif - Request completion notification when there are
3479  *   at least the specified number of unreaped completions on the CQ.
3480  * @cq: The CQ to generate an event for.
3481  * @wc_cnt: The number of unreaped completions that should be on the
3482  *   CQ before an event is generated.
3483  */
3484 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3485 {
3486 	return cq->device->req_ncomp_notif ?
3487 		cq->device->req_ncomp_notif(cq, wc_cnt) :
3488 		-ENOSYS;
3489 }
3490 
3491 /**
3492  * ib_dma_mapping_error - check a DMA addr for error
3493  * @dev: The device for which the dma_addr was created
3494  * @dma_addr: The DMA address to check
3495  */
3496 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3497 {
3498 	return dma_mapping_error(dev->dma_device, dma_addr);
3499 }
3500 
3501 /**
3502  * ib_dma_map_single - Map a kernel virtual address to DMA address
3503  * @dev: The device for which the dma_addr is to be created
3504  * @cpu_addr: The kernel virtual address
3505  * @size: The size of the region in bytes
3506  * @direction: The direction of the DMA
3507  */
3508 static inline u64 ib_dma_map_single(struct ib_device *dev,
3509 				    void *cpu_addr, size_t size,
3510 				    enum dma_data_direction direction)
3511 {
3512 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3513 }
3514 
3515 /**
3516  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3517  * @dev: The device for which the DMA address was created
3518  * @addr: The DMA address
3519  * @size: The size of the region in bytes
3520  * @direction: The direction of the DMA
3521  */
3522 static inline void ib_dma_unmap_single(struct ib_device *dev,
3523 				       u64 addr, size_t size,
3524 				       enum dma_data_direction direction)
3525 {
3526 	dma_unmap_single(dev->dma_device, addr, size, direction);
3527 }
3528 
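/*
 * Illustrative sketch (not part of this header): map a kernel buffer for an
 * outgoing transfer and unmap it once the corresponding completion has been
 * reaped.  "ib_dev", "buf" and "len" are caller assumptions.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(ib_dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(ib_dev, dma_addr))
 *		return -ENOMEM;
 *
 *	... post a work request whose ib_sge.addr is dma_addr ...
 *
 *	ib_dma_unmap_single(ib_dev, dma_addr, len, DMA_TO_DEVICE);
 */
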
3529 /**
3530  * ib_dma_map_page - Map a physical page to DMA address
3531  * @dev: The device for which the dma_addr is to be created
3532  * @page: The page to be mapped
3533  * @offset: The offset within the page
3534  * @size: The size of the region in bytes
3535  * @direction: The direction of the DMA
3536  */
3537 static inline u64 ib_dma_map_page(struct ib_device *dev,
3538 				  struct page *page,
3539 				  unsigned long offset,
3540 				  size_t size,
3541 				  enum dma_data_direction direction)
3542 {
3543 	return dma_map_page(dev->dma_device, page, offset, size, direction);
3544 }
3545 
3546 /**
3547  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3548  * @dev: The device for which the DMA address was created
3549  * @addr: The DMA address
3550  * @size: The size of the region in bytes
3551  * @direction: The direction of the DMA
3552  */
3553 static inline void ib_dma_unmap_page(struct ib_device *dev,
3554 				     u64 addr, size_t size,
3555 				     enum dma_data_direction direction)
3556 {
3557 	dma_unmap_page(dev->dma_device, addr, size, direction);
3558 }
3559 
3560 /**
3561  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3562  * @dev: The device for which the DMA addresses are to be created
3563  * @sg: The array of scatter/gather entries
3564  * @nents: The number of scatter/gather entries
3565  * @direction: The direction of the DMA
3566  */
3567 static inline int ib_dma_map_sg(struct ib_device *dev,
3568 				struct scatterlist *sg, int nents,
3569 				enum dma_data_direction direction)
3570 {
3571 	return dma_map_sg(dev->dma_device, sg, nents, direction);
3572 }
3573 
3574 /**
3575  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3576  * @dev: The device for which the DMA addresses were created
3577  * @sg: The array of scatter/gather entries
3578  * @nents: The number of scatter/gather entries
3579  * @direction: The direction of the DMA
3580  */
3581 static inline void ib_dma_unmap_sg(struct ib_device *dev,
3582 				   struct scatterlist *sg, int nents,
3583 				   enum dma_data_direction direction)
3584 {
3585 	dma_unmap_sg(dev->dma_device, sg, nents, direction);
3586 }
3587 
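/*
 * Illustrative sketch (not part of this header): map a scatterlist for an
 * incoming transfer.  Note that the unmap call takes the original entry
 * count, not the (possibly smaller) value returned by the map call.
 * "ib_dev", "sgl" and "sg_cnt" are caller assumptions.
 *
 *	int nents;
 *
 *	nents = ib_dma_map_sg(ib_dev, sgl, sg_cnt, DMA_FROM_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *
 *	... build ib_sge entries from the nents mapped elements ...
 *
 *	ib_dma_unmap_sg(ib_dev, sgl, sg_cnt, DMA_FROM_DEVICE);
 */
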
3588 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3589 				      struct scatterlist *sg, int nents,
3590 				      enum dma_data_direction direction,
3591 				      unsigned long dma_attrs)
3592 {
3593 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3594 				dma_attrs);
3595 }
3596 
3597 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3598 					 struct scatterlist *sg, int nents,
3599 					 enum dma_data_direction direction,
3600 					 unsigned long dma_attrs)
3601 {
3602 	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3603 }

3604 /**
3605  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3606  * @dev: The device for which the DMA addresses were created
3607  * @sg: The scatter/gather entry
3608  *
3609  * Note: this function is obsolete. To do: change all occurrences of
3610  * ib_sg_dma_address() into sg_dma_address().
3611  */
3612 static inline u64 ib_sg_dma_address(struct ib_device *dev,
3613 				    struct scatterlist *sg)
3614 {
3615 	return sg_dma_address(sg);
3616 }
3617 
3618 /**
3619  * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3620  * @dev: The device for which the DMA addresses were created
3621  * @sg: The scatter/gather entry
3622  *
3623  * Note: this function is obsolete. To do: change all occurrences of
3624  * ib_sg_dma_len() into sg_dma_len().
3625  */
3626 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3627 					 struct scatterlist *sg)
3628 {
3629 	return sg_dma_len(sg);
3630 }
3631 
3632 /**
3633  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3634  * @dev: The device for which the DMA address was created
3635  * @addr: The DMA address
3636  * @size: The size of the region in bytes
3637  * @dir: The direction of the DMA
3638  */
3639 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3640 					      u64 addr,
3641 					      size_t size,
3642 					      enum dma_data_direction dir)
3643 {
3644 	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3645 }
3646 
3647 /**
3648  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3649  * @dev: The device for which the DMA address was created
3650  * @addr: The DMA address
3651  * @size: The size of the region in bytes
3652  * @dir: The direction of the DMA
3653  */
3654 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3655 						 u64 addr,
3656 						 size_t size,
3657 						 enum dma_data_direction dir)
3658 {
3659 	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3660 }
3661 
3662 /**
3663  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3664  * @dev: The device for which the DMA address is requested
3665  * @size: The size of the region to allocate in bytes
3666  * @dma_handle: A pointer for returning the DMA address of the region
3667  * @flag: memory allocator flags
3668  */
3669 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3670 					   size_t size,
3671 					   dma_addr_t *dma_handle,
3672 					   gfp_t flag)
3673 {
3674 	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3675 }
3676 
3677 /**
3678  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3679  * @dev: The device for which the DMA addresses were allocated
3680  * @size: The size of the region
3681  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3682  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3683  */
3684 static inline void ib_dma_free_coherent(struct ib_device *dev,
3685 					size_t size, void *cpu_addr,
3686 					dma_addr_t dma_handle)
3687 {
3688 	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3689 }
3690 
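/*
 * Illustrative sketch (not part of this header): allocate a page of coherent
 * memory, e.g. for a ring shared with the HCA, and free it on teardown.
 * "ib_dev" is a caller assumption.
 *
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	vaddr = ib_dma_alloc_coherent(ib_dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...
 *	ib_dma_free_coherent(ib_dev, PAGE_SIZE, vaddr, dma);
 */
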
3691 /**
3692  * ib_dereg_mr - Deregisters a memory region and removes it from the
3693  *   HCA translation table.
3694  * @mr: The memory region to deregister.
3695  *
3696  * This function can fail if the memory region has memory windows bound to it.
3697  */
3698 int ib_dereg_mr(struct ib_mr *mr);
3699 
3700 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3701 			  enum ib_mr_type mr_type,
3702 			  u32 max_num_sg);
3703 
3704 /**
3705  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3706  *   R_Key and L_Key.
3707  * @mr: struct ib_mr pointer to be updated.
3708  * @newkey: new key to be used.
3709  */
3710 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3711 {
3712 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3713 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3714 }
3715 
3716 /**
3717  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3718  * for calculating a new rkey for type 2 memory windows.
3719  * @rkey: the rkey to increment.
3720  */
3721 static inline u32 ib_inc_rkey(u32 rkey)
3722 {
3723 	const u32 mask = 0x000000ff;
3724 	return ((rkey + 1) & mask) | (rkey & ~mask);
3725 }
3726 
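/*
 * Illustrative sketch (not part of this header): advance the key portion of
 * a fast-registration MR before registering it again, so that a peer still
 * holding the previous rkey cannot address the new mapping.
 *
 *	u8 key = mr->rkey & 0x000000ff;
 *
 *	ib_update_fast_reg_key(mr, ++key);
 */
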
3727 /**
3728  * ib_alloc_fmr - Allocates an unmapped fast memory region.
3729  * @pd: The protection domain associated with the unmapped region.
3730  * @mr_access_flags: Specifies the memory access rights.
3731  * @fmr_attr: Attributes of the unmapped region.
3732  *
3733  * A fast memory region must be mapped before it can be used as part of
3734  * a work request.
3735  */
3736 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3737 			    int mr_access_flags,
3738 			    struct ib_fmr_attr *fmr_attr);
3739 
3740 /**
3741  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3742  * @fmr: The fast memory region to associate with the pages.
3743  * @page_list: An array of physical pages to map to the fast memory region.
3744  * @list_len: The number of pages in page_list.
3745  * @iova: The I/O virtual address to use with the mapped region.
3746  */
3747 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3748 				  u64 *page_list, int list_len,
3749 				  u64 iova)
3750 {
3751 	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3752 }
3753 
3754 /**
3755  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3756  * @fmr_list: A linked list of fast memory regions to unmap.
3757  */
3758 int ib_unmap_fmr(struct list_head *fmr_list);
3759 
3760 /**
3761  * ib_dealloc_fmr - Deallocates a fast memory region.
3762  * @fmr: The fast memory region to deallocate.
3763  */
3764 int ib_dealloc_fmr(struct ib_fmr *fmr);
3765 
3766 /**
3767  * ib_attach_mcast - Attaches the specified QP to a multicast group.
3768  * @qp: QP to attach to the multicast group.  The QP must be type
3769  *   IB_QPT_UD.
3770  * @gid: Multicast group GID.
3771  * @lid: Multicast group LID in host byte order.
3772  *
3773  * In order to send and receive multicast packets, subnet
3774  * administration must have created the multicast group and configured
3775  * the fabric appropriately.  The port associated with the specified
3776  * QP must also be a member of the multicast group.
3777  */
3778 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3779 
3780 /**
3781  * ib_detach_mcast - Detaches the specified QP from a multicast group.
3782  * @qp: QP to detach from the multicast group.
3783  * @gid: Multicast group GID.
3784  * @lid: Multicast group LID in host byte order.
3785  */
3786 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3787 
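/*
 * Illustrative sketch (not part of this header): join a UD QP to a multicast
 * group and leave it on teardown.  "mgid" and "mlid" are assumed to have been
 * obtained from a prior subnet administration multicast join.
 *
 *	int ret;
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */
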
3788 /**
3789  * ib_alloc_xrcd - Allocates an XRC domain.
3790  * @device: The device on which to allocate the XRC domain.
3791  * @caller: Module name for kernel consumers
3792  */
3793 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
3794 #define ib_alloc_xrcd(device) \
3795 	__ib_alloc_xrcd((device), KBUILD_MODNAME)
3796 
3797 /**
3798  * ib_dealloc_xrcd - Deallocates an XRC domain.
3799  * @xrcd: The XRC domain to deallocate.
3800  */
3801 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3802 
3803 struct ib_flow *ib_create_flow(struct ib_qp *qp,
3804 			       struct ib_flow_attr *flow_attr, int domain);
3805 int ib_destroy_flow(struct ib_flow *flow_id);
3806 
3807 static inline int ib_check_mr_access(int flags)
3808 {
3809 	/*
3810 	 * Local write permission is required if remote write or
3811 	 * remote atomic permission is also requested.
3812 	 */
3813 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3814 	    !(flags & IB_ACCESS_LOCAL_WRITE))
3815 		return -EINVAL;
3816 
3817 	return 0;
3818 }
3819 
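/*
 * Illustrative sketch (not part of this header): validate caller-supplied
 * "access_flags" before registering an MR; remote write or remote atomic
 * without local write is rejected with -EINVAL.
 *
 *	int ret;
 *
 *	ret = ib_check_mr_access(access_flags);
 *	if (ret)
 *		return ret;
 */
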
3820 static inline bool ib_access_writable(int access_flags)
3821 {
3822 	/*
3823 	 * We have writable memory backing the MR if any of the following
3824 	 * access flags are set.  "Local write" and "remote write" obviously
3825 	 * require write access.  "Remote atomic" can do things like fetch and
3826 	 * add, which will modify memory, and "MW bind" can change permissions
3827 	 * by binding a window.
3828 	 */
3829 	return access_flags &
3830 		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
3831 		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
3832 }
3833 
3834 /**
3835  * ib_check_mr_status - lightweight check of MR status.
3836  *     This routine may provide status checks on a selected
3837  *     ib_mr. First use is for signature status check.
3838  *
3839  * @mr: A memory region.
3840  * @check_mask: Bitmask of which checks to perform from
3841  *     ib_mr_status_check enumeration.
3842  * @mr_status: The container of relevant status checks.
3843  *     Failed checks will be indicated in the status bitmask
3844  *     and the relevant info will be in the error item.
3845  */
3846 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3847 		       struct ib_mr_status *mr_status);
3848 
3849 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3850 					    u16 pkey, const union ib_gid *gid,
3851 					    const struct sockaddr *addr);
3852 struct ib_wq *ib_create_wq(struct ib_pd *pd,
3853 			   struct ib_wq_init_attr *init_attr);
3854 int ib_destroy_wq(struct ib_wq *wq);
3855 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3856 		 u32 wq_attr_mask);
3857 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3858 						 struct ib_rwq_ind_table_init_attr*
3859 						 wq_ind_table_init_attr);
3860 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3861 
3862 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3863 		 unsigned int *sg_offset, unsigned int page_size);
3864 
3865 static inline int
3866 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3867 		  unsigned int *sg_offset, unsigned int page_size)
3868 {
3869 	int n;
3870 
3871 	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3872 	mr->iova = 0;
3873 
3874 	return n;
3875 }
3876 
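/*
 * Illustrative sketch (not part of this header): register a DMA-mapped
 * scatterlist through a fast-registration MR and make it usable by posting
 * an IB_WR_REG_MR work request.  "mr" is assumed to come from a prior
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents), and "sgl"/"nents" from a prior
 * ib_dma_map_sg().
 *
 *	struct ib_reg_wr reg = {};
 *	struct ib_send_wr *bad_wr;
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n < nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	reg.wr.opcode = IB_WR_REG_MR;
 *	reg.mr        = mr;
 *	reg.key       = mr->rkey;
 *	reg.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 *
 *	return ib_post_send(qp, &reg.wr, &bad_wr);
 */
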
3877 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3878 		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3879 
3880 void ib_drain_rq(struct ib_qp *qp);
3881 void ib_drain_sq(struct ib_qp *qp);
3882 void ib_drain_qp(struct ib_qp *qp);
3883 
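/*
 * Illustrative sketch (not part of this header): quiesce a QP before
 * destroying it, so that all outstanding work requests have completed and
 * their completion handlers have run.  This requires the QP's CQs to have
 * been allocated with ib_alloc_cq() using IB_POLL_SOFTIRQ or
 * IB_POLL_WORKQUEUE.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */
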
3884 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3885 
3886 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3887 {
3888 	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3889 		return attr->roce.dmac;
3890 	return NULL;
3891 }
3892 
3893 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3894 {
3895 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3896 		attr->ib.dlid = (u16)dlid;
3897 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3898 		attr->opa.dlid = dlid;
3899 }
3900 
3901 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3902 {
3903 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3904 		return attr->ib.dlid;
3905 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3906 		return attr->opa.dlid;
3907 	return 0;
3908 }
3909 
3910 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3911 {
3912 	attr->sl = sl;
3913 }
3914 
3915 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3916 {
3917 	return attr->sl;
3918 }
3919 
3920 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3921 					 u8 src_path_bits)
3922 {
3923 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3924 		attr->ib.src_path_bits = src_path_bits;
3925 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3926 		attr->opa.src_path_bits = src_path_bits;
3927 }
3928 
3929 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3930 {
3931 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3932 		return attr->ib.src_path_bits;
3933 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3934 		return attr->opa.src_path_bits;
3935 	return 0;
3936 }
3937 
3938 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
3939 					bool make_grd)
3940 {
3941 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3942 		attr->opa.make_grd = make_grd;
3943 }
3944 
3945 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
3946 {
3947 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3948 		return attr->opa.make_grd;
3949 	return false;
3950 }
3951 
3952 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
3953 {
3954 	attr->port_num = port_num;
3955 }
3956 
3957 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
3958 {
3959 	return attr->port_num;
3960 }
3961 
3962 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
3963 					   u8 static_rate)
3964 {
3965 	attr->static_rate = static_rate;
3966 }
3967 
3968 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
3969 {
3970 	return attr->static_rate;
3971 }
3972 
3973 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
3974 					enum ib_ah_flags flag)
3975 {
3976 	attr->ah_flags = flag;
3977 }
3978 
3979 static inline enum ib_ah_flags
3980 		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
3981 {
3982 	return attr->ah_flags;
3983 }
3984 
3985 static inline const struct ib_global_route
3986 		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
3987 {
3988 	return &attr->grh;
3989 }
3990 
3991 /* To retrieve and modify the GRH */
3992 static inline struct ib_global_route
3993 		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
3994 {
3995 	return &attr->grh;
3996 }
3997 
3998 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
3999 {
4000 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4001 
4002 	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4003 }
4004 
4005 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4006 					     __be64 prefix)
4007 {
4008 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4009 
4010 	grh->dgid.global.subnet_prefix = prefix;
4011 }
4012 
4013 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4014 					    __be64 if_id)
4015 {
4016 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4017 
4018 	grh->dgid.global.interface_id = if_id;
4019 }
4020 
4021 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4022 				   union ib_gid *dgid, u32 flow_label,
4023 				   u8 sgid_index, u8 hop_limit,
4024 				   u8 traffic_class)
4025 {
4026 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4027 
4028 	attr->ah_flags = IB_AH_GRH;
4029 	if (dgid)
4030 		grh->dgid = *dgid;
4031 	grh->flow_label = flow_label;
4032 	grh->sgid_index = sgid_index;
4033 	grh->hop_limit = hop_limit;
4034 	grh->traffic_class = traffic_class;
4035 	grh->sgid_attr = NULL;
4036 }
4037 
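/*
 * Illustrative sketch (not part of this header): fill an address handle
 * attribute with the rdma_ah_set_*() helpers and create an AH from it.
 * rdma_ah_find_type() (defined further below) selects the attribute type for
 * the port.  "ib_dev", "pd", "port_num", "dlid", "dgid" and "sgid_index" are
 * caller assumptions.
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(ib_dev, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 *
 *	ah = rdma_create_ah(pd, &attr);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */
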
4038 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4039 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4040 			     u32 flow_label, u8 hop_limit, u8 traffic_class,
4041 			     const struct ib_gid_attr *sgid_attr);
4042 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4043 		       const struct rdma_ah_attr *src);
4044 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4045 			  const struct rdma_ah_attr *new);
4046 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4047 
4048 /**
4049  * rdma_ah_find_type - Return address handle type.
4050  *
4051  * @dev: Device to be checked
4052  * @port_num: Port number
4053  */
4054 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4055 						       u8 port_num)
4056 {
4057 	if (rdma_protocol_roce(dev, port_num))
4058 		return RDMA_AH_ATTR_TYPE_ROCE;
4059 	if (rdma_protocol_ib(dev, port_num)) {
4060 		if (rdma_cap_opa_ah(dev, port_num))
4061 			return RDMA_AH_ATTR_TYPE_OPA;
4062 		return RDMA_AH_ATTR_TYPE_IB;
4063 	}
4064 
4065 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4066 }
4067 
4068 /**
4069  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4070  *     In the current implementation the only way to
4071  *     get the 32bit lid is from other sources for OPA.
4072  *     For IB, lids will always be 16bits so cast the
4073  *     value accordingly.
4074  *
4075  * @lid: A 32bit LID
4076  */
4077 static inline u16 ib_lid_cpu16(u32 lid)
4078 {
4079 	WARN_ON_ONCE(lid & 0xFFFF0000);
4080 	return (u16)lid;
4081 }
4082 
4083 /**
4084  * ib_lid_be16 - Return lid in 16bit BE encoding.
4085  *
4086  * @lid: A 32bit LID
4087  */
4088 static inline __be16 ib_lid_be16(u32 lid)
4089 {
4090 	WARN_ON_ONCE(lid & 0xFFFF0000);
4091 	return cpu_to_be16((u16)lid);
4092 }
4093 
4094 /**
4095  * ib_get_vector_affinity - Get the affinity mappings of a given completion
4096  *   vector
4097  * @device:         the rdma device
4098  * @comp_vector:    index of completion vector
4099  *
4100  * Returns the cpu map of the given completion vector, or NULL if the
4101  * completion vector index is out of range or the device driver does not
4102  * implement get_vector_affinity.
4103  */
4104 static inline const struct cpumask *
4105 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4106 {
4107 	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4108 	    !device->get_vector_affinity)
4109 		return NULL;
4110 
4111 	return device->get_vector_affinity(device, comp_vector);
4112 }
4114 
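/*
 * Illustrative sketch (not part of this header): pick a completion vector
 * whose interrupt affinity includes the current CPU, falling back to vector
 * 0 when no such vector is found.  "ib_dev" is a caller assumption.
 *
 *	const struct cpumask *mask;
 *	int vec;
 *
 *	for (vec = 0; vec < ib_dev->num_comp_vectors; vec++) {
 *		mask = ib_get_vector_affinity(ib_dev, vec);
 *		if (mask && cpumask_test_cpu(raw_smp_processor_id(), mask))
 *			break;
 *	}
 *	if (vec == ib_dev->num_comp_vectors)
 *		vec = 0;
 */
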
4115 /**
4116  * rdma_roce_rescan_device - Rescan all of the network devices in the system
4117  * and add their gids, as needed, to the relevant RoCE devices.
4118  *
4119  * @device:         the rdma device
4120  */
4121 void rdma_roce_rescan_device(struct ib_device *ibdev);
4122 
4123 #endif /* IB_VERBS_H */
4124