/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 */

#ifndef IB_VERBS_H
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
struct rdma_cm_id;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

struct ib_ucq_object;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...)                       \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
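
/*
 * Usage sketch (illustrative, not part of the original header): the
 * ibdev_*() helpers print messages prefixed with the device name.  The
 * "dev" pointer and the create_hw_qp() error path below are assumptions
 * made for the example.
 *
 *	ret = create_hw_qp(dev);
 *	if (ret)
 *		ibdev_err(&dev->ibdev, "QP creation failed: %d\n", ret);
 *	else
 *		ibdev_dbg(&dev->ibdev, "QP created\n");
 */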

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
do {                                                                    \
	static DEFINE_RATELIMIT_STATE(_rs,                              \
				      DEFAULT_RATELIMIT_INTERVAL,       \
				      DEFAULT_RATELIMIT_BURST);         \
	if (__ratelimit(&_rs))                                          \
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
do {                                                                    \
	static DEFINE_RATELIMIT_STATE(_rs,                              \
				      DEFAULT_RATELIMIT_INTERVAL,       \
				      DEFAULT_RATELIMIT_BURST);         \
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
				    ##__VA_ARGS__);                     \
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB        = 0,
	IB_GID_TYPE_ROCE      = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	u16			index;
	u8			port_num;
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}
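
/*
 * Example (hedged sketch): for RoCE GIDs the two helpers above are
 * consistent with each other; a v4-mapped GID entry resolves to
 * RDMA_NETWORK_IPV4, which maps back to the RoCEv2 GID type.  "attr"
 * is assumed to be a valid GID table entry.
 *
 *	enum rdma_network_type nt = rdma_gid_attr_network_type(attr);
 *
 *	if (ib_network_to_gid_type(nt) == IB_GID_TYPE_ROCE_UDP_ENCAP)
 *		pr_debug("GID index %u is RoCEv2\n", attr->index);
 */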

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
	IB_DEVICE_RAW_MULTI			= (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
	/* Not in use, former INIT_TYPE		= (1 << 9),*/
	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey field in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
	/* Reserved, old SEND_W_INV		= (1 << 16),*/
	IB_DEVICE_MEM_WINDOW			= (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
	IB_DEVICE_UD_TSO			= (1 << 19),
	IB_DEVICE_XRC				= (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with a single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
	IB_DEVICE_INTEGRITY_HANDOVER		= (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA		= (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
	IB_DEVICE_ALLOW_USER_UNREG		= (1ULL << 37),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t  rc_odp_caps;
		uint32_t  uc_odp_caps;
		uint32_t  ud_odp_caps;
		uint32_t  xrc_odp_caps;
	} per_transport_caps;
};
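
/*
 * Example (hedged sketch): checking whether a device supports ODP for
 * RC receive before relying on it.  "attr" is assumed to hold the
 * result of a prior device query.
 *
 *	bool rc_recv_odp =
 *		(attr->odp_caps.general_caps & IB_ODP_SUPPORT) &&
 *		(attr->odp_caps.per_transport_caps.rc_odp_caps &
 *		 IB_ODP_SUPPORT_RECV);
 */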

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	u32		comp_vector;
	u32		flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16     max_cq_moderation_count;
	u16     max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64		length;
	u64		offset;
	u32		access_flags;
};

struct ib_dm_alloc_attr {
	u64	length;
	u32	alignment;
	u32	flags;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	int			max_send_sge;
	int			max_recv_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	unsigned int		max_pi_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in kHz */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps       cq_caps;
	u64			max_dm_size;
	/* Max entries for sgl for optimized performance per READ */
	u32			max_sgl_rd;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

enum opa_mtu {
	OPA_MTU_8192 = 6,
	OPA_MTU_10240 = 7
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
	}
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
}
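
/*
 * Example (hedged sketch): clamping a requested byte MTU to a port's
 * active MTU.  Taking the smaller enum value works because the IB_MTU_*
 * encodings above increase monotonically with MTU size; "port_attr" is
 * assumed to come from a prior port query.
 *
 *	enum ib_mtu path_mtu = min(ib_mtu_int_to_enum(4500),
 *				   port_attr.active_mtu);
 */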

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_2X	= 16,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	  return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64
};
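
/*
 * Example (hedged sketch): deriving a rough link rate from the port
 * attributes.  ib_width_enum_to_int() turns the width flag into a lane
 * count; note the IB_SPEED_* values are bit flags, not rates, so the
 * per-lane rate (25 Gb/sec for EDR here) is supplied by the caller.
 *
 *	int lanes = ib_width_enum_to_int(port_attr.active_width);
 *
 *	if (lanes > 0 && port_attr.active_speed == IB_SPEED_EDR)
 *		pr_info("link: %d x 25 Gb/sec\n", lanes);
 */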

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *    of counters, which are 64 bits and not guaranteed to be written
 *    atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If @names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
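
/*
 * Usage sketch (illustrative): a driver allocating its stats structure
 * from a static name table.  The names are assumptions; the ARRAY_SIZE()
 * use mirrors the num_counters requirement documented above.
 *
 *	static const char * const hw_counter_names[] = {
 *		"rx_packets", "tx_packets",
 *	};
 *
 *	struct rdma_hw_stats *stats =
 *		rdma_alloc_hw_stats_struct(hw_counter_names,
 *					   ARRAY_SIZE(hw_counter_names),
 *					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */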

/* Define bits for the various kinds of functionality a port needs the core
 * to support.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	u32                     phys_mtu;
	int			gid_tbl_len;
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8                      phys_state;
	u16			port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
	IB_PORT_OPA_MASK_CHG		= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
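
/*
 * Usage sketch (illustrative): pairing INIT_IB_EVENT_HANDLER() with the
 * registration API declared later in this header.  "my_event_handler"
 * and the surrounding context are assumptions for the example.
 *
 *	static void my_event_handler(struct ib_event_handler *h,
 *				     struct ib_event *event)
 *	{
 *		pr_info("%s on port %u\n", ib_event_msg(event->event),
 *			event->element.port_num);
 *	}
 *
 *	struct ib_event_handler handler;
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, my_event_handler);
 *	ib_register_event_handler(&handler);
 */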

struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the IPv4 header
		 * is located in the last 20 bytes of the GRH.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS  = 19,
	IB_RATE_50_GBPS  = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that can register any
 *                            arbitrary SG lists (without the normal
 *                            MR constraints - see ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
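
/*
 * Example (hedged sketch): the rate helpers above convert between the
 * enum and link-rate units, e.g.:
 *
 *	ib_rate_to_mult(IB_RATE_10_GBPS) == 4	(4 * 2.5 Gbit/sec)
 *	ib_rate_to_mbps(IB_RATE_10_GBPS) == 10000
 *	mult_to_ib_rate(4)		 == IB_RATE_10_GBPS
 */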

struct rdma_ah_init_attr {
	struct rdma_ah_attr *ah_attr;
	u32 flags;
	struct net_device *xmit_slave;
};

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16			dlid;
	u8			src_path_bits;
};

struct roce_ah_attr {
	u8			dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32			dlid;
	u8			src_path_bits;
	bool			make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route	grh;
	u8			sl;
	u8			static_rate;
	u8			port_num;
	u8			ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
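
/*
 * Example (hedged sketch): the IB_WC_RECV encoding above lets a
 * completion handler distinguish receives with a single mask test.
 * The process_recv()/process_send() hooks are assumed consumer code.
 *
 *	static void handle_wc(struct ib_wc *wc)
 *	{
 *		if (wc->opcode & IB_WC_RECV)
 *			process_recv(wc);
 *		else
 *			process_send(wc);
 *	}
 */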

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	u32			slid;
	int			wc_flags;
	u16			pkey_index;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32		max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right number of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC = IB_UVERBS_QPT_RC,
	IB_QPT_UC = IB_UVERBS_QPT_UC,
	IB_QPT_UD = IB_UVERBS_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS		=
		IB_UVERBS_QP_CREATE_SCATTER_FCS,
	IB_QP_CREATE_CVLAN_STRIPPING		=
		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	/* Consumer's event_handler callback must not block */
	void                  (*event_handler)(struct ib_event *, void *);

	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u32			create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};
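
/*
 * Usage sketch (illustrative): minimal attributes for an RC QP.  The
 * CQs, PD and capacity values are assumptions for the example;
 * ib_create_qp() is declared later in this header.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *				 .max_send_sge = 1,  .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */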

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u32			rate_limit;
	struct net_device	*xmit_slave;
};

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
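
/*
 * Example (hedged sketch): the usual ib_cqe pattern embeds the cqe in a
 * request context and recovers it with container_of() in the done
 * callback.  "struct my_request" is an assumed consumer structure.
 *
 *	struct my_request {
 *		struct ib_cqe cqe;
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *	}
 */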

struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
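
/*
 * Usage sketch (illustrative): posting an RDMA WRITE.  The sge, rkey
 * and remote address are assumptions; ib_post_send() is declared later
 * in this header.
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *			.send_flags = IB_SEND_SIGNALED,
 *		},
 *		.remote_addr = remote_addr,
 *		.rkey        = rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */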

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index; /* valid for GSI only */
	u8			port_num;   /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};
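
/*
 * Example (hedged sketch): composing access flags for a registration
 * that the remote peer may read and write.  Whether a given flag is
 * accepted depends on the registration path and device used.
 *
 *	int access = IB_ACCESS_LOCAL_WRITE |
 *		     IB_ACCESS_REMOTE_READ |
 *		     IB_ACCESS_REMOTE_WRITE;
 */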

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or an initial attempt
	 * to remove the uobject via cleanup. The call may fail.
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
	/*
	 * uobj has been fully created, with the uobj->object set, but is being
	 * cleaned up before being committed
	 */
	RDMA_REMOVE_ABORT_HWOBJ,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;
	/*
	 * 'closing' can be read by the driver only during a destroy callback,
	 * it is set when we are closing the file descriptor and indicates
	 * that mm_sem may be locked.
	 */
	bool closing;

	bool cleanup_retryable;

	struct ib_rdmacg_object	cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file  *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;
	struct rw_semaphore	tgt_qps_rwsem;
	struct xarray		tgt_qps;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,		   /* caller context, no hw completions */
};

struct ib_cq {
	struct ib_device       *device;
	struct ib_ucq_object   *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int			cqe;
	unsigned int		cqe_used;
	atomic_t		usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc		*wc;
	struct list_head        pool_entry;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;

	/* updated only by trace points */
	ktime_t timestamp;
	u8 interrupt:1;
	u8 shared:1;
	unsigned int comp_vector;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_usrq_object  *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	/* Stripping the cvlan from an incoming packet and reporting it in
	 * the matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device       *device;
	struct ib_uwq_object   *uobject;
	void		    *wq_context;
	void		    (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32		wq_num;
	enum ib_wq_state       state;
	enum ib_wq_type	wq_type;
	atomic_t		usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
				IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};

struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type	wq_type;
	u32		max_wr;
	u32		max_sge;
	struct	ib_cq	       *cq;
	void		    (*event_handler)(struct ib_event *, void *);
	u32		create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};

struct ib_wq_attr {
	enum	ib_wq_state	wq_state;
	enum	ib_wq_state	curr_wq_state;
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
	u32		ind_tbl_num;
	u32		log_ind_tbl_size;
	struct ib_wq	**ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq	**ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u8			port_num;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head        shared_qp_list;
	void                   *security;
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uqp_object   *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	u8			port;

	bool			integrity_en;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry     res;

	/* The counter the qp is bound to */
	struct rdma_counter    *counter;
};

struct ib_dm {
	struct ib_device  *device;
	u32		   length;
	u32		   flags;
	struct ib_uobject *uobject;
	atomic_t	   usecnt;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	u32		   lkey;
	u32		   rkey;
	u64		   iova;
	u64		   length;
	unsigned int	   page_size;
	enum ib_mr_type	   type;
	bool		   need_inval;
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};

	struct ib_dm      *dm;
	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP                = 0x34,
	/* L4 headers */
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_eth {
	u32			  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};
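
/*
 * Example (hedged sketch): matching a single destination MAC with an
 * Ethernet flow spec; an all-ones mask byte means "compare this byte".
 * The MAC address is an assumption for the example.
 *
 *	struct ib_flow_spec_eth spec = {
 *		.type = IB_FLOW_SPEC_ETH,
 *		.size = sizeof(spec),
 *		.val  = { .dst_mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
 *		.mask = { .dst_mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } },
 *	};
 */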
1898 
1899 struct ib_flow_ib_filter {
1900 	__be16 dlid;
1901 	__u8   sl;
1902 	/* Must be last */
1903 	u8	real_sz[];
1904 };
1905 
1906 struct ib_flow_spec_ib {
1907 	u32			 type;
1908 	u16			 size;
1909 	struct ib_flow_ib_filter val;
1910 	struct ib_flow_ib_filter mask;
1911 };
1912 
1913 /* IPv4 header flags */
1914 enum ib_ipv4_flags {
1915 	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1916 	IB_IPV4_MORE_FRAG = 0x4  /* Set on all fragments of a packet
1917 				    except the last one */
1918 };
1919 
1920 struct ib_flow_ipv4_filter {
1921 	__be32	src_ip;
1922 	__be32	dst_ip;
1923 	u8	proto;
1924 	u8	tos;
1925 	u8	ttl;
1926 	u8	flags;
1927 	/* Must be last */
1928 	u8	real_sz[];
1929 };
1930 
1931 struct ib_flow_spec_ipv4 {
1932 	u32			   type;
1933 	u16			   size;
1934 	struct ib_flow_ipv4_filter val;
1935 	struct ib_flow_ipv4_filter mask;
1936 };
1937 
1938 struct ib_flow_ipv6_filter {
1939 	u8	src_ip[16];
1940 	u8	dst_ip[16];
1941 	__be32	flow_label;
1942 	u8	next_hdr;
1943 	u8	traffic_class;
1944 	u8	hop_limit;
1945 	/* Must be last */
1946 	u8	real_sz[];
1947 };
1948 
1949 struct ib_flow_spec_ipv6 {
1950 	u32			   type;
1951 	u16			   size;
1952 	struct ib_flow_ipv6_filter val;
1953 	struct ib_flow_ipv6_filter mask;
1954 };
1955 
1956 struct ib_flow_tcp_udp_filter {
1957 	__be16	dst_port;
1958 	__be16	src_port;
1959 	/* Must be last */
1960 	u8	real_sz[];
1961 };
1962 
1963 struct ib_flow_spec_tcp_udp {
1964 	u32			      type;
1965 	u16			      size;
1966 	struct ib_flow_tcp_udp_filter val;
1967 	struct ib_flow_tcp_udp_filter mask;
1968 };
1969 
1970 struct ib_flow_tunnel_filter {
1971 	__be32	tunnel_id;
1972 	u8	real_sz[];
1973 };
1974 
1975 /* ib_flow_spec_tunnel describes the VXLAN tunnel;
1976  * the tunnel_id in val holds the VNI value
1977  */
1978 struct ib_flow_spec_tunnel {
1979 	u32			      type;
1980 	u16			      size;
1981 	struct ib_flow_tunnel_filter  val;
1982 	struct ib_flow_tunnel_filter  mask;
1983 };
1984 
1985 struct ib_flow_esp_filter {
1986 	__be32	spi;
1987 	__be32  seq;
1988 	/* Must be last */
1989 	u8	real_sz[];
1990 };
1991 
1992 struct ib_flow_spec_esp {
1993 	u32                           type;
1994 	u16			      size;
1995 	struct ib_flow_esp_filter     val;
1996 	struct ib_flow_esp_filter     mask;
1997 };
1998 
1999 struct ib_flow_gre_filter {
2000 	__be16 c_ks_res0_ver;
2001 	__be16 protocol;
2002 	__be32 key;
2003 	/* Must be last */
2004 	u8	real_sz[];
2005 };
2006 
2007 struct ib_flow_spec_gre {
2008 	u32                           type;
2009 	u16			      size;
2010 	struct ib_flow_gre_filter     val;
2011 	struct ib_flow_gre_filter     mask;
2012 };
2013 
2014 struct ib_flow_mpls_filter {
2015 	__be32 tag;
2016 	/* Must be last */
2017 	u8	real_sz[];
2018 };
2019 
2020 struct ib_flow_spec_mpls {
2021 	u32                           type;
2022 	u16			      size;
2023 	struct ib_flow_mpls_filter     val;
2024 	struct ib_flow_mpls_filter     mask;
2025 };
2026 
2027 struct ib_flow_spec_action_tag {
2028 	enum ib_flow_spec_type	      type;
2029 	u16			      size;
2030 	u32                           tag_id;
2031 };
2032 
2033 struct ib_flow_spec_action_drop {
2034 	enum ib_flow_spec_type	      type;
2035 	u16			      size;
2036 };
2037 
2038 struct ib_flow_spec_action_handle {
2039 	enum ib_flow_spec_type	      type;
2040 	u16			      size;
2041 	struct ib_flow_action	     *act;
2042 };
2043 
2044 enum ib_counters_description {
2045 	IB_COUNTER_PACKETS,
2046 	IB_COUNTER_BYTES,
2047 };
2048 
2049 struct ib_flow_spec_action_count {
2050 	enum ib_flow_spec_type type;
2051 	u16 size;
2052 	struct ib_counters *counters;
2053 };
2054 
2055 union ib_flow_spec {
2056 	struct {
2057 		u32			type;
2058 		u16			size;
2059 	};
2060 	struct ib_flow_spec_eth		eth;
2061 	struct ib_flow_spec_ib		ib;
2062 	struct ib_flow_spec_ipv4        ipv4;
2063 	struct ib_flow_spec_tcp_udp	tcp_udp;
2064 	struct ib_flow_spec_ipv6        ipv6;
2065 	struct ib_flow_spec_tunnel      tunnel;
2066 	struct ib_flow_spec_esp		esp;
2067 	struct ib_flow_spec_gre		gre;
2068 	struct ib_flow_spec_mpls	mpls;
2069 	struct ib_flow_spec_action_tag  flow_tag;
2070 	struct ib_flow_spec_action_drop drop;
2071 	struct ib_flow_spec_action_handle action;
2072 	struct ib_flow_spec_action_count flow_count;
2073 };
2074 
2075 struct ib_flow_attr {
2076 	enum ib_flow_attr_type type;
2077 	u16	     size;
2078 	u16	     priority;
2079 	u32	     flags;
2080 	u8	     num_of_specs;
2081 	u8	     port;
2082 	union ib_flow_spec flows[];
2083 };
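
/*
 * Illustrative sketch (not part of the original header): composing a
 * single-spec IB_FLOW_ATTR_NORMAL rule that matches one destination MAC.
 * The trailing flows[] array holds num_of_specs union ib_flow_spec
 * entries, so the attribute must be allocated with room for them
 * ("dmac" is an assumed u8[ETH_ALEN] variable):
 *
 *	struct ib_flow_attr *attr;
 *
 *	attr = kzalloc(sizeof(*attr) + sizeof(union ib_flow_spec),
 *		       GFP_KERNEL);
 *	if (!attr)
 *		return -ENOMEM;
 *	attr->type = IB_FLOW_ATTR_NORMAL;
 *	attr->size = sizeof(*attr) + sizeof(union ib_flow_spec);
 *	attr->num_of_specs = 1;
 *	attr->port = 1;
 *	attr->flows[0].eth.type = IB_FLOW_SPEC_ETH;
 *	attr->flows[0].eth.size = sizeof(struct ib_flow_spec_eth);
 *	ether_addr_copy(attr->flows[0].eth.val.dst_mac, dmac);
 *	eth_broadcast_addr(attr->flows[0].eth.mask.dst_mac);
 */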
2084 
2085 struct ib_flow {
2086 	struct ib_qp		*qp;
2087 	struct ib_device	*device;
2088 	struct ib_uobject	*uobject;
2089 };
2090 
2091 enum ib_flow_action_type {
2092 	IB_FLOW_ACTION_UNSPECIFIED,
2093 	IB_FLOW_ACTION_ESP = 1,
2094 };
2095 
2096 struct ib_flow_action_attrs_esp_keymats {
2097 	enum ib_uverbs_flow_action_esp_keymat			protocol;
2098 	union {
2099 		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2100 	} keymat;
2101 };
2102 
2103 struct ib_flow_action_attrs_esp_replays {
2104 	enum ib_uverbs_flow_action_esp_replay			protocol;
2105 	union {
2106 		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2107 	} replay;
2108 };
2109 
2110 enum ib_flow_action_attrs_esp_flags {
2111 	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2112 	 * This is done in order to share the same flags between user-space and
2113 	 * kernel and avoid an unnecessary translation.
2114 	 */
2115 
2116 	/* Kernel flags */
2117 	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2118 	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2119 };
2120 
2121 struct ib_flow_spec_list {
2122 	struct ib_flow_spec_list	*next;
2123 	union ib_flow_spec		spec;
2124 };
2125 
2126 struct ib_flow_action_attrs_esp {
2127 	struct ib_flow_action_attrs_esp_keymats		*keymat;
2128 	struct ib_flow_action_attrs_esp_replays		*replay;
2129 	struct ib_flow_spec_list			*encap;
2130 	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2131 	 * Value of 0 is a valid value.
2132 	 */
2133 	u32						esn;
2134 	u32						spi;
2135 	u32						seq;
2136 	u32						tfc_pad;
2137 	/* Use enum ib_flow_action_attrs_esp_flags */
2138 	u64						flags;
2139 	u64						hard_limit_pkts;
2140 };
2141 
2142 struct ib_flow_action {
2143 	struct ib_device		*device;
2144 	struct ib_uobject		*uobject;
2145 	enum ib_flow_action_type	type;
2146 	atomic_t			usecnt;
2147 };
2148 
2149 struct ib_mad;
2150 struct ib_grh;
2151 
2152 enum ib_process_mad_flags {
2153 	IB_MAD_IGNORE_MKEY	= 1,
2154 	IB_MAD_IGNORE_BKEY	= 2,
2155 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2156 };
2157 
2158 enum ib_mad_result {
2159 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2160 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2161 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2162 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2163 };
2164 
2165 struct ib_port_cache {
2166 	u64		      subnet_prefix;
2167 	struct ib_pkey_cache  *pkey;
2168 	struct ib_gid_table   *gid;
2169 	u8                     lmc;
2170 	enum ib_port_state     port_state;
2171 };
2172 
2173 struct ib_port_immutable {
2174 	int                           pkey_tbl_len;
2175 	int                           gid_tbl_len;
2176 	u32                           core_cap_flags;
2177 	u32                           max_mad_size;
2178 };
2179 
2180 struct ib_port_data {
2181 	struct ib_device *ib_dev;
2182 
2183 	struct ib_port_immutable immutable;
2184 
2185 	spinlock_t pkey_list_lock;
2186 	struct list_head pkey_list;
2187 
2188 	struct ib_port_cache cache;
2189 
2190 	spinlock_t netdev_lock;
2191 	struct net_device __rcu *netdev;
2192 	struct hlist_node ndev_hash_link;
2193 	struct rdma_port_counter port_counter;
2194 	struct rdma_hw_stats *hw_stats;
2195 };
2196 
2197 /* rdma netdev type - specifies protocol type */
2198 enum rdma_netdev_t {
2199 	RDMA_NETDEV_OPA_VNIC,
2200 	RDMA_NETDEV_IPOIB,
2201 };
2202 
2203 /**
2204  * struct rdma_netdev - rdma netdev
2205  * For cases where netstack interfacing is required.
2206  */
2207 struct rdma_netdev {
2208 	void              *clnt_priv;
2209 	struct ib_device  *hca;
2210 	u8                 port_num;
2211 	int                mtu;
2212 
2213 	/*
2214 	 * cleanup function must be specified.
2215 	 * FIXME: This is only used for OPA_VNIC and that usage should be
2216 	 * removed too.
2217 	 */
2218 	void (*free_rdma_netdev)(struct net_device *netdev);
2219 
2220 	/* control functions */
2221 	void (*set_id)(struct net_device *netdev, int id);
2222 	/* send packet */
2223 	int (*send)(struct net_device *dev, struct sk_buff *skb,
2224 		    struct ib_ah *address, u32 dqpn);
2225 	/* multicast */
2226 	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2227 			    union ib_gid *gid, u16 mlid,
2228 			    int set_qkey, u32 qkey);
2229 	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2230 			    union ib_gid *gid, u16 mlid);
2231 };
2232 
2233 struct rdma_netdev_alloc_params {
2234 	size_t sizeof_priv;
2235 	unsigned int txqs;
2236 	unsigned int rxqs;
2237 	void *param;
2238 
2239 	int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2240 				      struct net_device *netdev, void *param);
2241 };
2242 
2243 struct ib_odp_counters {
2244 	atomic64_t faults;
2245 	atomic64_t invalidations;
2246 	atomic64_t prefetch;
2247 };
2248 
2249 struct ib_counters {
2250 	struct ib_device	*device;
2251 	struct ib_uobject	*uobject;
2252 	/* num of objects attached */
2253 	atomic_t	usecnt;
2254 };
2255 
2256 struct ib_counters_read_attr {
2257 	u64	*counters_buff;
2258 	u32	ncounters;
2259 	u32	flags; /* use enum ib_read_counters_flags */
2260 };
2261 
2262 struct uverbs_attr_bundle;
2263 struct iw_cm_id;
2264 struct iw_cm_conn_param;
2265 
2266 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2267 	.size_##ib_struct =                                                    \
2268 		(sizeof(struct drv_struct) +                                   \
2269 		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2270 		 BUILD_BUG_ON_ZERO(                                            \
2271 			 !__same_type(((struct drv_struct *)NULL)->member,     \
2272 				      struct ib_struct)))
2273 
2274 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                         \
2275 	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2276 
2277 #define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2278 	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2279 
2280 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
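
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of the
 * original header): a provider embeds the ib_* object inside its own
 * structure and reports the combined size via INIT_RDMA_OBJ_SIZE() in
 * its ib_device_ops; the core then allocates the object with
 * rdma_zalloc_drv_obj() and invokes the matching .alloc_* callback.
 * The BUILD_BUG_ON_ZERO() checks force the member to sit at offset 0
 * and to really be the named ib_* type, keeping container_of() valid.
 *
 *	struct foo_pd {
 *		struct ib_pd ibpd;	// must be first, per the checks
 *		u32 pdn;		// driver-private state
 *	};
 *
 *	static const struct ib_device_ops foo_dev_ops = {
 *		.alloc_pd = foo_alloc_pd,
 *		.dealloc_pd = foo_dealloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, foo_pd, ibpd),
 *	};
 */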
2281 
2282 struct rdma_user_mmap_entry {
2283 	struct kref ref;
2284 	struct ib_ucontext *ucontext;
2285 	unsigned long start_pgoff;
2286 	size_t npages;
2287 	bool driver_removed;
2288 };
2289 
2290 /* Return the offset (in bytes) the user should pass to libc's mmap() */
2291 static inline u64
2292 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2293 {
2294 	return (u64)entry->start_pgoff << PAGE_SHIFT;
2295 }
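
/*
 * Illustrative sketch (hypothetical "resp" struct): a driver inserts a
 * mmap entry, returns the byte offset to userspace in its uverbs
 * response, and userspace later passes that offset to mmap(2):
 *
 *	ret = rdma_user_mmap_entry_insert(ucontext, entry, PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *	resp.mmap_offset = rdma_user_mmap_get_offset(entry);
 */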
2296 
2297 /**
2298  * struct ib_device_ops - InfiniBand device operations
2299  * This structure defines all the InfiniBand device operations. Providers
2300  * must define the operations they support; unsupported ones remain NULL.
2301  */
2302 struct ib_device_ops {
2303 	struct module *owner;
2304 	enum rdma_driver_id driver_id;
2305 	u32 uverbs_abi_ver;
2306 	unsigned int uverbs_no_driver_id_binding:1;
2307 
2308 	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2309 			 const struct ib_send_wr **bad_send_wr);
2310 	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2311 			 const struct ib_recv_wr **bad_recv_wr);
2312 	void (*drain_rq)(struct ib_qp *qp);
2313 	void (*drain_sq)(struct ib_qp *qp);
2314 	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2315 	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2316 	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2317 	int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2318 	int (*post_srq_recv)(struct ib_srq *srq,
2319 			     const struct ib_recv_wr *recv_wr,
2320 			     const struct ib_recv_wr **bad_recv_wr);
2321 	int (*process_mad)(struct ib_device *device, int process_mad_flags,
2322 			   u8 port_num, const struct ib_wc *in_wc,
2323 			   const struct ib_grh *in_grh,
2324 			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
2325 			   size_t *out_mad_size, u16 *out_mad_pkey_index);
2326 	int (*query_device)(struct ib_device *device,
2327 			    struct ib_device_attr *device_attr,
2328 			    struct ib_udata *udata);
2329 	int (*modify_device)(struct ib_device *device, int device_modify_mask,
2330 			     struct ib_device_modify *device_modify);
2331 	void (*get_dev_fw_str)(struct ib_device *device, char *str);
2332 	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2333 						     int comp_vector);
2334 	int (*query_port)(struct ib_device *device, u8 port_num,
2335 			  struct ib_port_attr *port_attr);
2336 	int (*modify_port)(struct ib_device *device, u8 port_num,
2337 			   int port_modify_mask,
2338 			   struct ib_port_modify *port_modify);
2339 	/**
2340 	 * The following mandatory functions are used only at device
2341 	 * registration.  Keep functions such as these at the end of this
2342 	 * structure to avoid cache line misses when accessing struct ib_device
2343 	 * in fast paths.
2344 	 */
2345 	int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2346 				  struct ib_port_immutable *immutable);
2347 	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2348 					       u8 port_num);
2349 	/**
2350 	 * When calling get_netdev, the HW vendor's driver should return the
2351 	 * net device of device @device at port @port_num or NULL if such
2352 	 * a net device doesn't exist. The vendor driver should call dev_hold
2353 	 * on this net device. The HW vendor's device driver must guarantee
2354 	 * that this function returns NULL before the net device has finished
2355 	 * the NETDEV_UNREGISTER state transition.
2356 	 */
2357 	struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2358 	/**
2359 	 * rdma netdev operation
2360 	 *
2361 	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2362 	 * must return -EOPNOTSUPP if it doesn't support the specified type.
2363 	 */
2364 	struct net_device *(*alloc_rdma_netdev)(
2365 		struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2366 		const char *name, unsigned char name_assign_type,
2367 		void (*setup)(struct net_device *));
2368 
2369 	int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2370 				      enum rdma_netdev_t type,
2371 				      struct rdma_netdev_alloc_params *params);
2372 	/**
2373 	 * query_gid should return the GID value for @device when the link
2374 	 * layer of @port_num is either IB or iWARP. It is a no-op if the
2375 	 * @port_num port uses the RoCE link layer.
2376 	 */
2377 	int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2378 			 union ib_gid *gid);
2379 	/**
2380 	 * When calling add_gid, the HW vendor's driver should add the gid
2381 	 * of device of port at gid index available at @attr. Meta-info of
2382 	 * that gid (for example, the network device related to this gid) is
2383 	 * available at @attr. @context allows the HW vendor driver to store
2384 	 * extra information together with a GID entry. The HW vendor driver may
2385 	 * allocate memory to contain this information and store it in @context
2386 	 * when a new GID entry is written. Params are consistent until the
2387 	 * next call of add_gid or delete_gid. The function should return 0 on
2388 	 * success or error otherwise. The function could be called
2389 	 * concurrently for different ports. This function is only called when
2390 	 * roce_gid_table is used.
2391 	 */
2392 	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2393 	/**
2394 	 * When calling del_gid, the HW vendor's driver should delete the
2395 	 * gid of device @device at gid index gid_index of port port_num
2396 	 * available in @attr.
2397 	 * Upon the deletion of a GID entry, the HW vendor must free any
2398 	 * allocated memory. The caller will clear @context afterwards.
2399 	 * This function is only called when roce_gid_table is used.
2400 	 */
2401 	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2402 	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2403 			  u16 *pkey);
2404 	int (*alloc_ucontext)(struct ib_ucontext *context,
2405 			      struct ib_udata *udata);
2406 	void (*dealloc_ucontext)(struct ib_ucontext *context);
2407 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2408 	/**
2409 	 * This will be called once refcount of an entry in mmap_xa reaches
2410 	 * zero. The type of the memory that was mapped may differ between
2411 	 * entries and is opaque to the rdma_user_mmap interface.
2412 	 * Therefore it needs to be implemented by the driver in mmap_free.
2413 	 */
2414 	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2415 	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2416 	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2417 	void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2418 	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2419 			 struct ib_udata *udata);
2420 	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2421 	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2422 	void (*destroy_ah)(struct ib_ah *ah, u32 flags);
2423 	int (*create_srq)(struct ib_srq *srq,
2424 			  struct ib_srq_init_attr *srq_init_attr,
2425 			  struct ib_udata *udata);
2426 	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2427 			  enum ib_srq_attr_mask srq_attr_mask,
2428 			  struct ib_udata *udata);
2429 	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2430 	void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2431 	struct ib_qp *(*create_qp)(struct ib_pd *pd,
2432 				   struct ib_qp_init_attr *qp_init_attr,
2433 				   struct ib_udata *udata);
2434 	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2435 			 int qp_attr_mask, struct ib_udata *udata);
2436 	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2437 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2438 	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2439 	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2440 			 struct ib_udata *udata);
2441 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2442 	void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2443 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2444 	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2445 	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2446 				     u64 virt_addr, int mr_access_flags,
2447 				     struct ib_udata *udata);
2448 	int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2449 			     u64 virt_addr, int mr_access_flags,
2450 			     struct ib_pd *pd, struct ib_udata *udata);
2451 	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2452 	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2453 				  u32 max_num_sg);
2454 	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2455 					    u32 max_num_data_sg,
2456 					    u32 max_num_meta_sg);
2457 	int (*advise_mr)(struct ib_pd *pd,
2458 			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2459 			 struct ib_sge *sg_list, u32 num_sge,
2460 			 struct uverbs_attr_bundle *attrs);
2461 	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2462 			 unsigned int *sg_offset);
2463 	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2464 			       struct ib_mr_status *mr_status);
2465 	struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2466 				  struct ib_udata *udata);
2467 	int (*dealloc_mw)(struct ib_mw *mw);
2468 	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2469 	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2470 	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2471 	void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2472 	struct ib_flow *(*create_flow)(struct ib_qp *qp,
2473 				       struct ib_flow_attr *flow_attr,
2474 				       int domain, struct ib_udata *udata);
2475 	int (*destroy_flow)(struct ib_flow *flow_id);
2476 	struct ib_flow_action *(*create_flow_action_esp)(
2477 		struct ib_device *device,
2478 		const struct ib_flow_action_attrs_esp *attr,
2479 		struct uverbs_attr_bundle *attrs);
2480 	int (*destroy_flow_action)(struct ib_flow_action *action);
2481 	int (*modify_flow_action_esp)(
2482 		struct ib_flow_action *action,
2483 		const struct ib_flow_action_attrs_esp *attr,
2484 		struct uverbs_attr_bundle *attrs);
2485 	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2486 				 int state);
2487 	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2488 			     struct ifla_vf_info *ivf);
2489 	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2490 			    struct ifla_vf_stats *stats);
2491 	int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
2492 			    struct ifla_vf_guid *node_guid,
2493 			    struct ifla_vf_guid *port_guid);
2494 	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2495 			   int type);
2496 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
2497 				   struct ib_wq_init_attr *init_attr,
2498 				   struct ib_udata *udata);
2499 	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2500 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2501 			 u32 wq_attr_mask, struct ib_udata *udata);
2502 	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2503 		struct ib_device *device,
2504 		struct ib_rwq_ind_table_init_attr *init_attr,
2505 		struct ib_udata *udata);
2506 	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2507 	struct ib_dm *(*alloc_dm)(struct ib_device *device,
2508 				  struct ib_ucontext *context,
2509 				  struct ib_dm_alloc_attr *attr,
2510 				  struct uverbs_attr_bundle *attrs);
2511 	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2512 	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2513 				   struct ib_dm_mr_attr *attr,
2514 				   struct uverbs_attr_bundle *attrs);
2515 	int (*create_counters)(struct ib_counters *counters,
2516 			       struct uverbs_attr_bundle *attrs);
2517 	void (*destroy_counters)(struct ib_counters *counters);
2518 	int (*read_counters)(struct ib_counters *counters,
2519 			     struct ib_counters_read_attr *counters_read_attr,
2520 			     struct uverbs_attr_bundle *attrs);
2521 	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2522 			    int data_sg_nents, unsigned int *data_sg_offset,
2523 			    struct scatterlist *meta_sg, int meta_sg_nents,
2524 			    unsigned int *meta_sg_offset);
2525 
2526 	/**
2527 	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2528 	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
2529 	 *   core when the device is removed.  A lifespan of -1 in the return
2530 	 *   struct tells the core to set a default lifespan.
2531 	 */
2532 	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2533 						u8 port_num);
2534 	/**
2535 	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2536 	 * @index - The index in the value array we wish to have updated, or
2537 	 *   num_counters if we want all stats updated
2538 	 * Return codes -
2539 	 *   < 0 - Error, no counters updated
2540 	 *   index - Updated the single counter pointed to by index
2541 	 *   num_counters - Updated all counters (will reset the timestamp
2542 	 *     and prevent further calls for lifespan milliseconds)
2543 	 * Drivers are allowed to update all counters in lieu of just the
2544 	 *   one given in index at their option
2545 	 */
2546 	int (*get_hw_stats)(struct ib_device *device,
2547 			    struct rdma_hw_stats *stats, u8 port, int index);
2548 	/*
2549 	 * This function is called once for each port when an ib device is
2550 	 * registered.
2551 	 */
2552 	int (*init_port)(struct ib_device *device, u8 port_num,
2553 			 struct kobject *port_sysfs);
2554 	/**
2555 	 * Allows rdma drivers to add their own restrack attributes.
2556 	 */
2557 	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2558 	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2559 	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2560 	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2561 	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2562 	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2563 	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2564 
2565 	/* Device lifecycle callbacks */
2566 	/*
2567 	 * Called after the device becomes registered, before clients are
2568 	 * attached
2569 	 */
2570 	int (*enable_driver)(struct ib_device *dev);
2571 	/*
2572 	 * This is called as part of ib_dealloc_device().
2573 	 */
2574 	void (*dealloc_driver)(struct ib_device *dev);
2575 
2576 	/* iWarp CM callbacks */
2577 	void (*iw_add_ref)(struct ib_qp *qp);
2578 	void (*iw_rem_ref)(struct ib_qp *qp);
2579 	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2580 	int (*iw_connect)(struct iw_cm_id *cm_id,
2581 			  struct iw_cm_conn_param *conn_param);
2582 	int (*iw_accept)(struct iw_cm_id *cm_id,
2583 			 struct iw_cm_conn_param *conn_param);
2584 	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2585 			 u8 pdata_len);
2586 	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2587 	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2588 	/**
2589 	 * counter_bind_qp - Bind a QP to a counter.
2590 	 * @counter - The counter to be bound. If counter->id is zero then
2591 	 *   the driver needs to allocate a new counter and set counter->id
2592 	 */
2593 	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2594 	/**
2595 	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2596 	 *   counter and bind it onto the default one
2597 	 */
2598 	int (*counter_unbind_qp)(struct ib_qp *qp);
2599 	/**
2600 	 * counter_dealloc - Deallocate the HW counter
2601 	 */
2602 	int (*counter_dealloc)(struct rdma_counter *counter);
2603 	/**
2604 	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2605 	 * the driver initialized data.
2606 	 */
2607 	struct rdma_hw_stats *(*counter_alloc_stats)(
2608 		struct rdma_counter *counter);
2609 	/**
2610 	 * counter_update_stats - Query the stats value of this counter
2611 	 */
2612 	int (*counter_update_stats)(struct rdma_counter *counter);
2613 
2614 	/**
2615 	 * Allows rdma drivers to add their own restrack attributes
2616 	 * dumped via 'rdma stat' iproute2 command.
2617 	 */
2618 	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2619 
2620 	/* query driver for its ucontext properties */
2621 	int (*query_ucontext)(struct ib_ucontext *context,
2622 			      struct uverbs_attr_bundle *attrs);
2623 
2624 	DECLARE_RDMA_OBJ_SIZE(ib_ah);
2625 	DECLARE_RDMA_OBJ_SIZE(ib_counters);
2626 	DECLARE_RDMA_OBJ_SIZE(ib_cq);
2627 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
2628 	DECLARE_RDMA_OBJ_SIZE(ib_srq);
2629 	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2630 	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2631 };
2632 
2633 struct ib_core_device {
2634 	/* dev must be the first element in this structure as long as the
2635 	 * union of ib_core_device and device exists in ib_device.
2636 	 */
2637 	struct device dev;
2638 	possible_net_t rdma_net;
2639 	struct kobject *ports_kobj;
2640 	struct list_head port_list;
2641 	struct ib_device *owner; /* reach back to owner ib_device */
2642 };
2643 
2644 struct rdma_restrack_root;
2645 struct ib_device {
2646 	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2647 	struct device                *dma_device;
2648 	struct ib_device_ops	     ops;
2649 	char                          name[IB_DEVICE_NAME_MAX];
2650 	struct rcu_head rcu_head;
2651 
2652 	struct list_head              event_handler_list;
2653 	/* Protects event_handler_list */
2654 	struct rw_semaphore event_handler_rwsem;
2655 
2656 	/* Protects QP's event_handler calls and open_qp list */
2657 	spinlock_t qp_open_list_lock;
2658 
2659 	struct rw_semaphore	      client_data_rwsem;
2660 	struct xarray                 client_data;
2661 	struct mutex                  unregistration_lock;
2662 
2663 	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2664 	rwlock_t cache_lock;
2665 	/**
2666 	 * port_data is indexed by port number
2667 	 */
2668 	struct ib_port_data *port_data;
2669 
2670 	int			      num_comp_vectors;
2671 
2672 	union {
2673 		struct device		dev;
2674 		struct ib_core_device	coredev;
2675 	};
2676 
2677 	/* First group for device attributes,
2678 	 * Second group for driver provided attributes (optional).
2679 	 * It is NULL terminated array.
2680 	 */
2681 	const struct attribute_group	*groups[3];
2682 
2683 	u64			     uverbs_cmd_mask;
2684 	u64			     uverbs_ex_cmd_mask;
2685 
2686 	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2687 	__be64			     node_guid;
2688 	u32			     local_dma_lkey;
2689 	u16                          is_switch:1;
2690 	/* Indicates kernel verbs support, should not be used in drivers */
2691 	u16                          kverbs_provider:1;
2692 	/* CQ adaptive moderation (RDMA DIM) */
2693 	u16                          use_cq_dim:1;
2694 	u8                           node_type;
2695 	u8                           phys_port_cnt;
2696 	struct ib_device_attr        attrs;
2697 	struct attribute_group	     *hw_stats_ag;
2698 	struct rdma_hw_stats         *hw_stats;
2699 
2700 #ifdef CONFIG_CGROUP_RDMA
2701 	struct rdmacg_device         cg_device;
2702 #endif
2703 
2704 	u32                          index;
2705 
2706 	spinlock_t                   cq_pools_lock;
2707 	struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2708 
2709 	struct rdma_restrack_root *res;
2710 
2711 	const struct uapi_definition   *driver_def;
2712 
2713 	/*
2714 	 * Positive refcount indicates that the device is currently
2715 	 * registered and cannot be unregistered.
2716 	 */
2717 	refcount_t refcount;
2718 	struct completion unreg_completion;
2719 	struct work_struct unregistration_work;
2720 
2721 	const struct rdma_link_ops *link_ops;
2722 
2723 	/* Protects compat_devs xarray modifications */
2724 	struct mutex compat_devs_mutex;
2725 	/* Maintains compat devices for each net namespace */
2726 	struct xarray compat_devs;
2727 
2728 	/* Used by iWarp CM */
2729 	char iw_ifname[IFNAMSIZ];
2730 	u32 iw_driver_flags;
2731 	u32 lag_flags;
2732 };
2733 
2734 struct ib_client_nl_info;
2735 struct ib_client {
2736 	const char *name;
2737 	int (*add)(struct ib_device *ibdev);
2738 	void (*remove)(struct ib_device *, void *client_data);
2739 	void (*rename)(struct ib_device *dev, void *client_data);
2740 	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2741 			   struct ib_client_nl_info *res);
2742 	int (*get_global_nl_info)(struct ib_client_nl_info *res);
2743 
2744 	/* Returns the net_dev belonging to this ib_client and matching the
2745 	 * given parameters.
2746 	 * @dev:	 An RDMA device that the net_dev use for communication.
2747 	 * @port:	 A physical port number on the RDMA device.
2748 	 * @pkey:	 P_Key that the net_dev uses if applicable.
2749 	 * @gid:	 A GID that the net_dev uses to communicate.
2750 	 * @addr:	 An IP address the net_dev is configured with.
2751 	 * @client_data: The device's client data set by ib_set_client_data().
2752 	 *
2753 	 * An ib_client that implements a net_dev on top of RDMA devices
2754 	 * (such as IP over IB) should implement this callback, allowing the
2755 	 * rdma_cm module to find the right net_dev for a given request.
2756 	 *
2757 	 * The caller is responsible for calling dev_put on the returned
2758 	 * netdev. */
2759 	struct net_device *(*get_net_dev_by_params)(
2760 			struct ib_device *dev,
2761 			u8 port,
2762 			u16 pkey,
2763 			const union ib_gid *gid,
2764 			const struct sockaddr *addr,
2765 			void *client_data);
2766 
2767 	refcount_t uses;
2768 	struct completion uses_zero;
2769 	u32 client_id;
2770 
2771 	/* kverbs are not required by the client */
2772 	u8 no_kverbs_req:1;
2773 };
2774 
2775 /*
2776  * IB block DMA iterator
2777  *
2778  * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2779  * to a HW supported page size.
2780  */
2781 struct ib_block_iter {
2782 	/* internal states */
2783 	struct scatterlist *__sg;	/* sg holding the current aligned block */
2784 	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
2785 	unsigned int __sg_nents;	/* number of SG entries */
2786 	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
2787 	unsigned int __pg_bit;		/* alignment of current block */
2788 };
2789 
2790 struct ib_device *_ib_alloc_device(size_t size);
2791 #define ib_alloc_device(drv_struct, member)                                    \
2792 	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2793 				      BUILD_BUG_ON_ZERO(offsetof(              \
2794 					      struct drv_struct, member))),    \
2795 		     struct drv_struct, member)
2796 
2797 void ib_dealloc_device(struct ib_device *device);
2798 
2799 void ib_get_device_fw_str(struct ib_device *device, char *str);
2800 
2801 int ib_register_device(struct ib_device *device, const char *name);
2802 void ib_unregister_device(struct ib_device *device);
2803 void ib_unregister_driver(enum rdma_driver_id driver_id);
2804 void ib_unregister_device_and_put(struct ib_device *device);
2805 void ib_unregister_device_queued(struct ib_device *ib_dev);
2806 
2807 int ib_register_client   (struct ib_client *client);
2808 void ib_unregister_client(struct ib_client *client);
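
/*
 * Illustrative sketch (hypothetical "foo" driver): the usual
 * allocation/registration flow embeds struct ib_device in the driver
 * structure; on a registration failure the device is simply freed
 * again, since no clients were ever attached.
 *
 *	struct foo_dev {
 *		struct ib_device ibdev;	// the member named to ib_alloc_device()
 *		void __iomem *regs;
 *	};
 *
 *	struct foo_dev *fdev = ib_alloc_device(foo_dev, ibdev);
 *	int ret;
 *
 *	if (!fdev)
 *		return -ENOMEM;
 *	ib_set_device_ops(&fdev->ibdev, &foo_dev_ops);
 *	ret = ib_register_device(&fdev->ibdev, "foo%d");
 *	if (ret)
 *		ib_dealloc_device(&fdev->ibdev);
 */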
2809 
2810 void __rdma_block_iter_start(struct ib_block_iter *biter,
2811 			     struct scatterlist *sglist,
2812 			     unsigned int nents,
2813 			     unsigned long pgsz);
2814 bool __rdma_block_iter_next(struct ib_block_iter *biter);
2815 
2816 /**
2817  * rdma_block_iter_dma_address - get the aligned dma address of the current
2818  * block held by the block iterator.
2819  * @biter: block iterator holding the memory block
2820  */
2821 static inline dma_addr_t
2822 rdma_block_iter_dma_address(struct ib_block_iter *biter)
2823 {
2824 	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2825 }
2826 
2827 /**
2828  * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2829  * @sglist: sglist to iterate over
2830  * @biter: block iterator holding the memory block
2831  * @nents: maximum number of sg entries to iterate over
2832  * @pgsz: best HW supported page size to use
2833  *
2834  * Callers may use rdma_block_iter_dma_address() to get each
2835  * block's aligned DMA address.
2836  */
2837 #define rdma_for_each_block(sglist, biter, nents, pgsz)		\
2838 	for (__rdma_block_iter_start(biter, sglist, nents,	\
2839 				     pgsz);			\
2840 	     __rdma_block_iter_next(biter);)
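
/*
 * Illustrative sketch: a driver filling HW page-table entries walks a
 * DMA-mapped SGL in pgsz-aligned blocks. "sglist", "nents" and "pgsz"
 * are assumed variables (the mapped SGL, its entry count, and a HW
 * supported page size), and foo_write_pte is a hypothetical helper:
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, pgsz)
 *		foo_write_pte(dev, rdma_block_iter_dma_address(&biter));
 */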
2841 
2842 /**
2843  * ib_get_client_data - Get IB client context
2844  * @device: Device to get context for
2845  * @client: Client to get context for
2846  *
2847  * ib_get_client_data() returns the client context data set with
2848  * ib_set_client_data(). This can only be called while the client is
2849  * registered to the device, once the ib_client remove() callback returns this
2850  * cannot be called.
2851  */
2852 static inline void *ib_get_client_data(struct ib_device *device,
2853 				       struct ib_client *client)
2854 {
2855 	return xa_load(&device->client_data, client->client_id);
2856 }
2857 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2858 			 void *data);
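
/*
 * Illustrative sketch (hypothetical client): an ib_client typically
 * attaches per-device state from its add() callback and fetches it back
 * with ib_get_client_data() while the client stays registered.
 *
 *	static int foo_add_one(struct ib_device *ibdev)
 *	{
 *		struct foo_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &foo_client, data);
 *		return 0;
 *	}
 *
 *	static struct ib_client foo_client = {
 *		.name	= "foo",
 *		.add	= foo_add_one,
 *		.remove	= foo_remove_one,	// kfree()s the data
 *	};
 */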
2859 void ib_set_device_ops(struct ib_device *device,
2860 		       const struct ib_device_ops *ops);
2861 
2862 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2863 		      unsigned long pfn, unsigned long size, pgprot_t prot,
2864 		      struct rdma_user_mmap_entry *entry);
2865 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2866 				struct rdma_user_mmap_entry *entry,
2867 				size_t length);
2868 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2869 				      struct rdma_user_mmap_entry *entry,
2870 				      size_t length, u32 min_pgoff,
2871 				      u32 max_pgoff);
2872 
2873 struct rdma_user_mmap_entry *
2874 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2875 			       unsigned long pgoff);
2876 struct rdma_user_mmap_entry *
2877 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2878 			 struct vm_area_struct *vma);
2879 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2880 
2881 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2882 
2883 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2884 {
2885 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2886 }
2887 
2888 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2889 {
2890 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2891 }
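
/*
 * Illustrative sketch: a verbs handler copies its command in and its
 * response out, clamping to what userspace actually provided ("cmd" and
 * "resp" are hypothetical driver ABI structs):
 *
 *	ret = ib_copy_from_udata(&cmd, udata,
 *				 min(sizeof(cmd), udata->inlen));
 *	if (ret)
 *		return ret;
 *	...
 *	ret = ib_copy_to_udata(udata, &resp,
 *			       min(sizeof(resp), udata->outlen));
 */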
2892 
2893 static inline bool ib_is_buffer_cleared(const void __user *p,
2894 					size_t len)
2895 {
2896 	bool ret;
2897 	u8 *buf;
2898 
2899 	if (len > USHRT_MAX)
2900 		return false;
2901 
2902 	buf = memdup_user(p, len);
2903 	if (IS_ERR(buf))
2904 		return false;
2905 
2906 	ret = !memchr_inv(buf, 0, len);
2907 	kfree(buf);
2908 	return ret;
2909 }
2910 
2911 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2912 				       size_t offset,
2913 				       size_t len)
2914 {
2915 	return ib_is_buffer_cleared(udata->inbuf + offset, len);
2916 }
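
/*
 * Illustrative sketch: drivers commonly reject requests whose trailing,
 * unknown bytes are non-zero, which keeps the command struct forward
 * extensible ("cmd" is a hypothetical driver ABI struct):
 *
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 */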
2917 
2918 /**
2919  * ib_is_destroy_retryable - Check whether the uobject destruction
2920  * is retryable.
2921  * @ret: The initial destruction return code
2922  * @why: remove reason
2923  * @uobj: The uobject that is destroyed
2924  *
2925  * This function is a helper function that IB layer and low-level drivers
2926  * can use to consider whether the destruction of the given uobject is
2927  * retry-able.
2928  * It checks the original return code; if it wasn't success, the destruction
2929  * is retryable according to the ucontext state (i.e. cleanup_retryable) and
2930  * the remove reason (i.e. why).
2931  * Must be called with the object locked for destroy.
2932  */
2933 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2934 					   struct ib_uobject *uobj)
2935 {
2936 	return ret && (why == RDMA_REMOVE_DESTROY ||
2937 		       uobj->context->cleanup_retryable);
2938 }
2939 
2940 /**
2941  * ib_destroy_usecnt - Called during destruction to check the usecnt
2942  * @usecnt: The usecnt atomic
2943  * @why: remove reason
2944  * @uobj: The uobject that is destroyed
2945  *
2946  * Non-zero usecnts will block destruction unless destruction was triggered by
2947  * a ucontext cleanup.
2948  */
2949 static inline int ib_destroy_usecnt(atomic_t *usecnt,
2950 				    enum rdma_remove_reason why,
2951 				    struct ib_uobject *uobj)
2952 {
2953 	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2954 		return -EBUSY;
2955 	return 0;
2956 }
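
/*
 * Illustrative sketch (hypothetical object): a destroy path can lean on
 * ib_destroy_usecnt() to refuse tearing down a still-referenced object,
 * unless the removal reason makes the destruction non-retryable:
 *
 *	static int foo_destroy_obj(struct foo_obj *obj,
 *				   enum rdma_remove_reason why)
 *	{
 *		int ret = ib_destroy_usecnt(&obj->usecnt, why, obj->uobj);
 *
 *		if (ret)
 *			return ret;	// -EBUSY, may be retried later
 *		kfree(obj);
 *		return 0;
 *	}
 */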
2957 
2958 /**
2959  * ib_modify_qp_is_ok - Check that the supplied attribute mask
2960  * contains all required attributes and no attributes not allowed for
2961  * the given QP state transition.
2962  * @cur_state: Current QP state
2963  * @next_state: Next QP state
2964  * @type: QP type
2965  * @mask: Mask of supplied QP attributes
2966  *
2967  * This function is a helper function that a low-level driver's
2968  * modify_qp method can use to validate the consumer's input.  It
2969  * checks that cur_state and next_state are valid QP states, that a
2970  * transition from cur_state to next_state is allowed by the IB spec,
2971  * and that the attribute mask supplied is allowed for the transition.
2972  */
2973 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2974 			enum ib_qp_type type, enum ib_qp_attr_mask mask);
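
/*
 * Illustrative sketch: a driver's modify_qp handler validates the
 * requested transition before touching hardware:
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */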
2975 
2976 void ib_register_event_handler(struct ib_event_handler *event_handler);
2977 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2978 void ib_dispatch_event(const struct ib_event *event);
2979 
2980 int ib_query_port(struct ib_device *device,
2981 		  u8 port_num, struct ib_port_attr *port_attr);
2982 
2983 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2984 					       u8 port_num);
2985 
2986 /**
2987  * rdma_cap_ib_switch - Check if the device is IB switch
2988  * @device: Device to check
2989  *
2990  * Device driver is responsible for setting is_switch bit on
2991  * in ib_device structure at init time.
2992  *
2993  * Return: true if the device is IB switch.
2994  */
2995 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2996 {
2997 	return device->is_switch;
2998 }
2999 
3000 /**
3001  * rdma_start_port - Return the first valid port number for the device
3002  * specified
3003  *
3004  * @device: Device to be checked
3005  *
3006  * Return start port number
3007  */
3008 static inline u8 rdma_start_port(const struct ib_device *device)
3009 {
3010 	return rdma_cap_ib_switch(device) ? 0 : 1;
3011 }
3012 
3013 /**
3014  * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3015  * @device: The struct ib_device * to iterate over
3016  * @iter: The unsigned int to store the port number
3017  */
3018 #define rdma_for_each_port(device, iter)                                       \
3019 	for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(   \
3020 						     unsigned int, iter)));    \
3021 	     iter <= rdma_end_port(device); (iter)++)
3022 
3023 /**
3024  * rdma_end_port - Return the last valid port number for the device
3025  * specified
3026  *
3027  * @device: Device to be checked
3028  *
3029  * Return last port number
3030  */
3031 static inline u8 rdma_end_port(const struct ib_device *device)
3032 {
3033 	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3034 }
3035 
3036 static inline int rdma_is_port_valid(const struct ib_device *device,
3037 				     unsigned int port)
3038 {
3039 	return (port >= rdma_start_port(device) &&
3040 		port <= rdma_end_port(device));
3041 }
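
/*
 * Illustrative sketch: walking all valid ports; the iterator must be an
 * unsigned int or the BUILD_BUG_ON_ZERO() inside rdma_for_each_port()
 * breaks the build. Switch ports start at 0 and HCA ports at 1, so code
 * should use these helpers rather than hard-coding port numbers
 * (foo_setup_port is a hypothetical helper):
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(ibdev, port) {
 *		if (rdma_is_port_valid(ibdev, port))
 *			foo_setup_port(ibdev, port);
 *	}
 */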
3042 
3043 static inline bool rdma_is_grh_required(const struct ib_device *device,
3044 					u8 port_num)
3045 {
3046 	return device->port_data[port_num].immutable.core_cap_flags &
3047 	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
3048 }
3049 
3050 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
3051 {
3052 	return device->port_data[port_num].immutable.core_cap_flags &
3053 	       RDMA_CORE_CAP_PROT_IB;
3054 }
3055 
3056 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
3057 {
3058 	return device->port_data[port_num].immutable.core_cap_flags &
3059 	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3060 }
3061 
3062 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
3063 {
3064 	return device->port_data[port_num].immutable.core_cap_flags &
3065 	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3066 }
3067 
3068 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
3069 {
3070 	return device->port_data[port_num].immutable.core_cap_flags &
3071 	       RDMA_CORE_CAP_PROT_ROCE;
3072 }
3073 
3074 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
3075 {
3076 	return device->port_data[port_num].immutable.core_cap_flags &
3077 	       RDMA_CORE_CAP_PROT_IWARP;
3078 }
3079 
3080 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
3081 {
3082 	return rdma_protocol_ib(device, port_num) ||
3083 		rdma_protocol_roce(device, port_num);
3084 }
3085 
3086 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3087 {
3088 	return device->port_data[port_num].immutable.core_cap_flags &
3089 	       RDMA_CORE_CAP_PROT_RAW_PACKET;
3090 }
3091 
3092 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3093 {
3094 	return device->port_data[port_num].immutable.core_cap_flags &
3095 	       RDMA_CORE_CAP_PROT_USNIC;
3096 }
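
/*
 * Illustrative sketch: ULPs branch on these per-port protocol helpers
 * rather than on device node_type, e.g. when picking an addressing or
 * connection scheme:
 *
 *	if (rdma_protocol_roce(ibdev, port))
 *		...	// GIDs derived from Ethernet/IP addresses
 *	else if (rdma_protocol_iwarp(ibdev, port))
 *		...	// TCP transport, connections via iw_cm
 *	else if (rdma_protocol_ib(ibdev, port))
 *		...	// native IB, SA path resolution and ib_cm
 */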
3097 
3098 /**
3099  * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3100  * Management Datagrams.
3101  * @device: Device to check
3102  * @port_num: Port number to check
3103  *
3104  * Management Datagrams (MAD) are a required part of the InfiniBand
3105  * specification and are supported on all InfiniBand devices.  A slightly
3106  * extended version is also supported on OPA interfaces.
3107  *
3108  * Return: true if the port supports sending/receiving of MAD packets.
3109  */
3110 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
3111 {
3112 	return device->port_data[port_num].immutable.core_cap_flags &
3113 	       RDMA_CORE_CAP_IB_MAD;
3114 }
3115 
3116 /**
3117  * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3118  * Management Datagrams.
3119  * @device: Device to check
3120  * @port_num: Port number to check
3121  *
3122  * Intel OmniPath devices extend and/or replace the InfiniBand Management
3123  * datagrams with their own versions.  These OPA MADs share many but not all of
3124  * the characteristics of InfiniBand MADs.
3125  *
3126  * OPA MADs differ in the following ways:
3127  *
3128  *    1) MADs are variable size up to 2K
3129  *       IBTA defined MADs remain fixed at 256 bytes
3130  *    2) OPA SMPs must carry valid PKeys
3131  *    3) OPA SMP packets are a different format
3132  *
3133  * Return: true if the port supports OPA MAD packet formats.
3134  */
3135 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3136 {
3137 	return device->port_data[port_num].immutable.core_cap_flags &
3138 		RDMA_CORE_CAP_OPA_MAD;
3139 }
3140 
3141 /**
3142  * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3143  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3144  * @device: Device to check
3145  * @port_num: Port number to check
3146  *
3147  * Each InfiniBand node is required to provide a Subnet Management Agent
3148  * that the subnet manager can access.  Prior to the fabric being fully
3149  * configured by the subnet manager, the SMA is accessed via a well known
3150  * interface called the Subnet Management Interface (SMI).  This interface
3151  * uses directed route packets to communicate with the SM to get around the
3152  * chicken and egg problem of the SM needing to know what's on the fabric
3153  * in order to configure the fabric, and needing to configure the fabric in
3154  * order to send packets to the devices on the fabric.  These directed
3155  * route packets do not need the fabric fully configured in order to reach
3156  * their destination.  The SMI is the only method allowed to send
3157  * directed route packets on an InfiniBand fabric.
3158  *
3159  * Return: true if the port provides an SMI.
3160  */
3161 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3162 {
3163 	return device->port_data[port_num].immutable.core_cap_flags &
3164 	       RDMA_CORE_CAP_IB_SMI;
3165 }
3166 
3167 /**
3168  * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3169  * Communication Manager.
3170  * @device: Device to check
3171  * @port_num: Port number to check
3172  *
3173  * The InfiniBand Communication Manager is one of many pre-defined General
3174  * Service Agents (GSA) that are accessed via the General Service
3175  * Interface (GSI).  Its role is to facilitate establishment of connections
3176  * between nodes as well as other management related tasks for established
3177  * connections.
3178  *
3179  * Return: true if the port supports an IB CM (this does not guarantee that
3180  * a CM is actually running however).
3181  */
3182 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3183 {
3184 	return device->port_data[port_num].immutable.core_cap_flags &
3185 	       RDMA_CORE_CAP_IB_CM;
3186 }
3187 
3188 /**
3189  * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3190  * Communication Manager.
3191  * @device: Device to check
3192  * @port_num: Port number to check
3193  *
3194  * Similar to above, but specific to iWARP connections which have a different
3195  * management protocol than InfiniBand.
3196  *
3197  * Return: true if the port supports an iWARP CM (this does not guarantee that
3198  * a CM is actually running however).
3199  */
3200 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3201 {
3202 	return device->port_data[port_num].immutable.core_cap_flags &
3203 	       RDMA_CORE_CAP_IW_CM;
3204 }
3205 
3206 /**
3207  * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3208  * Subnet Administration.
3209  * @device: Device to check
3210  * @port_num: Port number to check
3211  *
3212  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3213  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3214  * fabrics, devices should resolve routes to other hosts by contacting the
3215  * SA to query the proper route.
3216  *
3217  * Return: true if the port should act as a client to the fabric Subnet
3218  * Administration interface.  This does not imply that the SA service is
3219  * running locally.
3220  */
3221 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3222 {
3223 	return device->port_data[port_num].immutable.core_cap_flags &
3224 	       RDMA_CORE_CAP_IB_SA;
3225 }
3226 
3227 /**
3228  * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3229  * Multicast.
3230  * @device: Device to check
3231  * @port_num: Port number to check
3232  *
3233  * InfiniBand multicast registration is more complex than normal IPv4 or
3234  * IPv6 multicast registration.  Each Host Channel Adapter must register
3235  * with the Subnet Manager when it wishes to join a multicast group.  It
3236  * should do so only once regardless of how many queue pairs it subscribes
3237  * to this group.  And it should leave the group only after all queue pairs
3238  * attached to the group have been detached.
3239  *
3240  * Return: true if the port must undertake the additional administrative
3241  * overhead of registering/unregistering with the SM and tracking of the
3242  * total number of queue pairs attached to the multicast group.
3243  */
3244 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3245 {
3246 	return rdma_cap_ib_sa(device, port_num);
3247 }
3248 
3249 /**
3250  * rdma_cap_af_ib - Check if the port of device has the capability
3251  * Native Infiniband Address.
3252  * @device: Device to check
3253  * @port_num: Port number to check
3254  *
3255  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3256  * GID.  RoCE uses a different mechanism, but still generates a GID via
3257  * a prescribed mechanism and port specific data.
3258  *
3259  * Return: true if the port uses a GID address to identify devices on the
3260  * network.
3261  */
3262 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3263 {
3264 	return device->port_data[port_num].immutable.core_cap_flags &
3265 	       RDMA_CORE_CAP_AF_IB;
3266 }
3267 
3268 /**
3269  * rdma_cap_eth_ah - Check if the port of device has the capability
3270  * Ethernet Address Handle.
3271  * @device: Device to check
3272  * @port_num: Port number to check
3273  *
3274  * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3275  * to fabricate GIDs over Ethernet/IP specific addresses native to the
3276  * port.  Normally, packet headers are generated by the sending host
3277  * adapter, but when sending connectionless datagrams, we must manually
3278  * inject the proper headers for the fabric we are communicating over.
3279  *
3280  * Return: true if we are running as a RoCE port and must force the
3281  * addition of a Global Route Header built from our Ethernet Address
3282  * Handle into our header list for connectionless packets.
3283  */
3284 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3285 {
3286 	return device->port_data[port_num].immutable.core_cap_flags &
3287 	       RDMA_CORE_CAP_ETH_AH;
3288 }
3289 
3290 /**
3291  * rdma_cap_opa_ah - Check if the port of device supports
3292  * OPA Address handles
3293  * @device: Device to check
3294  * @port_num: Port number to check
3295  *
3296  * Return: true if we are running on an OPA device which supports
3297  * the extended OPA addressing.
3298  */
3299 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3300 {
3301 	return (device->port_data[port_num].immutable.core_cap_flags &
3302 		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3303 }
3304 
3305 /**
3306  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3307  *
3308  * @device: Device
3309  * @port_num: Port number
3310  *
3311  * This MAD size includes the MAD headers and MAD payload.  No other headers
3312  * are included.
3313  *
3314  * Return the max MAD size required by the Port.  Will return 0 if the port
3315  * does not support MADs
3316  */
3317 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3318 {
3319 	return device->port_data[port_num].immutable.max_mad_size;
3320 }
3321 
3322 /**
3323  * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3324  * @device: Device to check
3325  * @port_num: Port number to check
3326  *
3327  * RoCE GID table mechanism manages the various GIDs for a device.
3328  *
3329  * NOTE: if allocating the port's GID table has failed, this call will still
3330  * return true, but any RoCE GID table API will fail.
3331  *
3332  * Return: true if the port uses RoCE GID table mechanism in order to manage
3333  * its GIDs.
3334  */
3335 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3336 					   u8 port_num)
3337 {
3338 	return rdma_protocol_roce(device, port_num) &&
3339 		device->ops.add_gid && device->ops.del_gid;
3340 }
3341 
3342 /*
3343  * Check if the device supports READ W/ INVALIDATE.
3344  */
3345 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3346 {
3347 	/*
3348 	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3349 	 * has support for it yet.
3350 	 */
3351 	return rdma_protocol_iwarp(dev, port_num);
3352 }
3353 
3354 /**
3355  * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
3356  *
3357  * @addr: address
3358  * @pgsz_bitmap: bitmap of HW supported page sizes
3359  */
3360 static inline unsigned int rdma_find_pg_bit(unsigned long addr,
3361 					    unsigned long pgsz_bitmap)
3362 {
3363 	unsigned long align;
3364 	unsigned long pgsz;
3365 
3366 	align = addr & -addr;
3367 
3368 	/* Find page bit such that addr is aligned to the highest supported
3369 	 * HW page size
3370 	 */
3371 	pgsz = pgsz_bitmap & ~(-align << 1);
3372 	if (!pgsz)
3373 		return __ffs(pgsz_bitmap);
3374 
3375 	return __fls(pgsz);
3376 }
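
/*
 * Worked example (illustrative): for addr = 0x203000, align = addr &
 * -addr = 0x1000, so the address is 4K aligned.  ~(-align << 1) masks
 * off every page-size bit larger than that alignment; with
 * pgsz_bitmap = SZ_4K | SZ_2M only SZ_4K survives and __fls() yields
 * page bit 12.  If nothing survives the mask, the smallest supported
 * size (__ffs(pgsz_bitmap)) is used as the fallback.
 */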
3377 
3378 /**
3379  * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3380  * @device: Device
3381  * @port_num: 1 based Port number
3382  *
3383  * Return: true if the port is an Intel OPA port, false if not.
3384  */
3385 static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3386 					  u32 port_num)
3387 {
3388 	return (device->port_data[port_num].immutable.core_cap_flags &
3389 		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3390 }
3391 
3392 /**
3393  * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3394  * @device: Device
3395  * @port_num: Port number
3396  * @mtu: enum value of MTU
3397  *
3398  * Return the MTU size supported by the port as an integer value. Will return
3399  * -1 if the mtu enum value is not supported.
3400  */
3401 static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
3402 				       int mtu)
3403 {
3404 	if (rdma_core_cap_opa_port(device, port))
3405 		return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3406 	else
3407 		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3408 }
3409 
3410 /**
3411  * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3412  * @device: Device
3413  * @port: Port number
3414  * @attr: port attribute
3415  *
3416  * Return the MTU size supported by the port as an integer value.
3417  */
3418 static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
3419 				     struct ib_port_attr *attr)
3420 {
3421 	if (rdma_core_cap_opa_port(device, port))
3422 		return attr->phys_mtu;
3423 	else
3424 		return ib_mtu_enum_to_int(attr->max_mtu);
3425 }
3426 
3427 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3428 			 int state);
3429 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3430 		     struct ifla_vf_info *info);
3431 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3432 		    struct ifla_vf_stats *stats);
3433 int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
3434 		    struct ifla_vf_guid *node_guid,
3435 		    struct ifla_vf_guid *port_guid);
3436 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3437 		   int type);
3438 
3439 int ib_query_pkey(struct ib_device *device,
3440 		  u8 port_num, u16 index, u16 *pkey);
3441 
3442 int ib_modify_device(struct ib_device *device,
3443 		     int device_modify_mask,
3444 		     struct ib_device_modify *device_modify);
3445 
3446 int ib_modify_port(struct ib_device *device,
3447 		   u8 port_num, int port_modify_mask,
3448 		   struct ib_port_modify *port_modify);
3449 
3450 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3451 		u8 *port_num, u16 *index);
3452 
3453 int ib_find_pkey(struct ib_device *device,
3454 		 u8 port_num, u16 pkey, u16 *index);
3455 
3456 enum ib_pd_flags {
3457 	/*
3458 	 * Create a memory registration for all memory in the system and place
3459 	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3460 	 * ULPs to avoid the overhead of dynamic MRs.
3461 	 *
3462 	 * This flag is generally considered unsafe and must only be used in
3463  * extremely trusted environments.  Every use of it will log a warning
3464 	 * in the kernel log.
3465 	 */
3466 	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3467 };
3468 
3469 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3470 		const char *caller);
3471 
3472 #define ib_alloc_pd(device, flags) \
3473 	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
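
/*
 * Minimal usage sketch (illustrative only; "ib_dev" is a hypothetical
 * caller-owned device pointer):
 *
 *	struct ib_pd *pd = ib_alloc_pd(ib_dev, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	... create QPs/CQs/MRs under pd ...
 *	ib_dealloc_pd(pd);
 */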
3474 
3475 /**
3476  * ib_dealloc_pd_user - Deallocate kernel/user PD
3477  * @pd: The protection domain
3478  * @udata: Valid user data or NULL for kernel objects
3479  */
3480 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3481 
3482 /**
3483  * ib_dealloc_pd - Deallocate kernel PD
3484  * @pd: The protection domain
3485  *
3486  * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3487  */
3488 static inline void ib_dealloc_pd(struct ib_pd *pd)
3489 {
3490 	ib_dealloc_pd_user(pd, NULL);
3491 }
3492 
3493 enum rdma_create_ah_flags {
3494 	/* In a sleepable context */
3495 	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3496 };
3497 
3498 /**
3499  * rdma_create_ah - Creates an address handle for the given address vector.
3500  * @pd: The protection domain associated with the address handle.
3501  * @ah_attr: The attributes of the address vector.
3502  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3503  *
3504  * The address handle is used to reference a local or global destination
3505  * in all UD QP post sends.
3506  */
3507 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3508 			     u32 flags);
3509 
3510 /**
3511  * rdma_create_user_ah - Creates an address handle for the given address vector.
3512  * It resolves the destination MAC address for an ah attribute of RoCE type.
3513  * @pd: The protection domain associated with the address handle.
3514  * @ah_attr: The attributes of the address vector.
3515  * @udata: pointer to the user's input/output buffer information needed by
3516  *         the provider driver.
3517  *
3518  * Returns a valid address handle on success or an ERR_PTR on failure.
3519  * The address handle is used to reference a local or global destination
3520  * in all UD QP post sends.
3521  */
3522 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3523 				  struct rdma_ah_attr *ah_attr,
3524 				  struct ib_udata *udata);
3525 /**
3526  * ib_get_gids_from_rdma_hdr - Get the sgid and dgid from a GRH or IPv4
3527  *   header.
3528  * @hdr: the L3 header to parse
3529  * @net_type: type of header to parse
3530  * @sgid: place to store source gid
3531  * @dgid: place to store destination gid
3532  */
3533 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3534 			      enum rdma_network_type net_type,
3535 			      union ib_gid *sgid, union ib_gid *dgid);
3536 
3537 /**
3538  * ib_get_rdma_header_version - Get the header version
3539  * @hdr: the L3 header to parse
3540  */
3541 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3542 
3543 /**
3544  * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3545  *   work completion.
3546  * @device: Device on which the received message arrived.
3547  * @port_num: Port on which the received message arrived.
3548  * @wc: Work completion associated with the received message.
3549  * @grh: References the received global route header.  This parameter is
3550  *   ignored unless the work completion indicates that the GRH is valid.
3551  * @ah_attr: Returned attributes that can be used when creating an address
3552  *   handle for replying to the message.
3553  * When ib_init_ah_attr_from_wc() returns success, ah_attr:
3554  * (a) for the IB link layer, optionally holds a reference to the SGID
3555  *     attribute when a GRH is present;
3556  * (b) for the RoCE link layer, always holds a reference to the SGID attribute.
3557  * The user must invoke rdma_destroy_ah_attr() to release the SGID attribute
3558  * reference initialized by ib_init_ah_attr_from_wc().
3559  *
3560  */
3561 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3562 			    const struct ib_wc *wc, const struct ib_grh *grh,
3563 			    struct rdma_ah_attr *ah_attr);
3564 
3565 /**
3566  * ib_create_ah_from_wc - Creates an address handle associated with the
3567  *   sender of the specified work completion.
3568  * @pd: The protection domain associated with the address handle.
3569  * @wc: Work completion information associated with a received message.
3570  * @grh: References the received global route header.  This parameter is
3571  *   ignored unless the work completion indicates that the GRH is valid.
3572  * @port_num: The outbound port number to associate with the address.
3573  *
3574  * The address handle is used to reference a local or global destination
3575  * in all UD QP post sends.
3576  */
3577 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3578 				   const struct ib_grh *grh, u8 port_num);
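
/*
 * Sketch of a typical UD responder path (illustrative only; "qp", "wc"
 * and "grh" come from the caller's receive handling, and error
 * unwinding is elided):
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(qp->pd, wc, grh, qp->port);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post UD sends that reference ah ...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */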
3579 
3580 /**
3581  * rdma_modify_ah - Modifies the address vector associated with an address
3582  *   handle.
3583  * @ah: The address handle to modify.
3584  * @ah_attr: The new address vector attributes to associate with the
3585  *   address handle.
3586  */
3587 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3588 
3589 /**
3590  * rdma_query_ah - Queries the address vector associated with an address
3591  *   handle.
3592  * @ah: The address handle to query.
3593  * @ah_attr: The address vector attributes associated with the address
3594  *   handle.
3595  */
3596 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3597 
3598 enum rdma_destroy_ah_flags {
3599 	/* In a sleepable context */
3600 	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3601 };
3602 
3603 /**
3604  * rdma_destroy_ah_user - Destroys an address handle.
3605  * @ah: The address handle to destroy.
3606  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3607  * @udata: Valid user data or NULL for kernel objects
3608  */
3609 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3610 
3611 /**
3612  * rdma_destroy_ah - Destroys an kernel address handle.
3613  * @ah: The address handle to destroy.
3614  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3615  *
3616  * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3617  */
3618 static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3619 {
3620 	return rdma_destroy_ah_user(ah, flags, NULL);
3621 }
3622 
3623 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3624 				  struct ib_srq_init_attr *srq_init_attr,
3625 				  struct ib_usrq_object *uobject,
3626 				  struct ib_udata *udata);
3627 static inline struct ib_srq *
3628 ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3629 {
3630 	if (!pd->device->ops.create_srq)
3631 		return ERR_PTR(-EOPNOTSUPP);
3632 
3633 	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3634 }
3635 
3636 /**
3637  * ib_modify_srq - Modifies the attributes for the specified SRQ.
3638  * @srq: The SRQ to modify.
3639  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3640  *   the current values of selected SRQ attributes are returned.
3641  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3642  *   are being modified.
3643  *
3644  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3645  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3646  * the number of receives queued drops below the limit.
3647  */
3648 int ib_modify_srq(struct ib_srq *srq,
3649 		  struct ib_srq_attr *srq_attr,
3650 		  enum ib_srq_attr_mask srq_attr_mask);
3651 
3652 /**
3653  * ib_query_srq - Returns the attribute list and current values for the
3654  *   specified SRQ.
3655  * @srq: The SRQ to query.
3656  * @srq_attr: The attributes of the specified SRQ.
3657  */
3658 int ib_query_srq(struct ib_srq *srq,
3659 		 struct ib_srq_attr *srq_attr);
3660 
3661 /**
3662  * ib_destroy_srq_user - Destroys the specified SRQ.
3663  * @srq: The SRQ to destroy.
3664  * @udata: Valid user data or NULL for kernel objects
3665  */
3666 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3667 
3668 /**
3669  * ib_destroy_srq - Destroys the specified kernel SRQ.
3670  * @srq: The SRQ to destroy.
3671  *
3672  * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3673  */
3674 static inline int ib_destroy_srq(struct ib_srq *srq)
3675 {
3676 	return ib_destroy_srq_user(srq, NULL);
3677 }
3678 
3679 /**
3680  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3681  * @srq: The SRQ to post the work request on.
3682  * @recv_wr: A list of work requests to post on the receive queue.
3683  * @bad_recv_wr: On an immediate failure, this parameter will reference
3684  *   the work request that failed to be posted on the QP.
3685  */
3686 static inline int ib_post_srq_recv(struct ib_srq *srq,
3687 				   const struct ib_recv_wr *recv_wr,
3688 				   const struct ib_recv_wr **bad_recv_wr)
3689 {
3690 	const struct ib_recv_wr *dummy;
3691 
3692 	return srq->device->ops.post_srq_recv(srq, recv_wr,
3693 					      bad_recv_wr ? : &dummy);
3694 }
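
/*
 * Posting a single receive buffer to an SRQ might look like this
 * (illustrative sketch; "srq", "dma_addr", "buf_len" and "mr_lkey" are
 * assumed to have been set up by the caller, and MY_RECV_COOKIE is a
 * placeholder wr_id):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = mr_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = MY_RECV_COOKIE,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *
 *	ret = ib_post_srq_recv(srq, &wr, NULL);
 */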
3695 
3696 struct ib_qp *ib_create_qp(struct ib_pd *pd,
3697 			   struct ib_qp_init_attr *qp_init_attr);
3698 
3699 /**
3700  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3701  * @qp: The QP to modify.
3702  * @attr: On input, specifies the QP attributes to modify.  On output,
3703  *   the current values of selected QP attributes are returned.
3704  * @attr_mask: A bit-mask used to specify which attributes of the QP
3705  *   are being modified.
3706  * @udata: pointer to the user's input/output buffer information.
3707  *
3708  * Returns 0 on success or an appropriate error code on failure.
3709  */
3710 int ib_modify_qp_with_udata(struct ib_qp *qp,
3711 			    struct ib_qp_attr *attr,
3712 			    int attr_mask,
3713 			    struct ib_udata *udata);
3714 
3715 /**
3716  * ib_modify_qp - Modifies the attributes for the specified QP and then
3717  *   transitions the QP to the given state.
3718  * @qp: The QP to modify.
3719  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3720  *   the current values of selected QP attributes are returned.
3721  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3722  *   are being modified.
3723  */
3724 int ib_modify_qp(struct ib_qp *qp,
3725 		 struct ib_qp_attr *qp_attr,
3726 		 int qp_attr_mask);
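
/*
 * Sketch of a RESET->INIT transition (illustrative only; the pkey
 * index, port and access flags are placeholder values the caller must
 * choose for its connection):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				      IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */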
3727 
3728 /**
3729  * ib_query_qp - Returns the attribute list and current values for the
3730  *   specified QP.
3731  * @qp: The QP to query.
3732  * @qp_attr: The attributes of the specified QP.
3733  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3734  * @qp_init_attr: Additional attributes of the selected QP.
3735  *
3736  * The qp_attr_mask may be used to limit the query to gathering only the
3737  * selected attributes.
3738  */
3739 int ib_query_qp(struct ib_qp *qp,
3740 		struct ib_qp_attr *qp_attr,
3741 		int qp_attr_mask,
3742 		struct ib_qp_init_attr *qp_init_attr);
3743 
3744 /**
3745  * ib_destroy_qp_user - Destroys the specified QP.
3746  * @qp: The QP to destroy.
3747  * @udata: Valid udata or NULL for kernel objects
3748  */
3749 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3750 
3751 /**
3752  * ib_destroy_qp - Destroys the specified kernel QP.
3753  * @qp: The QP to destroy.
3754  *
3755  * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3756  */
3757 static inline int ib_destroy_qp(struct ib_qp *qp)
3758 {
3759 	return ib_destroy_qp_user(qp, NULL);
3760 }
3761 
3762 /**
3763  * ib_open_qp - Obtain a reference to an existing sharable QP.
3764  * @xrcd: XRC domain
3765  * @qp_open_attr: Attributes identifying the QP to open.
3766  *
3767  * Returns a reference to a sharable QP.
3768  */
3769 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3770 			 struct ib_qp_open_attr *qp_open_attr);
3771 
3772 /**
3773  * ib_close_qp - Release an external reference to a QP.
3774  * @qp: The QP handle to release
3775  *
3776  * The opened QP handle is released by the caller.  The underlying
3777  * shared QP is not destroyed until all internal references are released.
3778  */
3779 int ib_close_qp(struct ib_qp *qp);
3780 
3781 /**
3782  * ib_post_send - Posts a list of work requests to the send queue of
3783  *   the specified QP.
3784  * @qp: The QP to post the work request on.
3785  * @send_wr: A list of work requests to post on the send queue.
3786  * @bad_send_wr: On an immediate failure, this parameter will reference
3787  *   the work request that failed to be posted on the QP.
3788  *
3789  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3790  * error is returned, the QP state shall not be affected,
3791  * ib_post_send() will return an immediate error after queueing any
3792  * earlier work requests in the list.
3793  */
3794 static inline int ib_post_send(struct ib_qp *qp,
3795 			       const struct ib_send_wr *send_wr,
3796 			       const struct ib_send_wr **bad_send_wr)
3797 {
3798 	const struct ib_send_wr *dummy;
3799 
3800 	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3801 }
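
/*
 * Sketch of posting one signaled SEND (illustrative only; "dma_addr",
 * "len" and "mr_lkey" describe a previously mapped, registered buffer):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *
 *	ret = ib_post_send(qp, &wr, NULL);
 */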
3802 
3803 /**
3804  * ib_post_recv - Posts a list of work requests to the receive queue of
3805  *   the specified QP.
3806  * @qp: The QP to post the work request on.
3807  * @recv_wr: A list of work requests to post on the receive queue.
3808  * @bad_recv_wr: On an immediate failure, this parameter will reference
3809  *   the work request that failed to be posted on the QP.
3810  */
3811 static inline int ib_post_recv(struct ib_qp *qp,
3812 			       const struct ib_recv_wr *recv_wr,
3813 			       const struct ib_recv_wr **bad_recv_wr)
3814 {
3815 	const struct ib_recv_wr *dummy;
3816 
3817 	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3818 }
3819 
3820 struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3821 				 int nr_cqe, int comp_vector,
3822 				 enum ib_poll_context poll_ctx,
3823 				 const char *caller, struct ib_udata *udata);
3824 
3825 /**
3826  * ib_alloc_cq_user - Allocate kernel/user CQ
3827  * @dev: The IB device
3828  * @private: Private data attached to the CQE
3829  * @nr_cqe: Number of CQEs in the CQ
3830  * @comp_vector: Completion vector used for the IRQs
3831  * @poll_ctx: Context used for polling the CQ
3832  * @udata: Valid user data or NULL for kernel objects
3833  */
3834 static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3835 					     void *private, int nr_cqe,
3836 					     int comp_vector,
3837 					     enum ib_poll_context poll_ctx,
3838 					     struct ib_udata *udata)
3839 {
3840 	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3841 				  KBUILD_MODNAME, udata);
3842 }
3843 
3844 /**
3845  * ib_alloc_cq - Allocate kernel CQ
3846  * @dev: The IB device
3847  * @private: Private data attached to the CQE
3848  * @nr_cqe: Number of CQEs in the CQ
3849  * @comp_vector: Completion vector used for the IRQs
3850  * @poll_ctx: Context used for polling the CQ
3851  *
3852  * NOTE: for user cq use ib_alloc_cq_user with valid udata!
3853  */
3854 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3855 					int nr_cqe, int comp_vector,
3856 					enum ib_poll_context poll_ctx)
3857 {
3858 	return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3859 				NULL);
3860 }
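
/*
 * Sketch of a kernel ULP allocating a 128-entry CQ polled from softirq
 * context on completion vector 0 (illustrative only; "ib_dev" and
 * "ulp_ctx" are caller-owned):
 *
 *	struct ib_cq *cq = ib_alloc_cq(ib_dev, ulp_ctx, 128, 0,
 *				       IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */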
3861 
3862 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3863 				int nr_cqe, enum ib_poll_context poll_ctx,
3864 				const char *caller);
3865 
3866 /**
3867  * ib_alloc_cq_any - Allocate kernel CQ
3868  * @dev: The IB device
3869  * @private: Private data attached to the CQE
3870  * @nr_cqe: Number of CQEs in the CQ
3871  * @poll_ctx: Context used for polling the CQ
3872  */
3873 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3874 					    void *private, int nr_cqe,
3875 					    enum ib_poll_context poll_ctx)
3876 {
3877 	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3878 				 KBUILD_MODNAME);
3879 }
3880 
3881 /**
3882  * ib_free_cq_user - Free kernel/user CQ
3883  * @cq: The CQ to free
3884  * @udata: Valid user data or NULL for kernel objects
3885  *
3886  * NOTE: This function shouldn't be called on shared CQs.
3887  */
3888 void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3889 
3890 /**
3891  * ib_free_cq - Free kernel CQ
3892  * @cq: The CQ to free
3893  *
3894  * NOTE: for user cq use ib_free_cq_user with valid udata!
3895  */
3896 static inline void ib_free_cq(struct ib_cq *cq)
3897 {
3898 	ib_free_cq_user(cq, NULL);
3899 }
3900 
3901 int ib_process_cq_direct(struct ib_cq *cq, int budget);
3902 
3903 /**
3904  * ib_create_cq - Creates a CQ on the specified device.
3905  * @device: The device on which to create the CQ.
3906  * @comp_handler: A user-specified callback that is invoked when a
3907  *   completion event occurs on the CQ.
3908  * @event_handler: A user-specified callback that is invoked when an
3909  *   asynchronous event not associated with a completion occurs on the CQ.
3910  * @cq_context: Context associated with the CQ returned to the user via
3911  *   the associated completion and event handlers.
3912  * @cq_attr: The attributes the CQ should be created upon.
3913  *
3914  * Users can examine the cq structure to determine the actual CQ size.
3915  */
3916 struct ib_cq *__ib_create_cq(struct ib_device *device,
3917 			     ib_comp_handler comp_handler,
3918 			     void (*event_handler)(struct ib_event *, void *),
3919 			     void *cq_context,
3920 			     const struct ib_cq_init_attr *cq_attr,
3921 			     const char *caller);
3922 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3923 	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3924 
3925 /**
3926  * ib_resize_cq - Modifies the capacity of the CQ.
3927  * @cq: The CQ to resize.
3928  * @cqe: The minimum size of the CQ.
3929  *
3930  * Users can examine the cq structure to determine the actual CQ size.
3931  */
3932 int ib_resize_cq(struct ib_cq *cq, int cqe);
3933 
3934 /**
3935  * rdma_set_cq_moderation - Modifies moderation params of the CQ
3936  * @cq: The CQ to modify.
3937  * @cq_count: number of CQEs that will trigger an event
3938  * @cq_period: max period of time in usec before triggering an event
3939  *
3940  */
3941 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3942 
3943 /**
3944  * ib_destroy_cq_user - Destroys the specified CQ.
3945  * @cq: The CQ to destroy.
3946  * @udata: Valid user data or NULL for kernel objects
3947  */
3948 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3949 
3950 /**
3951  * ib_destroy_cq - Destroys the specified kernel CQ.
3952  * @cq: The CQ to destroy.
3953  *
3954  * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3955  */
3956 static inline void ib_destroy_cq(struct ib_cq *cq)
3957 {
3958 	ib_destroy_cq_user(cq, NULL);
3959 }
3960 
3961 /**
3962  * ib_poll_cq - poll a CQ for completion(s)
3963  * @cq: the CQ being polled
3964  * @num_entries: maximum number of completions to return
3965  * @wc: array of at least @num_entries &struct ib_wc where completions
3966  *   will be returned
3967  *
3968  * Poll a CQ for (possibly multiple) completions.  If the return value
3969  * is < 0, an error occurred.  If the return value is >= 0, it is the
3970  * number of completions returned.  If the return value is
3971  * non-negative and < num_entries, then the CQ was emptied.
3972  */
3973 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3974 			     struct ib_wc *wc)
3975 {
3976 	return cq->device->ops.poll_cq(cq, num_entries, wc);
3977 }
3978 
3979 /**
3980  * ib_req_notify_cq - Request completion notification on a CQ.
3981  * @cq: The CQ to generate an event for.
3982  * @flags:
3983  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3984  *   to request an event on the next solicited event or next work
3985  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3986  *   may also be |ed in to request a hint about missed events, as
3987  *   described below.
3988  *
3989  * Return Value:
3990  *    < 0 means an error occurred while requesting notification
3991  *   == 0 means notification was requested successfully, and if
3992  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3993  *        were missed and it is safe to wait for another event.  In
3994  *        this case it is guaranteed that any work completions added
3995  *        to the CQ since the last CQ poll will trigger a completion
3996  *        notification event.
3997  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3998  *        in.  It means that the consumer must poll the CQ again to
3999  *        make sure it is empty to avoid missing an event because of a
4000  *        race between requesting notification and an entry being
4001  *        added to the CQ.  This return value means it is possible
4002  *        (but not guaranteed) that a work completion has been added
4003  *        to the CQ since the last poll without triggering a
4004  *        completion notification event.
4005  */
4006 static inline int ib_req_notify_cq(struct ib_cq *cq,
4007 				   enum ib_cq_notify_flags flags)
4008 {
4009 	return cq->device->ops.req_notify_cq(cq, flags);
4010 }
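
/*
 * The return-value convention above enables the race-free "drain, then
 * re-arm, then drain again" idiom (illustrative sketch; process_wc() is
 * a hypothetical consumer):
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */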
4011 
4012 struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4013 			     int comp_vector_hint,
4014 			     enum ib_poll_context poll_ctx);
4015 
4016 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4017 
4018 /**
4019  * ib_req_ncomp_notif - Request completion notification when there are
4020  *   at least the specified number of unreaped completions on the CQ.
4021  * @cq: The CQ to generate an event for.
4022  * @wc_cnt: The number of unreaped completions that should be on the
4023  *   CQ before an event is generated.
4024  */
4025 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
4026 {
4027 	return cq->device->ops.req_ncomp_notif ?
4028 		cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
4029 		-ENOSYS;
4030 }
4031 
4032 /**
4033  * ib_dma_mapping_error - check a DMA addr for error
4034  * @dev: The device for which the dma_addr was created
4035  * @dma_addr: The DMA address to check
4036  */
4037 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4038 {
4039 	return dma_mapping_error(dev->dma_device, dma_addr);
4040 }
4041 
4042 /**
4043  * ib_dma_map_single - Map a kernel virtual address to DMA address
4044  * @dev: The device for which the dma_addr is to be created
4045  * @cpu_addr: The kernel virtual address
4046  * @size: The size of the region in bytes
4047  * @direction: The direction of the DMA
4048  */
4049 static inline u64 ib_dma_map_single(struct ib_device *dev,
4050 				    void *cpu_addr, size_t size,
4051 				    enum dma_data_direction direction)
4052 {
4053 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4054 }
4055 
4056 /**
4057  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4058  * @dev: The device for which the DMA address was created
4059  * @addr: The DMA address
4060  * @size: The size of the region in bytes
4061  * @direction: The direction of the DMA
4062  */
4063 static inline void ib_dma_unmap_single(struct ib_device *dev,
4064 				       u64 addr, size_t size,
4065 				       enum dma_data_direction direction)
4066 {
4067 	dma_unmap_single(dev->dma_device, addr, size, direction);
4068 }
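
/*
 * Sketch of the usual map/check/use/unmap cycle for a kernel buffer
 * (illustrative only; "ib_dev", "buf" and "len" are caller-owned):
 *
 *	u64 dma_addr = ib_dma_map_single(ib_dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(ib_dev, dma_addr))
 *		return -ENOMEM;
 *	... build an ib_sge from dma_addr and post the work request ...
 *	ib_dma_unmap_single(ib_dev, dma_addr, len, DMA_TO_DEVICE);
 */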
4069 
4070 /**
4071  * ib_dma_map_page - Map a physical page to DMA address
4072  * @dev: The device for which the dma_addr is to be created
4073  * @page: The page to be mapped
4074  * @offset: The offset within the page
4075  * @size: The size of the region in bytes
4076  * @direction: The direction of the DMA
4077  */
4078 static inline u64 ib_dma_map_page(struct ib_device *dev,
4079 				  struct page *page,
4080 				  unsigned long offset,
4081 				  size_t size,
4082 				  enum dma_data_direction direction)
4083 {
4084 	return dma_map_page(dev->dma_device, page, offset, size, direction);
4085 }
4086 
4087 /**
4088  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4089  * @dev: The device for which the DMA address was created
4090  * @addr: The DMA address
4091  * @size: The size of the region in bytes
4092  * @direction: The direction of the DMA
4093  */
4094 static inline void ib_dma_unmap_page(struct ib_device *dev,
4095 				     u64 addr, size_t size,
4096 				     enum dma_data_direction direction)
4097 {
4098 	dma_unmap_page(dev->dma_device, addr, size, direction);
4099 }
4100 
4101 /**
4102  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4103  * @dev: The device for which the DMA addresses are to be created
4104  * @sg: The array of scatter/gather entries
4105  * @nents: The number of scatter/gather entries
4106  * @direction: The direction of the DMA
4107  */
4108 static inline int ib_dma_map_sg(struct ib_device *dev,
4109 				struct scatterlist *sg, int nents,
4110 				enum dma_data_direction direction)
4111 {
4112 	return dma_map_sg(dev->dma_device, sg, nents, direction);
4113 }
4114 
4115 /**
4116  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4117  * @dev: The device for which the DMA addresses were created
4118  * @sg: The array of scatter/gather entries
4119  * @nents: The number of scatter/gather entries
4120  * @direction: The direction of the DMA
4121  */
4122 static inline void ib_dma_unmap_sg(struct ib_device *dev,
4123 				   struct scatterlist *sg, int nents,
4124 				   enum dma_data_direction direction)
4125 {
4126 	dma_unmap_sg(dev->dma_device, sg, nents, direction);
4127 }
4128 
4129 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4130 				      struct scatterlist *sg, int nents,
4131 				      enum dma_data_direction direction,
4132 				      unsigned long dma_attrs)
4133 {
4134 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4135 				dma_attrs);
4136 }
4137 
4138 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4139 					 struct scatterlist *sg, int nents,
4140 					 enum dma_data_direction direction,
4141 					 unsigned long dma_attrs)
4142 {
4143 	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
4144 }
4145 
4146 /**
4147  * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4148  * @dev: The device to query
4149  *
4150  * The returned value represents a size in bytes.
4151  */
4152 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4153 {
4154 	return dma_get_max_seg_size(dev->dma_device);
4155 }
4156 
4157 /**
4158  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4159  * @dev: The device for which the DMA address was created
4160  * @addr: The DMA address
4161  * @size: The size of the region in bytes
4162  * @dir: The direction of the DMA
4163  */
4164 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4165 					      u64 addr,
4166 					      size_t size,
4167 					      enum dma_data_direction dir)
4168 {
4169 	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4170 }
4171 
4172 /**
4173  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4174  * @dev: The device for which the DMA address was created
4175  * @addr: The DMA address
4176  * @size: The size of the region in bytes
4177  * @dir: The direction of the DMA
4178  */
4179 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4180 						 u64 addr,
4181 						 size_t size,
4182 						 enum dma_data_direction dir)
4183 {
4184 	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4185 }
4186 
4187 /**
4188  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4189  * @dev: The device for which the DMA address is requested
4190  * @size: The size of the region to allocate in bytes
4191  * @dma_handle: A pointer for returning the DMA address of the region
4192  * @flag: memory allocator flags
4193  */
4194 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4195 					   size_t size,
4196 					   dma_addr_t *dma_handle,
4197 					   gfp_t flag)
4198 {
4199 	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4200 }
4201 
4202 /**
4203  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4204  * @dev: The device for which the DMA addresses were allocated
4205  * @size: The size of the region
4206  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4207  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4208  */
4209 static inline void ib_dma_free_coherent(struct ib_device *dev,
4210 					size_t size, void *cpu_addr,
4211 					dma_addr_t dma_handle)
4212 {
4213 	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4214 }
4215 
4216 /* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4217  * space. This function should be called when 'current' is the owning MM.
4218  */
4219 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4220 			     u64 virt_addr, int mr_access_flags);
4221 
4222 /* ib_advise_mr - give advice about an address range in a memory region */
4223 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4224 		 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4225 /**
4226  * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4227  *   HCA translation table.
4228  * @mr: The memory region to deregister.
4229  * @udata: Valid user data or NULL for kernel object
4230  *
4231  * This function can fail, if the memory region has memory windows bound to it.
4232  */
4233 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4234 
4235 /**
4236  * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4237  *   HCA translation table.
4238  * @mr: The memory region to deregister.
4239  *
4240  * This function can fail, if the memory region has memory windows bound to it.
4241  *
4242  * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4243  */
4244 static inline int ib_dereg_mr(struct ib_mr *mr)
4245 {
4246 	return ib_dereg_mr_user(mr, NULL);
4247 }
4248 
4249 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4250 			  u32 max_num_sg);
4251 
4252 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4253 				    u32 max_num_data_sg,
4254 				    u32 max_num_meta_sg);
4255 
4256 /**
4257  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4258  *   R_Key and L_Key.
4259  * @mr: struct ib_mr pointer to be updated.
4260  * @newkey: new key to be used.
4261  */
4262 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4263 {
4264 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4265 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4266 }
4267 
4268 /**
4269  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4270  * for calculating a new rkey for type 2 memory windows.
4271  * @rkey: the rkey to increment.
4272  */
4273 static inline u32 ib_inc_rkey(u32 rkey)
4274 {
4275 	const u32 mask = 0x000000ff;
4276 	return ((rkey + 1) & mask) | (rkey & ~mask);
4277 }
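
/*
 * Sketch of a common pairing (illustrative only): before reusing a
 * fast-reg MR, ULPs typically bump the key byte so stale remote
 * references cannot match the new registration.  The u32 returned by
 * ib_inc_rkey() is intentionally truncated to its low key byte:
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */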
4278 
4279 /**
4280  * ib_attach_mcast - Attaches the specified QP to a multicast group.
4281  * @qp: QP to attach to the multicast group.  The QP must be type
4282  *   IB_QPT_UD.
4283  * @gid: Multicast group GID.
4284  * @lid: Multicast group LID in host byte order.
4285  *
4286  * In order to send and receive multicast packets, subnet
4287  * administration must have created the multicast group and configured
4288  * the fabric appropriately.  The port associated with the specified
4289  * QP must also be a member of the multicast group.
4290  */
4291 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4292 
4293 /**
4294  * ib_detach_mcast - Detaches the specified QP from a multicast group.
4295  * @qp: QP to detach from the multicast group.
4296  * @gid: Multicast group GID.
4297  * @lid: Multicast group LID in host byte order.
4298  */
4299 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4300 
4301 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4302 				   struct inode *inode, struct ib_udata *udata);
4303 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4304 
4305 static inline int ib_check_mr_access(int flags)
4306 {
4307 	/*
4308 	 * Local write permission is required if remote write or
4309 	 * remote atomic permission is also requested.
4310 	 */
4311 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4312 	    !(flags & IB_ACCESS_LOCAL_WRITE))
4313 		return -EINVAL;
4314 
4315 	if (flags & ~IB_ACCESS_SUPPORTED)
4316 		return -EINVAL;
4317 
4318 	return 0;
4319 }
4320 
4321 static inline bool ib_access_writable(int access_flags)
4322 {
4323 	/*
4324 	 * We have writable memory backing the MR if any of the following
4325 	 * access flags are set.  "Local write" and "remote write" obviously
4326 	 * require write access.  "Remote atomic" can do things like fetch and
4327 	 * add, which will modify memory, and "MW bind" can change permissions
4328 	 * by binding a window.
4329 	 */
4330 	return access_flags &
4331 		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4332 		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4333 }
4334 
4335 /**
4336  * ib_check_mr_status - lightweight check of MR status.
4337  *     This routine may provide status checks on a selected
4338  *     ib_mr.  The first use is for the signature status check.
4339  *
4340  * @mr: A memory region.
4341  * @check_mask: Bitmask of which checks to perform from
4342  *     ib_mr_status_check enumeration.
4343  * @mr_status: The container of relevant status checks.
4344  *     Failed checks will be indicated in the status bitmask
4345  *     and the relevant info shall be in the error item.
4346  */
4347 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4348 		       struct ib_mr_status *mr_status);
4349 
4350 /**
4351  * ib_device_try_get - Hold a registration lock
4352  * @dev: The device to lock
4353  *
4354  * A device under an active registration lock cannot become unregistered. It
4355  * is only possible to obtain a registration lock on a device that is fully
4356  * registered, otherwise this function returns false.
4357  *
4358  * The registration lock is only necessary for actions which require the
4359  * device to still be registered. Uses that only require the device pointer to
4360  * be valid should use get_device(&ibdev->dev) to hold the memory.
4361  *
4362  */
4363 static inline bool ib_device_try_get(struct ib_device *dev)
4364 {
4365 	return refcount_inc_not_zero(&dev->refcount);
4366 }
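
/*
 * Sketch of the registration-lock pattern (illustrative only; "ibdev"
 * is a device pointer the caller already holds):
 *
 *	if (!ib_device_try_get(ibdev))
 *		return -ENODEV;
 *	... act on the still-registered device ...
 *	ib_device_put(ibdev);
 */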
4367 
4368 void ib_device_put(struct ib_device *device);
4369 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4370 					  enum rdma_driver_id driver_id);
4371 struct ib_device *ib_device_get_by_name(const char *name,
4372 					enum rdma_driver_id driver_id);
4373 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4374 					    u16 pkey, const union ib_gid *gid,
4375 					    const struct sockaddr *addr);
4376 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4377 			 unsigned int port);
4378 struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4379 
4380 struct ib_wq *ib_create_wq(struct ib_pd *pd,
4381 			   struct ib_wq_init_attr *init_attr);
4382 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
4383 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4384 		 u32 wq_attr_mask);
4385 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
4386 
4387 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4388 		 unsigned int *sg_offset, unsigned int page_size);
4389 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4390 		    int data_sg_nents, unsigned int *data_sg_offset,
4391 		    struct scatterlist *meta_sg, int meta_sg_nents,
4392 		    unsigned int *meta_sg_offset, unsigned int page_size);
4393 
4394 static inline int
4395 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4396 		  unsigned int *sg_offset, unsigned int page_size)
4397 {
4398 	int n;
4399 
4400 	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4401 	mr->iova = 0;
4402 
4403 	return n;
4404 }
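
/*
 * Sketch of mapping a DMA-mapped scatterlist into an MR allocated with
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents) (illustrative only; "sgl"
 * and "nents" are assumed to come from a prior ib_dma_map_sg() call):
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;
 */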
4405 
4406 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4407 		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4408 
4409 void ib_drain_rq(struct ib_qp *qp);
4410 void ib_drain_sq(struct ib_qp *qp);
4411 void ib_drain_qp(struct ib_qp *qp);
4412 
4413 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
4414 
4415 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4416 {
4417 	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4418 		return attr->roce.dmac;
4419 	return NULL;
4420 }
4421 
4422 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4423 {
4424 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4425 		attr->ib.dlid = (u16)dlid;
4426 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4427 		attr->opa.dlid = dlid;
4428 }
4429 
4430 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4431 {
4432 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4433 		return attr->ib.dlid;
4434 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4435 		return attr->opa.dlid;
4436 	return 0;
4437 }
4438 
4439 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4440 {
4441 	attr->sl = sl;
4442 }
4443 
4444 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4445 {
4446 	return attr->sl;
4447 }
4448 
4449 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4450 					 u8 src_path_bits)
4451 {
4452 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4453 		attr->ib.src_path_bits = src_path_bits;
4454 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4455 		attr->opa.src_path_bits = src_path_bits;
4456 }
4457 
4458 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4459 {
4460 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4461 		return attr->ib.src_path_bits;
4462 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4463 		return attr->opa.src_path_bits;
4464 	return 0;
4465 }
4466 
4467 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4468 					bool make_grd)
4469 {
4470 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4471 		attr->opa.make_grd = make_grd;
4472 }
4473 
4474 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4475 {
4476 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4477 		return attr->opa.make_grd;
4478 	return false;
4479 }
4480 
4481 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4482 {
4483 	attr->port_num = port_num;
4484 }
4485 
4486 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4487 {
4488 	return attr->port_num;
4489 }
4490 
4491 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4492 					   u8 static_rate)
4493 {
4494 	attr->static_rate = static_rate;
4495 }
4496 
4497 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4498 {
4499 	return attr->static_rate;
4500 }
4501 
4502 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4503 					enum ib_ah_flags flag)
4504 {
4505 	attr->ah_flags = flag;
4506 }
4507 
4508 static inline enum ib_ah_flags
4509 		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4510 {
4511 	return attr->ah_flags;
4512 }
4513 
4514 static inline const struct ib_global_route
4515 		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4516 {
4517 	return &attr->grh;
4518 }
4519 
4520 /* To retrieve and modify the grh */
4521 static inline struct ib_global_route
4522 		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4523 {
4524 	return &attr->grh;
4525 }
4526 
4527 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4528 {
4529 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4530 
4531 	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4532 }
4533 
4534 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4535 					     __be64 prefix)
4536 {
4537 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4538 
4539 	grh->dgid.global.subnet_prefix = prefix;
4540 }
4541 
4542 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4543 					    __be64 if_id)
4544 {
4545 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4546 
4547 	grh->dgid.global.interface_id = if_id;
4548 }
4549 
4550 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4551 				   union ib_gid *dgid, u32 flow_label,
4552 				   u8 sgid_index, u8 hop_limit,
4553 				   u8 traffic_class)
4554 {
4555 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4556 
4557 	attr->ah_flags = IB_AH_GRH;
4558 	if (dgid)
4559 		grh->dgid = *dgid;
4560 	grh->flow_label = flow_label;
4561 	grh->sgid_index = sgid_index;
4562 	grh->hop_limit = hop_limit;
4563 	grh->traffic_class = traffic_class;
4564 	grh->sgid_attr = NULL;
4565 }
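
/*
 * Sketch of filling an ah_attr by hand with the accessors above
 * (illustrative only; most callers obtain one from the CM or from
 * ib_init_ah_attr_from_wc() instead, and "dgid" and "sgid_index" are
 * placeholders the caller must supply):
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(dev, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 */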
4566 
4567 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4568 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4569 			     u32 flow_label, u8 hop_limit, u8 traffic_class,
4570 			     const struct ib_gid_attr *sgid_attr);
4571 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4572 		       const struct rdma_ah_attr *src);
4573 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4574 			  const struct rdma_ah_attr *new);
4575 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4576 
4577 /**
4578  * rdma_ah_find_type - Return address handle type.
4579  *
4580  * @dev: Device to be checked
4581  * @port_num: Port number
4582  */
4583 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4584 						       u8 port_num)
4585 {
4586 	if (rdma_protocol_roce(dev, port_num))
4587 		return RDMA_AH_ATTR_TYPE_ROCE;
4588 	if (rdma_protocol_ib(dev, port_num)) {
4589 		if (rdma_cap_opa_ah(dev, port_num))
4590 			return RDMA_AH_ATTR_TYPE_OPA;
4591 		return RDMA_AH_ATTR_TYPE_IB;
4592 	}
4593 
4594 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4595 }
4596 
4597 /**
4598  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4599  *     In the current implementation the only way to
4600  *     get the 32bit lid is from other sources for OPA.
4601  *     For IB, lids will always be 16bits so cast the
4602  *     value accordingly.
4603  *
4604  * @lid: A 32bit LID
4605  */
4606 static inline u16 ib_lid_cpu16(u32 lid)
4607 {
4608 	WARN_ON_ONCE(lid & 0xFFFF0000);
4609 	return (u16)lid;
4610 }
4611 
4612 /**
4613  * ib_lid_be16 - Return lid in 16bit BE encoding.
4614  *
4615  * @lid: A 32bit LID
4616  */
4617 static inline __be16 ib_lid_be16(u32 lid)
4618 {
4619 	WARN_ON_ONCE(lid & 0xFFFF0000);
4620 	return cpu_to_be16((u16)lid);
4621 }
4622 
4623 /**
4624  * ib_get_vector_affinity - Get the affinity mappings of a given completion
4625  *   vector
4626  * @device:         the rdma device
4627  * @comp_vector:    index of completion vector
4628  *
4629  * Returns NULL on failure or when the device driver does not implement
4630  * get_vector_affinity; otherwise, the cpu map of the given completion
4631  * vector.
4632  */
4633 static inline const struct cpumask *
4634 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4635 {
4636 	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4637 	    !device->ops.get_vector_affinity)
4638 		return NULL;
4639 
4640 	return device->ops.get_vector_affinity(device, comp_vector);
4642 }
4643 
4644 /**
4645  * rdma_roce_rescan_device - Rescan all of the network devices in the system
4646  * and add their gids, as needed, to the relevant RoCE devices.
4647  *
4648  * @ibdev:          the rdma device
4649  */
4650 void rdma_roce_rescan_device(struct ib_device *ibdev);
4651 
4652 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4653 
4654 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4655 
4656 struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4657 				     enum rdma_netdev_t type, const char *name,
4658 				     unsigned char name_assign_type,
4659 				     void (*setup)(struct net_device *));
4660 
4661 int rdma_init_netdev(struct ib_device *device, u8 port_num,
4662 		     enum rdma_netdev_t type, const char *name,
4663 		     unsigned char name_assign_type,
4664 		     void (*setup)(struct net_device *),
4665 		     struct net_device *netdev);
4666 
4667 /**
4668  * rdma_set_device_sysfs_group - Set the device attributes group to hold
4669  *				 driver-specific sysfs entries for the
4670  *				 infiniband class.
4671  *
4672  * @dev:	device pointer for which the attributes are to be created
4673  * @group:	Pointer to the group which should be added when the device
4674  *		is registered with sysfs.
4675  * rdma_set_device_sysfs_group() allows existing drivers to expose one
4676  * group per device with sysfs attributes.
4677  *
4678  * NOTE: New drivers should not make use of this API; instead, new device
4679  * parameters should be exposed via the netlink command interface. This API
4680  * and mechanism exist only for existing drivers.
4681  */
4682 static inline void
4683 rdma_set_device_sysfs_group(struct ib_device *dev,
4684 			    const struct attribute_group *group)
4685 {
4686 	dev->groups[1] = group;
4687 }
4688 
4689 /**
4690  * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4691  *
4692  * @device:	device pointer for which ib_device pointer to retrieve
4693  *
4694  * rdma_device_to_ibdev() retrieves ib_device pointer from device.
4695  *
4696  */
4697 static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4698 {
4699 	struct ib_core_device *coredev =
4700 		container_of(device, struct ib_core_device, dev);
4701 
4702 	return coredev->owner;
4703 }
4704 
4705 /**
4706  * rdma_device_to_drv_device - Helper macro to reach back to driver's
4707  *			       ib_device holder structure from device pointer.
4708  *
4709  * NOTE: New drivers should not make use of this API; This API is only for
4710  * existing drivers who have exposed sysfs entries using
4711  * rdma_set_device_sysfs_group().
4712  */
4713 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
4714 	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4715 
4716 bool rdma_dev_access_netns(const struct ib_device *device,
4717 			   const struct net *net);
4718 
4719 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4720 #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4721 
4722 /**
4723  * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4724  *                               on the flow_label
4725  *
4726  * This function will convert the 20 bit flow_label input to a valid 14 bit
4727  * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same
4728  * convention.
4729  */
4730 static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4731 {
4732 	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4733 
4734 	fl_low ^= fl_high >> 14;
4735 	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4736 }
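
/*
 * Worked example (illustrative only): fl = 0xABCDE gives
 * fl_low = 0x3CDE and fl_high = 0xA8000; fl_high >> 14 is 0x2A, so
 * fl_low becomes 0x3CDE ^ 0x2A = 0x3CF4 and the returned source port
 * is 0xC000 | 0x3CF4 = 0xFCF4.
 */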
4737 
4738 /**
4739  * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
4740  *                        local and remote qpn values
4741  *
4742  * This function folds the multiplication result of the two qpn fields,
4743  * 24 bits each, and converts it to a 20 bit result.
4744  *
4745  * This function will create a symmetric flow_label value based on the local
4746  * and remote qpn values. This will allow both the requester and responder
4747  * to calculate the same flow_label for a given connection.
4748  *
4749  * This helper function should be used by drivers in case the upper layer
4750  * provides a zero flow_label value. This is to improve the entropy of RDMA
4751  * traffic in the network.
4752  */
4753 static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4754 {
4755 	u64 v = (u64)lqpn * rqpn;
4756 
4757 	v ^= v >> 20;
4758 	v ^= v >> 40;
4759 
4760 	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4761 }
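
/*
 * Sketch tying the two helpers together (illustrative only): a RoCE v2
 * driver handed a zero flow_label by the upper layer can derive both
 * values from the connection's QPNs ("lqpn" and "rqpn" are the local
 * and remote QP numbers):
 *
 *	u32 fl = rdma_calc_flow_label(lqpn, rqpn);
 *	u16 sport = rdma_flow_label_to_udp_sport(fl);
 */
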
4762 #endif /* IB_VERBS_H */
4763