/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004, 2020 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 */

#ifndef IB_VERBS_H
#define IB_VERBS_H

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
struct rdma_cm_id;
struct ib_port;
struct hw_stats_device_data;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

struct ib_ucq_object;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...)                       \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
do {                                                                    \
	static DEFINE_RATELIMIT_STATE(_rs,                              \
				      DEFAULT_RATELIMIT_INTERVAL,       \
				      DEFAULT_RATELIMIT_BURST);         \
	if (__ratelimit(&_rs))                                          \
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
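
/*
 * Usage sketch (illustrative; "cqn" is a made-up variable): the ibdev_*()
 * helpers prefix the message with the device name, and the *_ratelimited
 * variants drop excess messages under the default ratelimit policy:
 *
 *	ibdev_warn_ratelimited(ibdev, "CQ overrun on cqn 0x%x\n", cqn);
 */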

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
do {                                                                    \
	static DEFINE_RATELIMIT_STATE(_rs,                              \
				      DEFAULT_RATELIMIT_INTERVAL,       \
				      DEFAULT_RATELIMIT_BURST);         \
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
				    ##__VA_ARGS__);                     \
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	u16			index;
	u32			port_num;
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;
	else if (network_type == RDMA_NETWORK_ROCE_V1)
		return IB_GID_TYPE_ROCE;
	else
		return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		return RDMA_NETWORK_ROCE_V1;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}
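
/*
 * Mapping sketch (illustrative): a RoCEv2 GID carrying an IPv4-mapped
 * IPv6 address (::ffff:a.b.c.d) classifies as RDMA_NETWORK_IPV4, any
 * other RoCEv2 GID as RDMA_NETWORK_IPV6, and both network types map
 * back through ib_network_to_gid_type() to IB_GID_TYPE_ROCE_UDP_ENCAP.
 */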

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
	IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
	IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
	IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
	IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
	IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
	IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
	IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
	IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
	/* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
	IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
	IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
	IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
	IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
	IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,

	/* Reserved, old SEND_W_INV = 1 << 16,*/
	IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
	IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * to the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
	IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
	IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
	IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
	IB_DEVICE_MANAGED_FLOW_STEERING =
		IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING =
		IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
	/* Placement type attributes */
	IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
	IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
	IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
};

enum ib_kernel_cap_flags {
	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey field in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IBK_LOCAL_DMA_LKEY = 1 << 0,
	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
	IBK_INTEGRITY_HANDOVER = 1 << 1,
	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
	IBK_ON_DEMAND_PAGING = 1 << 2,
	/* IB_MR_TYPE_SG_GAPS is supported */
	IBK_SG_GAPS_REG = 1 << 3,
	/* Driver supports RDMA_NLDEV_CMD_DELLINK */
	IBK_ALLOW_USER_UNREG = 1 << 4,

	/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
	/* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
	IBK_UD_TSO = 1 << 6,
	/* ipoib will use the device ops:
	 *   get_vf_config
	 *   get_vf_guid
	 *   get_vf_stats
	 *   set_vf_guid
	 *   set_vf_link_state
	 */
	IBK_VIRTUAL_FUNCTION = 1 << 7,
	/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
	IBK_RDMA_NETDEV_OPA = 1 << 8,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t  rc_odp_caps;
		uint32_t  uc_odp_caps;
		uint32_t  ud_odp_caps;
		uint32_t  xrc_odp_caps;
	} per_transport_caps;
};
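
/*
 * Query sketch (illustrative; "dev_attr" is a made-up pointer to the
 * device's ib_device_attr, defined below): a ULP wanting ODP-backed RC
 * MRs with RDMA WRITE support would test both masks:
 *
 *	if ((dev_attr->odp_caps.general_caps & IB_ODP_SUPPORT) &&
 *	    (dev_attr->odp_caps.per_transport_caps.rc_odp_caps &
 *	     IB_ODP_SUPPORT_WRITE))
 *		... ODP is usable for RC RDMA WRITE ...
 */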

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/*  Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	u32		comp_vector;
	u32		flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16     max_cq_moderation_count;
	u16     max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64		length;
	u64		offset;
	u32		access_flags;
};

struct ib_dm_alloc_attr {
	u64	length;
	u32	alignment;
	u32	flags;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	u64			kernel_cap_flags;
	int			max_send_sge;
	int			max_recv_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	unsigned int		max_pi_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps       cq_caps;
	u64			max_dm_size;
	/* Max entries for sgl for optimized performance per READ */
	u32			max_sgl_rd;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

enum opa_mtu {
	OPA_MTU_8192 = 6,
	OPA_MTU_10240 = 7
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
	}
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return (enum opa_mtu)ib_mtu_int_to_enum(mtu);
}
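
/*
 * Conversion sketch (illustrative): the int-to-enum helpers round down
 * to the largest MTU not exceeding the given value:
 *
 *	ib_mtu_int_to_enum(3000)	== IB_MTU_2048
 *	ib_mtu_enum_to_int(IB_MTU_2048)	== 2048
 *	opa_mtu_int_to_enum(9000)	== OPA_MTU_8192
 */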

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_2X	= 16,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64,
	IB_SPEED_NDR	= 128,
};
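
/*
 * Sizing sketch (illustrative; the ~25 Gb/s per-lane figure for EDR is
 * an assumption from the IBTA naming, not defined in this header): the
 * width and speed enums are bit encodings, so convert before doing any
 * arithmetic.  A 4X EDR link has ib_width_enum_to_int(IB_WIDTH_4X) == 4
 * lanes, for roughly 4 * 25 = 100 Gb/s of raw bandwidth.
 */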

enum ib_stat_flag {
	IB_STAT_FLAG_OPTIONAL = 1 << 0,
};

/**
 * struct rdma_stat_desc
 * @name - The name of the counter
 * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
 * @priv - Driver private information; Core code should not use
 */
struct rdma_stat_desc {
	const char *name;
	unsigned int flags;
	const void *priv;
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *    of counters, which are 64 bits and not guaranteed to be written
 *    atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @descs - Array of pointers to static descriptors used for the counters
 *   in directory.
 * @is_disabled - A bitmap to indicate whether each counter is currently
 *   disabled or not.
 * @num_counters - How many hardware counters there are.  If @descs is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(descs) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const struct rdma_stat_desc *descs;
	unsigned long	*is_disabled;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
	const struct rdma_stat_desc *descs, int num_counters,
	unsigned long lifespan);

void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
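
/*
 * Allocation sketch (illustrative; the descriptor table and counter
 * names are made up):
 *
 *	static const struct rdma_stat_desc drv_stats[] = {
 *		[0] = { .name = "rx_pkts" },
 *		[1] = { .name = "tx_pkts", .flags = IB_STAT_FLAG_OPTIONAL },
 *	};
 *
 *	stats = rdma_alloc_hw_stats_struct(drv_stats, ARRAY_SIZE(drv_stats),
 *					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */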

/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	u32                     phys_mtu;
	int			gid_tbl_len;
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u16			active_speed;
	u8                      phys_state;
	u16			port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
	IB_PORT_OPA_MASK_CHG		= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u32		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
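
/*
 * Registration sketch (illustrative; "my_handler" and "my_eh" are made
 * up, and ib_register_event_handler() is declared later in this header):
 *
 *	static void my_handler(struct ib_event_handler *eh,
 *			       struct ib_event *ev)
 *	{
 *		pr_info("async event: %s\n", ib_event_msg(ev->event));
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_eh, device, my_handler);
 *	ib_register_event_handler(&my_eh);
 */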

struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS  = 19,
	IB_RATE_50_GBPS  = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);


/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
 *                            registering any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
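
/*
 * Conversion sketch (illustrative), following the documented 2.5 Gbit/sec
 * base rate:
 *
 *	ib_rate_to_mult(IB_RATE_40_GBPS)	== 16
 *	mult_to_ib_rate(16)			== IB_RATE_40_GBPS
 *	ib_rate_to_mbps(IB_RATE_40_GBPS)	== 40000
 */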

struct rdma_ah_init_attr {
	struct rdma_ah_attr *ah_attr;
	u32 flags;
	struct net_device *xmit_slave;
};

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16			dlid;
	u8			src_path_bits;
};

struct roce_ah_attr {
	u8			dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32			dlid;
	u8			src_path_bits;
	bool			make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route	grh;
	u8			sl;
	u8			static_rate;
	u32			port_num;
	u8			ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND = IB_UVERBS_WC_SEND,
	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
	IB_WC_LSO = IB_UVERBS_WC_TSO,
	IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
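
/*
 * Test sketch (illustrative): because IB_WC_RECV occupies a bit of its
 * own, a consumer can classify completions without enumerating every
 * receive opcode:
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		... completion belongs to a receive WR ...
 */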

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	u32			slid;
	int			wc_flags;
	u16			pkey_index;
	u8			sl;
	u8			dlid_path_bits;
	u32 port_num; /* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32		max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right number of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC = IB_UVERBS_QPT_RC,
	IB_QPT_UC = IB_UVERBS_QPT_UC,
	IB_QPT_UD = IB_UVERBS_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS		=
		IB_UVERBS_QP_CREATE_SCATTER_FCS,
	IB_QP_CREATE_CVLAN_STRIPPING		=
		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	/* This callback occurs in workqueue context */
	void                  (*event_handler)(struct ib_event *, void *);

	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u32			create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u32			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),

	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
};
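
/*
 * Modify sketch (illustrative): the mask tells the driver which fields
 * of struct ib_qp_attr (defined below) are valid, so a RESET->INIT
 * transition sets both, using ib_modify_qp() declared later in this
 * header:
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	attr.pkey_index = 0;
 *	attr.port_num = 1;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */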

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u32			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u32			alt_port_num;
	u8			alt_timeout;
	u32			rate_limit;
	struct net_device	*xmit_slave;
};

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
	IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
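
/*
 * Completion sketch (illustrative; "struct my_ctx" is made up): instead
 * of a wr_id cookie, a WR may point at an embedded struct ib_cqe whose
 * done() callback recovers the enclosing context:
 *
 *	struct my_ctx {
 *		struct ib_cqe cqe;
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_ctx *ctx =
 *			container_of(wc->wr_cqe, struct my_ctx, cqe);
 *		...
 *	}
 *
 * with ctx->cqe.done = my_done and the WR's wr_cqe set to &ctx->cqe.
 */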

struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index; /* valid for GSI only */
	u32			port_num; /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}
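
/*
 * Downcast sketch (illustrative): each extended WR type embeds struct
 * ib_send_wr as its first member, so a driver recovers the full request
 * with the container_of() helpers above once the opcode matches:
 *
 *	if (send_wr->opcode == IB_WR_RDMA_WRITE) {
 *		const struct ib_rdma_wr *rdma = rdma_wr(send_wr);
 *		... use rdma->remote_addr and rdma->rkey ...
 *	}
 */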

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
	IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
	IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
};
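
/*
 * Composition sketch (illustrative): access flags are OR-ed together,
 * e.g. an MR that the local HCA may write and that remote peers may
 * RDMA WRITE into:
 *
 *	int access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 */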

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
	/* The driver failed to destroy the uobject and is being disconnected */
	RDMA_REMOVE_DRIVER_FAILURE,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;

	struct ib_rdmacg_object	cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file  *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;
	struct rw_semaphore	tgt_qps_rwsem;
	struct xarray		tgt_qps;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,		   /* caller context, no hw completions */
};

struct ib_cq {
	struct ib_device       *device;
	struct ib_ucq_object   *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int			cqe;
	unsigned int		cqe_used;
	atomic_t		usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc		*wc;
	struct list_head        pool_entry;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;

	/* updated only by trace points */
	ktime_t timestamp;
	u8 interrupt:1;
	u8 shared:1;
	unsigned int comp_vector;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_usrq_object  *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

enum ib_raw_packet_caps {
	/*
	 * Stripping the cvlan from an incoming packet and reporting it in
	 * the matching work completion is supported.
1651  	 */
1652  	IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
1653  		IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
1654  	/*
1655  	 * Scatter FCS field of an incoming packet to host memory is supported.
1656  	 */
1657  	IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
1658  	/* Checksum offloads are supported (for both send and receive). */
1659  	IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
1660  	/*
1661  	 * When a packet is received for an RQ with no receive WQEs, the
1662  	 * packet processing is delayed.
1663  	 */
1664  	IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
1665  };
1666  
1667  enum ib_wq_type {
1668  	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1669  };
1670  
1671  enum ib_wq_state {
1672  	IB_WQS_RESET,
1673  	IB_WQS_RDY,
1674  	IB_WQS_ERR
1675  };
1676  
1677  struct ib_wq {
1678  	struct ib_device       *device;
1679  	struct ib_uwq_object   *uobject;
1680  	void		    *wq_context;
1681  	void		    (*event_handler)(struct ib_event *, void *);
1682  	struct ib_pd	       *pd;
1683  	struct ib_cq	       *cq;
1684  	u32		wq_num;
1685  	enum ib_wq_state       state;
1686  	enum ib_wq_type	wq_type;
1687  	atomic_t		usecnt;
1688  };
1689  
1690  enum ib_wq_flags {
1691  	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1692  	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1693  	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1694  	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1695  				IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1696  };
1697  
1698  struct ib_wq_init_attr {
1699  	void		       *wq_context;
1700  	enum ib_wq_type	wq_type;
1701  	u32		max_wr;
1702  	u32		max_sge;
1703  	struct	ib_cq	       *cq;
1704  	void		    (*event_handler)(struct ib_event *, void *);
1705  	u32		create_flags; /* Use enum ib_wq_flags */
1706  };
1707  
1708  enum ib_wq_attr_mask {
1709  	IB_WQ_STATE		= 1 << 0,
1710  	IB_WQ_CUR_STATE		= 1 << 1,
1711  	IB_WQ_FLAGS		= 1 << 2,
1712  };
1713  
1714  struct ib_wq_attr {
1715  	enum	ib_wq_state	wq_state;
1716  	enum	ib_wq_state	curr_wq_state;
1717  	u32			flags; /* Use enum ib_wq_flags */
1718  	u32			flags_mask; /* Use enum ib_wq_flags */
1719  };
1720  
1721  struct ib_rwq_ind_table {
1722  	struct ib_device	*device;
1723  	struct ib_uobject      *uobject;
1724  	atomic_t		usecnt;
1725  	u32		ind_tbl_num;
1726  	u32		log_ind_tbl_size;
1727  	struct ib_wq	**ind_tbl;
1728  };
1729  
1730  struct ib_rwq_ind_table_init_attr {
1731  	u32		log_ind_tbl_size;
1732  	/* Each entry is a pointer to Receive Work Queue */
1733  	struct ib_wq	**ind_tbl;
1734  };
1735  
1736  enum port_pkey_state {
1737  	IB_PORT_PKEY_NOT_VALID = 0,
1738  	IB_PORT_PKEY_VALID = 1,
1739  	IB_PORT_PKEY_LISTED = 2,
1740  };
1741  
1742  struct ib_qp_security;
1743  
1744  struct ib_port_pkey {
1745  	enum port_pkey_state	state;
1746  	u16			pkey_index;
1747  	u32			port_num;
1748  	struct list_head	qp_list;
1749  	struct list_head	to_error_list;
1750  	struct ib_qp_security  *sec;
1751  };
1752  
1753  struct ib_ports_pkeys {
1754  	struct ib_port_pkey	main;
1755  	struct ib_port_pkey	alt;
1756  };
1757  
1758  struct ib_qp_security {
1759  	struct ib_qp	       *qp;
1760  	struct ib_device       *dev;
1761  	/* Hold this mutex when changing port and pkey settings. */
1762  	struct mutex		mutex;
1763  	struct ib_ports_pkeys  *ports_pkeys;
1764  	/* A list of all open shared QP handles.  Required to enforce security
1765  	 * properly for all users of a shared QP.
1766  	 */
1767  	struct list_head        shared_qp_list;
1768  	void                   *security;
1769  	bool			destroying;
1770  	atomic_t		error_list_count;
1771  	struct completion	error_complete;
1772  	int			error_comps_pending;
1773  };
1774  
1775  /*
1776   * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1777   * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1778   */
1779  struct ib_qp {
1780  	struct ib_device       *device;
1781  	struct ib_pd	       *pd;
1782  	struct ib_cq	       *send_cq;
1783  	struct ib_cq	       *recv_cq;
1784  	spinlock_t		mr_lock;
1785  	int			mrs_used;
1786  	struct list_head	rdma_mrs;
1787  	struct list_head	sig_mrs;
1788  	struct ib_srq	       *srq;
1789  	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1790  	struct list_head	xrcd_list;
1791  
1792  	/* count times opened, mcast attaches, flow attaches */
1793  	atomic_t		usecnt;
1794  	struct list_head	open_list;
1795  	struct ib_qp           *real_qp;
1796  	struct ib_uqp_object   *uobject;
1797  	void                  (*event_handler)(struct ib_event *, void *);
1798  	void		       *qp_context;
1799  	/* sgid_attrs associated with the AVs */
1800  	const struct ib_gid_attr *av_sgid_attr;
1801  	const struct ib_gid_attr *alt_path_sgid_attr;
1802  	u32			qp_num;
1803  	u32			max_write_sge;
1804  	u32			max_read_sge;
1805  	enum ib_qp_type		qp_type;
1806  	struct ib_rwq_ind_table *rwq_ind_tbl;
1807  	struct ib_qp_security  *qp_sec;
1808  	u32			port;
1809  
1810  	bool			integrity_en;
1811  	/*
1812  	 * Implementation details of the RDMA core, don't use in drivers:
1813  	 */
1814  	struct rdma_restrack_entry     res;
1815  
1816  	/* The counter the qp is bound to */
1817  	struct rdma_counter    *counter;
1818  };
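/*
 * Example (non-normative sketch): a kernel ULP honoring max_write_sge
 * when building an RDMA WRITE.  sge, nents, remote_addr and rkey are
 * assumed to come from the caller's setup.
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.sg_list    = sge,
 *			.num_sge    = nents,
 *			.send_flags = IB_SEND_SIGNALED,
 *		},
 *		.remote_addr = remote_addr,
 *		.rkey	     = rkey,
 *	};
 *
 *	if (nents > qp->max_write_sge)
 *		return -EINVAL;
 *	ret = ib_post_send(qp, &wr.wr, NULL);
 */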
1819  
1820  struct ib_dm {
1821  	struct ib_device  *device;
1822  	u32		   length;
1823  	u32		   flags;
1824  	struct ib_uobject *uobject;
1825  	atomic_t	   usecnt;
1826  };
1827  
1828  struct ib_mr {
1829  	struct ib_device  *device;
1830  	struct ib_pd	  *pd;
1831  	u32		   lkey;
1832  	u32		   rkey;
1833  	u64		   iova;
1834  	u64		   length;
1835  	unsigned int	   page_size;
1836  	enum ib_mr_type	   type;
1837  	bool		   need_inval;
1838  	union {
1839  		struct ib_uobject	*uobject;	/* user */
1840  		struct list_head	qp_entry;	/* FR */
1841  	};
1842  
1843  	struct ib_dm      *dm;
1844  	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1845  	/*
1846  	 * Implementation details of the RDMA core, don't use in drivers:
1847  	 */
1848  	struct rdma_restrack_entry res;
1849  };
1850  
1851  struct ib_mw {
1852  	struct ib_device	*device;
1853  	struct ib_pd		*pd;
1854  	struct ib_uobject	*uobject;
1855  	u32			rkey;
1856  	enum ib_mw_type         type;
1857  };
1858  
1859  /* Supported steering options */
1860  enum ib_flow_attr_type {
1861  	/* steering according to rule specifications */
1862  	IB_FLOW_ATTR_NORMAL		= 0x0,
1863  	/* default unicast and multicast rule -
1864  	 * receive all Eth traffic which isn't steered to any QP
1865  	 */
1866  	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1867  	/* default multicast rule -
1868  	 * receive all Eth multicast traffic which isn't steered to any QP
1869  	 */
1870  	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1871  	/* sniffer rule - receive all port traffic */
1872  	IB_FLOW_ATTR_SNIFFER		= 0x3
1873  };
1874  
1875  /* Supported steering header types */
1876  enum ib_flow_spec_type {
1877  	/* L2 headers*/
1878  	IB_FLOW_SPEC_ETH		= 0x20,
1879  	IB_FLOW_SPEC_IB			= 0x22,
1880  	/* L3 header*/
1881  	IB_FLOW_SPEC_IPV4		= 0x30,
1882  	IB_FLOW_SPEC_IPV6		= 0x31,
1883  	IB_FLOW_SPEC_ESP                = 0x34,
1884  	/* L4 headers*/
1885  	IB_FLOW_SPEC_TCP		= 0x40,
1886  	IB_FLOW_SPEC_UDP		= 0x41,
1887  	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1888  	IB_FLOW_SPEC_GRE		= 0x51,
1889  	IB_FLOW_SPEC_MPLS		= 0x60,
1890  	IB_FLOW_SPEC_INNER		= 0x100,
1891  	/* Actions */
1892  	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1893  	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1894  	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
1895  	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1896  };
1897  #define IB_FLOW_SPEC_LAYER_MASK	0xF0
1898  #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1899  
1900  enum ib_flow_flags {
1901  	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1902  	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1903  	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1904  };
1905  
1906  struct ib_flow_eth_filter {
1907  	u8	dst_mac[6];
1908  	u8	src_mac[6];
1909  	__be16	ether_type;
1910  	__be16	vlan_tag;
1911  	/* Must be last */
1912  	u8	real_sz[];
1913  };
1914  
1915  struct ib_flow_spec_eth {
1916  	u32			  type;
1917  	u16			  size;
1918  	struct ib_flow_eth_filter val;
1919  	struct ib_flow_eth_filter mask;
1920  };
1921  
1922  struct ib_flow_ib_filter {
1923  	__be16 dlid;
1924  	__u8   sl;
1925  	/* Must be last */
1926  	u8	real_sz[];
1927  };
1928  
1929  struct ib_flow_spec_ib {
1930  	u32			 type;
1931  	u16			 size;
1932  	struct ib_flow_ib_filter val;
1933  	struct ib_flow_ib_filter mask;
1934  };
1935  
1936  /* IPv4 header flags */
1937  enum ib_ipv4_flags {
1938  	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1939  	IB_IPV4_MORE_FRAG = 0x4  /* Set on all fragmented packets except
1940  				    the last */
1941  };
1942  
1943  struct ib_flow_ipv4_filter {
1944  	__be32	src_ip;
1945  	__be32	dst_ip;
1946  	u8	proto;
1947  	u8	tos;
1948  	u8	ttl;
1949  	u8	flags;
1950  	/* Must be last */
1951  	u8	real_sz[];
1952  };
1953  
1954  struct ib_flow_spec_ipv4 {
1955  	u32			   type;
1956  	u16			   size;
1957  	struct ib_flow_ipv4_filter val;
1958  	struct ib_flow_ipv4_filter mask;
1959  };
1960  
1961  struct ib_flow_ipv6_filter {
1962  	u8	src_ip[16];
1963  	u8	dst_ip[16];
1964  	__be32	flow_label;
1965  	u8	next_hdr;
1966  	u8	traffic_class;
1967  	u8	hop_limit;
1968  	/* Must be last */
1969  	u8	real_sz[];
1970  };
1971  
1972  struct ib_flow_spec_ipv6 {
1973  	u32			   type;
1974  	u16			   size;
1975  	struct ib_flow_ipv6_filter val;
1976  	struct ib_flow_ipv6_filter mask;
1977  };
1978  
1979  struct ib_flow_tcp_udp_filter {
1980  	__be16	dst_port;
1981  	__be16	src_port;
1982  	/* Must be last */
1983  	u8	real_sz[];
1984  };
1985  
1986  struct ib_flow_spec_tcp_udp {
1987  	u32			      type;
1988  	u16			      size;
1989  	struct ib_flow_tcp_udp_filter val;
1990  	struct ib_flow_tcp_udp_filter mask;
1991  };
1992  
1993  struct ib_flow_tunnel_filter {
1994  	__be32	tunnel_id;
1995  	u8	real_sz[];
1996  };
1997  
1998  /* ib_flow_spec_tunnel describes the VXLAN tunnel;
1999   * the tunnel_id from val holds the VNI value.
2000   */
2001  struct ib_flow_spec_tunnel {
2002  	u32			      type;
2003  	u16			      size;
2004  	struct ib_flow_tunnel_filter  val;
2005  	struct ib_flow_tunnel_filter  mask;
2006  };
2007  
2008  struct ib_flow_esp_filter {
2009  	__be32	spi;
2010  	__be32  seq;
2011  	/* Must be last */
2012  	u8	real_sz[];
2013  };
2014  
2015  struct ib_flow_spec_esp {
2016  	u32                           type;
2017  	u16			      size;
2018  	struct ib_flow_esp_filter     val;
2019  	struct ib_flow_esp_filter     mask;
2020  };
2021  
2022  struct ib_flow_gre_filter {
2023  	__be16 c_ks_res0_ver;
2024  	__be16 protocol;
2025  	__be32 key;
2026  	/* Must be last */
2027  	u8	real_sz[];
2028  };
2029  
2030  struct ib_flow_spec_gre {
2031  	u32                           type;
2032  	u16			      size;
2033  	struct ib_flow_gre_filter     val;
2034  	struct ib_flow_gre_filter     mask;
2035  };
2036  
2037  struct ib_flow_mpls_filter {
2038  	__be32 tag;
2039  	/* Must be last */
2040  	u8	real_sz[];
2041  };
2042  
2043  struct ib_flow_spec_mpls {
2044  	u32                           type;
2045  	u16			      size;
2046  	struct ib_flow_mpls_filter     val;
2047  	struct ib_flow_mpls_filter     mask;
2048  };
2049  
2050  struct ib_flow_spec_action_tag {
2051  	enum ib_flow_spec_type	      type;
2052  	u16			      size;
2053  	u32                           tag_id;
2054  };
2055  
2056  struct ib_flow_spec_action_drop {
2057  	enum ib_flow_spec_type	      type;
2058  	u16			      size;
2059  };
2060  
2061  struct ib_flow_spec_action_handle {
2062  	enum ib_flow_spec_type	      type;
2063  	u16			      size;
2064  	struct ib_flow_action	     *act;
2065  };
2066  
2067  enum ib_counters_description {
2068  	IB_COUNTER_PACKETS,
2069  	IB_COUNTER_BYTES,
2070  };
2071  
2072  struct ib_flow_spec_action_count {
2073  	enum ib_flow_spec_type type;
2074  	u16 size;
2075  	struct ib_counters *counters;
2076  };
2077  
2078  union ib_flow_spec {
2079  	struct {
2080  		u32			type;
2081  		u16			size;
2082  	};
2083  	struct ib_flow_spec_eth		eth;
2084  	struct ib_flow_spec_ib		ib;
2085  	struct ib_flow_spec_ipv4        ipv4;
2086  	struct ib_flow_spec_tcp_udp	tcp_udp;
2087  	struct ib_flow_spec_ipv6        ipv6;
2088  	struct ib_flow_spec_tunnel      tunnel;
2089  	struct ib_flow_spec_esp		esp;
2090  	struct ib_flow_spec_gre		gre;
2091  	struct ib_flow_spec_mpls	mpls;
2092  	struct ib_flow_spec_action_tag  flow_tag;
2093  	struct ib_flow_spec_action_drop drop;
2094  	struct ib_flow_spec_action_handle action;
2095  	struct ib_flow_spec_action_count flow_count;
2096  };
2097  
2098  struct ib_flow_attr {
2099  	enum ib_flow_attr_type type;
2100  	u16	     size;
2101  	u16	     priority;
2102  	u32	     flags;
2103  	u8	     num_of_specs;
2104  	u32	     port;
2105  	union ib_flow_spec flows[];
2106  };
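/*
 * Example (non-normative sketch): building an attribute with a single
 * Ethernet spec matching one destination MAC before handing it to the
 * driver's ->create_flow() op.  "mac" is an assumed u8[ETH_ALEN];
 * ether_addr_copy()/eth_broadcast_addr() come from <linux/etherdevice.h>.
 *
 *	struct ib_flow_attr *attr;
 *	struct ib_flow_spec_eth *eth;
 *
 *	attr = kzalloc(struct_size(attr, flows, 1), GFP_KERNEL);
 *	if (!attr)
 *		return -ENOMEM;
 *	attr->type = IB_FLOW_ATTR_NORMAL;
 *	attr->size = sizeof(*attr) + sizeof(*eth);
 *	attr->num_of_specs = 1;
 *	attr->port = 1;
 *
 *	eth = (struct ib_flow_spec_eth *)attr->flows;
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 *	ether_addr_copy(eth->val.dst_mac, mac);
 *	eth_broadcast_addr(eth->mask.dst_mac);
 */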
2107  
2108  struct ib_flow {
2109  	struct ib_qp		*qp;
2110  	struct ib_device	*device;
2111  	struct ib_uobject	*uobject;
2112  };
2113  
2114  enum ib_flow_action_type {
2115  	IB_FLOW_ACTION_UNSPECIFIED,
2116  	IB_FLOW_ACTION_ESP = 1,
2117  };
2118  
2119  struct ib_flow_action_attrs_esp_keymats {
2120  	enum ib_uverbs_flow_action_esp_keymat			protocol;
2121  	union {
2122  		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2123  	} keymat;
2124  };
2125  
2126  struct ib_flow_action_attrs_esp_replays {
2127  	enum ib_uverbs_flow_action_esp_replay			protocol;
2128  	union {
2129  		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2130  	} replay;
2131  };
2132  
2133  enum ib_flow_action_attrs_esp_flags {
2134  	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2135  	 * This is done in order to share the same flags between user-space and
2136  	 * kernel and avoid an unnecessary translation.
2137  	 */
2138  
2139  	/* Kernel flags */
2140  	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2141  	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2142  };
2143  
2144  struct ib_flow_spec_list {
2145  	struct ib_flow_spec_list	*next;
2146  	union ib_flow_spec		spec;
2147  };
2148  
2149  struct ib_flow_action_attrs_esp {
2150  	struct ib_flow_action_attrs_esp_keymats		*keymat;
2151  	struct ib_flow_action_attrs_esp_replays		*replay;
2152  	struct ib_flow_spec_list			*encap;
2153  	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2154  	 * Value of 0 is a valid value.
2155  	 */
2156  	u32						esn;
2157  	u32						spi;
2158  	u32						seq;
2159  	u32						tfc_pad;
2160  	/* Use enum ib_flow_action_attrs_esp_flags */
2161  	u64						flags;
2162  	u64						hard_limit_pkts;
2163  };
2164  
2165  struct ib_flow_action {
2166  	struct ib_device		*device;
2167  	struct ib_uobject		*uobject;
2168  	enum ib_flow_action_type	type;
2169  	atomic_t			usecnt;
2170  };
2171  
2172  struct ib_mad;
2173  
2174  enum ib_process_mad_flags {
2175  	IB_MAD_IGNORE_MKEY	= 1,
2176  	IB_MAD_IGNORE_BKEY	= 2,
2177  	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2178  };
2179  
2180  enum ib_mad_result {
2181  	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2182  	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2183  	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2184  	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2185  };
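/*
 * Example (non-normative): the usual return from a driver's
 * ->process_mad() once a response has been built in *out_mad:
 *
 *	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 *
 * IB_MAD_RESULT_SUCCESS alone lets the MAD layer continue normal
 * processing; adding IB_MAD_RESULT_CONSUMED stops it instead.
 */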
2186  
2187  struct ib_port_cache {
2188  	u64		      subnet_prefix;
2189  	struct ib_pkey_cache  *pkey;
2190  	struct ib_gid_table   *gid;
2191  	u8                     lmc;
2192  	enum ib_port_state     port_state;
2193  };
2194  
2195  struct ib_port_immutable {
2196  	int                           pkey_tbl_len;
2197  	int                           gid_tbl_len;
2198  	u32                           core_cap_flags;
2199  	u32                           max_mad_size;
2200  };
2201  
2202  struct ib_port_data {
2203  	struct ib_device *ib_dev;
2204  
2205  	struct ib_port_immutable immutable;
2206  
2207  	spinlock_t pkey_list_lock;
2208  
2209  	spinlock_t netdev_lock;
2210  
2211  	struct list_head pkey_list;
2212  
2213  	struct ib_port_cache cache;
2214  
2215  	struct net_device __rcu *netdev;
2216  	netdevice_tracker netdev_tracker;
2217  	struct hlist_node ndev_hash_link;
2218  	struct rdma_port_counter port_counter;
2219  	struct ib_port *sysfs;
2220  };
2221  
2222  /* rdma netdev type - specifies protocol type */
2223  enum rdma_netdev_t {
2224  	RDMA_NETDEV_OPA_VNIC,
2225  	RDMA_NETDEV_IPOIB,
2226  };
2227  
2228  /**
2229   * struct rdma_netdev - rdma netdev
2230   * For cases where netstack interfacing is required.
2231   */
2232  struct rdma_netdev {
2233  	void              *clnt_priv;
2234  	struct ib_device  *hca;
2235  	u32		   port_num;
2236  	int                mtu;
2237  
2238  	/*
2239  	 * cleanup function must be specified.
2240  	 * FIXME: This is only used for OPA_VNIC and that usage should be
2241  	 * removed too.
2242  	 */
2243  	void (*free_rdma_netdev)(struct net_device *netdev);
2244  
2245  	/* control functions */
2246  	void (*set_id)(struct net_device *netdev, int id);
2247  	/* send packet */
2248  	int (*send)(struct net_device *dev, struct sk_buff *skb,
2249  		    struct ib_ah *address, u32 dqpn);
2250  	/* multicast */
2251  	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2252  			    union ib_gid *gid, u16 mlid,
2253  			    int set_qkey, u32 qkey);
2254  	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2255  			    union ib_gid *gid, u16 mlid);
2256  	/* timeout */
2257  	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
2258  };
2259  
2260  struct rdma_netdev_alloc_params {
2261  	size_t sizeof_priv;
2262  	unsigned int txqs;
2263  	unsigned int rxqs;
2264  	void *param;
2265  
2266  	int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2267  				      struct net_device *netdev, void *param);
2268  };
2269  
2270  struct ib_odp_counters {
2271  	atomic64_t faults;
2272  	atomic64_t invalidations;
2273  	atomic64_t prefetch;
2274  };
2275  
2276  struct ib_counters {
2277  	struct ib_device	*device;
2278  	struct ib_uobject	*uobject;
2279  	/* num of objects attached */
2280  	atomic_t	usecnt;
2281  };
2282  
2283  struct ib_counters_read_attr {
2284  	u64	*counters_buff;
2285  	u32	ncounters;
2286  	u32	flags; /* use enum ib_read_counters_flags */
2287  };
2288  
2289  struct uverbs_attr_bundle;
2290  struct iw_cm_id;
2291  struct iw_cm_conn_param;
2292  
2293  #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2294  	.size_##ib_struct =                                                    \
2295  		(sizeof(struct drv_struct) +                                   \
2296  		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2297  		 BUILD_BUG_ON_ZERO(                                            \
2298  			 !__same_type(((struct drv_struct *)NULL)->member,     \
2299  				      struct ib_struct)))
2300  
2301  #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
2302  	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2303  					   gfp, false))
2304  
2305  #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type)                              \
2306  	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2307  					   GFP_KERNEL, true))
2308  
2309  #define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2310  	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2311  
2312  #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
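/*
 * Example (non-normative sketch): a driver embeds the core object at a
 * known offset in its private structure and advertises the total size,
 * so the core can allocate it with rdma_zalloc_drv_obj().  "mydrv_pd"
 * and its ops table are hypothetical.
 *
 *	struct mydrv_pd {
 *		struct ib_pd ibpd;
 *		u32 pdn;
 *	};
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.alloc_pd = mydrv_alloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 */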
2313  
2314  struct rdma_user_mmap_entry {
2315  	struct kref ref;
2316  	struct ib_ucontext *ucontext;
2317  	unsigned long start_pgoff;
2318  	size_t npages;
2319  	bool driver_removed;
2320  };
2321  
2322  /* Return the offset (in bytes) the user should pass to libc's mmap() */
2323  static inline u64
2324  rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2325  {
2326  	return (u64)entry->start_pgoff << PAGE_SHIFT;
2327  }
2328  
2329  /**
2330   * struct ib_device_ops - InfiniBand device operations
2331   * This structure defines all the InfiniBand device operations; providers
2332   * need to define the operations they support, otherwise they are set to NULL.
2333   */
2334  struct ib_device_ops {
2335  	struct module *owner;
2336  	enum rdma_driver_id driver_id;
2337  	u32 uverbs_abi_ver;
2338  	unsigned int uverbs_no_driver_id_binding:1;
2339  
2340  	/*
2341  	 * NOTE: New drivers should not make use of device_group; instead new
2342  	 * device parameter should be exposed via netlink command. This
2343  	 * mechanism exists only for existing drivers.
2344  	 */
2345  	const struct attribute_group *device_group;
2346  	const struct attribute_group **port_groups;
2347  
2348  	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2349  			 const struct ib_send_wr **bad_send_wr);
2350  	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2351  			 const struct ib_recv_wr **bad_recv_wr);
2352  	void (*drain_rq)(struct ib_qp *qp);
2353  	void (*drain_sq)(struct ib_qp *qp);
2354  	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2355  	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2356  	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2357  	int (*post_srq_recv)(struct ib_srq *srq,
2358  			     const struct ib_recv_wr *recv_wr,
2359  			     const struct ib_recv_wr **bad_recv_wr);
2360  	int (*process_mad)(struct ib_device *device, int process_mad_flags,
2361  			   u32 port_num, const struct ib_wc *in_wc,
2362  			   const struct ib_grh *in_grh,
2363  			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
2364  			   size_t *out_mad_size, u16 *out_mad_pkey_index);
2365  	int (*query_device)(struct ib_device *device,
2366  			    struct ib_device_attr *device_attr,
2367  			    struct ib_udata *udata);
2368  	int (*modify_device)(struct ib_device *device, int device_modify_mask,
2369  			     struct ib_device_modify *device_modify);
2370  	void (*get_dev_fw_str)(struct ib_device *device, char *str);
2371  	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2372  						     int comp_vector);
2373  	int (*query_port)(struct ib_device *device, u32 port_num,
2374  			  struct ib_port_attr *port_attr);
2375  	int (*modify_port)(struct ib_device *device, u32 port_num,
2376  			   int port_modify_mask,
2377  			   struct ib_port_modify *port_modify);
2378  	/**
2379  	 * The following mandatory functions are used only at device
2380  	 * registration.  Keep functions such as these at the end of this
2381  	 * structure to avoid cache line misses when accessing struct ib_device
2382  	 * in fast paths.
2383  	 */
2384  	int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2385  				  struct ib_port_immutable *immutable);
2386  	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2387  					       u32 port_num);
2388  	/**
2389  	 * When calling get_netdev, the HW vendor's driver should return the
2390  	 * net device of device @device at port @port_num or NULL if such
2391  	 * a net device doesn't exist. The vendor driver should call dev_hold
2392  	 * on this net device. The HW vendor's device driver must guarantee
2393  	 * that this function returns NULL before the net device has finished
2394  	 * NETDEV_UNREGISTER state.
2395  	 */
2396  	struct net_device *(*get_netdev)(struct ib_device *device,
2397  					 u32 port_num);
2398  	/**
2399  	 * rdma netdev operation
2400  	 *
2401  	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2402  	 * must return -EOPNOTSUPP if it doesn't support the specified type.
2403  	 */
2404  	struct net_device *(*alloc_rdma_netdev)(
2405  		struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2406  		const char *name, unsigned char name_assign_type,
2407  		void (*setup)(struct net_device *));
2408  
2409  	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2410  				      enum rdma_netdev_t type,
2411  				      struct rdma_netdev_alloc_params *params);
2412  	/**
2413  	 * query_gid should return the GID value for @device when the
2414  	 * @port_num link layer is either IB or iWARP. It is a no-op if the
2415  	 * @port_num port is the RoCE link layer.
2416  	 */
2417  	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2418  			 union ib_gid *gid);
2419  	/**
2420  	 * When calling add_gid, the HW vendor's driver should add the GID
2421  	 * of the device port at the GID index described by @attr. Meta-info
2422  	 * for that GID (for example, the network device related to it) is
2423  	 * available in @attr. @context allows the HW vendor driver to store
2424  	 * extra information together with a GID entry. The HW vendor driver
2425  	 * may allocate memory to hold this information and store it in
2426  	 * @context when a new GID entry is written. Params are consistent
2427  	 * until the next call of add_gid or delete_gid. The function should
2428  	 * return 0 on success or an error otherwise. It may be called
2429  	 * concurrently for different ports. This function is only called
2430  	 * when roce_gid_table is used.
2431  	 */
2432  	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2433  	/**
2434  	 * When calling del_gid, the HW vendor's driver should delete the
2435  	 * gid of device @device at gid index gid_index of port port_num
2436  	 * available in @attr.
2437  	 * Upon the deletion of a GID entry, the HW vendor must free any
2438  	 * allocated memory. The caller will clear @context afterwards.
2439  	 * This function is only called when roce_gid_table is used.
2440  	 */
2441  	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2442  	int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2443  			  u16 *pkey);
2444  	int (*alloc_ucontext)(struct ib_ucontext *context,
2445  			      struct ib_udata *udata);
2446  	void (*dealloc_ucontext)(struct ib_ucontext *context);
2447  	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2448  	/**
2449  	 * This will be called once refcount of an entry in mmap_xa reaches
2450  	 * zero. The type of the memory that was mapped may differ between
2451  	 * entries and is opaque to the rdma_user_mmap interface.
2452  	 * Therefore needs to be implemented by the driver in mmap_free.
2453  	 * Therefore the cleanup needs to be implemented by the driver in
2454  	 * mmap_free.
2454  	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2455  	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2456  	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2457  	int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2458  	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2459  			 struct ib_udata *udata);
2460  	int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2461  			      struct ib_udata *udata);
2462  	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2463  	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2464  	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2465  	int (*create_srq)(struct ib_srq *srq,
2466  			  struct ib_srq_init_attr *srq_init_attr,
2467  			  struct ib_udata *udata);
2468  	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2469  			  enum ib_srq_attr_mask srq_attr_mask,
2470  			  struct ib_udata *udata);
2471  	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2472  	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2473  	int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
2474  			 struct ib_udata *udata);
2475  	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2476  			 int qp_attr_mask, struct ib_udata *udata);
2477  	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2478  			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2479  	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2480  	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2481  			 struct ib_udata *udata);
2482  	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2483  	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2484  	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2485  	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2486  	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2487  				     u64 virt_addr, int mr_access_flags,
2488  				     struct ib_udata *udata);
2489  	struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2490  					    u64 length, u64 virt_addr, int fd,
2491  					    int mr_access_flags,
2492  					    struct ib_udata *udata);
2493  	struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2494  				       u64 length, u64 virt_addr,
2495  				       int mr_access_flags, struct ib_pd *pd,
2496  				       struct ib_udata *udata);
2497  	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2498  	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2499  				  u32 max_num_sg);
2500  	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2501  					    u32 max_num_data_sg,
2502  					    u32 max_num_meta_sg);
2503  	int (*advise_mr)(struct ib_pd *pd,
2504  			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2505  			 struct ib_sge *sg_list, u32 num_sge,
2506  			 struct uverbs_attr_bundle *attrs);
2507  
2508  	/*
2509  	 * Kernel users should universally support relaxed ordering (RO), as
2510  	 * they are designed to read data only after observing the CQE and use
2511  	 * the DMA API correctly.
2512  	 *
2513  	 * Some drivers implicitly enable RO if the platform supports it.
2514  	 */
2515  	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2516  			 unsigned int *sg_offset);
2517  	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2518  			       struct ib_mr_status *mr_status);
2519  	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2520  	int (*dealloc_mw)(struct ib_mw *mw);
2521  	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2522  	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2523  	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2524  	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2525  	struct ib_flow *(*create_flow)(struct ib_qp *qp,
2526  				       struct ib_flow_attr *flow_attr,
2527  				       struct ib_udata *udata);
2528  	int (*destroy_flow)(struct ib_flow *flow_id);
2529  	int (*destroy_flow_action)(struct ib_flow_action *action);
2530  	int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2531  				 int state);
2532  	int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2533  			     struct ifla_vf_info *ivf);
2534  	int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2535  			    struct ifla_vf_stats *stats);
2536  	int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2537  			    struct ifla_vf_guid *node_guid,
2538  			    struct ifla_vf_guid *port_guid);
2539  	int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2540  			   int type);
2541  	struct ib_wq *(*create_wq)(struct ib_pd *pd,
2542  				   struct ib_wq_init_attr *init_attr,
2543  				   struct ib_udata *udata);
2544  	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2545  	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2546  			 u32 wq_attr_mask, struct ib_udata *udata);
2547  	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2548  				    struct ib_rwq_ind_table_init_attr *init_attr,
2549  				    struct ib_udata *udata);
2550  	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2551  	struct ib_dm *(*alloc_dm)(struct ib_device *device,
2552  				  struct ib_ucontext *context,
2553  				  struct ib_dm_alloc_attr *attr,
2554  				  struct uverbs_attr_bundle *attrs);
2555  	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2556  	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2557  				   struct ib_dm_mr_attr *attr,
2558  				   struct uverbs_attr_bundle *attrs);
2559  	int (*create_counters)(struct ib_counters *counters,
2560  			       struct uverbs_attr_bundle *attrs);
2561  	int (*destroy_counters)(struct ib_counters *counters);
2562  	int (*read_counters)(struct ib_counters *counters,
2563  			     struct ib_counters_read_attr *counters_read_attr,
2564  			     struct uverbs_attr_bundle *attrs);
2565  	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2566  			    int data_sg_nents, unsigned int *data_sg_offset,
2567  			    struct scatterlist *meta_sg, int meta_sg_nents,
2568  			    unsigned int *meta_sg_offset);
2569  
2570  	/**
2571  	 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
2572  	 *   fill in the driver initialized data.  The struct is kfree()'ed by
2573  	 *   the sysfs core when the device is removed.  A lifespan of -1 in the
2574  	 *   return struct tells the core to set a default lifespan.
2575  	 */
2576  	struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2577  	struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2578  						     u32 port_num);
2579  	/**
2580  	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2581  	 * @index - The index in the value array we wish to have updated, or
2582  	 *   num_counters if we want all stats updated
2583  	 * Return codes -
2584  	 *   < 0 - Error, no counters updated
2585  	 *   index - Updated the single counter pointed to by index
2586  	 *   num_counters - Updated all counters (will reset the timestamp
2587  	 *     and prevent further calls for lifespan milliseconds)
2588  	 * Drivers are allowed to update all counters in lieu of just the
2589  	 *   one given in index at their option
2590  	 */
2591  	int (*get_hw_stats)(struct ib_device *device,
2592  			    struct rdma_hw_stats *stats, u32 port, int index);
2593  
2594  	/**
2595  	 * modify_hw_stat - Modify the counter configuration
2596  	 * @enable: true/false when enable/disable a counter
2597  	 * Return codes - 0 on success or error code otherwise.
2598  	 */
2599  	int (*modify_hw_stat)(struct ib_device *device, u32 port,
2600  			      unsigned int counter_index, bool enable);
2601  	/**
2602  	 * Allows rdma drivers to add their own restrack attributes.
2603  	 */
2604  	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2605  	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2606  	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2607  	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2608  	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2609  	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2610  	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2611  
2612  	/* Device lifecycle callbacks */
2613  	/*
2614  	 * Called after the device becomes registered, before clients are
2615  	 * attached
2616  	 */
2617  	int (*enable_driver)(struct ib_device *dev);
2618  	/*
2619  	 * This is called as part of ib_dealloc_device().
2620  	 */
2621  	void (*dealloc_driver)(struct ib_device *dev);
2622  
2623  	/* iWarp CM callbacks */
2624  	void (*iw_add_ref)(struct ib_qp *qp);
2625  	void (*iw_rem_ref)(struct ib_qp *qp);
2626  	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2627  	int (*iw_connect)(struct iw_cm_id *cm_id,
2628  			  struct iw_cm_conn_param *conn_param);
2629  	int (*iw_accept)(struct iw_cm_id *cm_id,
2630  			 struct iw_cm_conn_param *conn_param);
2631  	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2632  			 u8 pdata_len);
2633  	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2634  	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2635  	/**
2636  	 * counter_bind_qp - Bind a QP to a counter.
2637  	 * @counter - The counter to be bound. If counter->id is zero then
2638  	 *   the driver needs to allocate a new counter and set counter->id
2639  	 */
2640  	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2641  	/**
2642  	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2643  	 *   counter and bind it onto the default one
2644  	 */
2645  	int (*counter_unbind_qp)(struct ib_qp *qp);
2646  	/**
2647  	 * counter_dealloc - De-allocate the hw counter
2648  	 */
2649  	int (*counter_dealloc)(struct rdma_counter *counter);
2650  	/**
2651  	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2652  	 * the driver initialized data.
2653  	 */
2654  	struct rdma_hw_stats *(*counter_alloc_stats)(
2655  		struct rdma_counter *counter);
2656  	/**
2657  	 * counter_update_stats - Query the stats value of this counter
2658  	 */
2659  	int (*counter_update_stats)(struct rdma_counter *counter);
2660  
2661  	/**
2662  	 * Allows rdma drivers to add their own restrack attributes
2663  	 * dumped via 'rdma stat' iproute2 command.
2664  	 */
2665  	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2666  
2667  	/* query driver for its ucontext properties */
2668  	int (*query_ucontext)(struct ib_ucontext *context,
2669  			      struct uverbs_attr_bundle *attrs);
2670  
2671  	/*
2672  	 * Provide NUMA node. This API exists for rdmavt/hfi1 only.
2673  	 * Everyone else relies on Linux memory management model.
2674  	 */
2675  	int (*get_numa_node)(struct ib_device *dev);
2676  
2677  	DECLARE_RDMA_OBJ_SIZE(ib_ah);
2678  	DECLARE_RDMA_OBJ_SIZE(ib_counters);
2679  	DECLARE_RDMA_OBJ_SIZE(ib_cq);
2680  	DECLARE_RDMA_OBJ_SIZE(ib_mw);
2681  	DECLARE_RDMA_OBJ_SIZE(ib_pd);
2682  	DECLARE_RDMA_OBJ_SIZE(ib_qp);
2683  	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2684  	DECLARE_RDMA_OBJ_SIZE(ib_srq);
2685  	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2686  	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2687  };
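/*
 * Example (non-normative sketch): wiring a minimal ops table into a
 * device during driver probe; all "mydrv_" names are hypothetical.
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.owner = THIS_MODULE,
 *		.driver_id = RDMA_DRIVER_UNKNOWN,
 *		.query_device = mydrv_query_device,
 *		.query_port = mydrv_query_port,
 *		.get_port_immutable = mydrv_get_port_immutable,
 *	};
 *
 *	ib_set_device_ops(&dev->ibdev, &mydrv_dev_ops);
 */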
2688  
2689  struct ib_core_device {
2690  	/* device must be the first element in the structure, for as long
2691  	 * as the union of ib_core_device and device exists in ib_device.
2692  	 */
2693  	struct device dev;
2694  	possible_net_t rdma_net;
2695  	struct kobject *ports_kobj;
2696  	struct list_head port_list;
2697  	struct ib_device *owner; /* reach back to owner ib_device */
2698  };
2699  
2700  struct rdma_restrack_root;
2701  struct ib_device {
2702  	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2703  	struct device                *dma_device;
2704  	struct ib_device_ops	     ops;
2705  	char                          name[IB_DEVICE_NAME_MAX];
2706  	struct rcu_head rcu_head;
2707  
2708  	struct list_head              event_handler_list;
2709  	/* Protects event_handler_list */
2710  	struct rw_semaphore event_handler_rwsem;
2711  
2712  	/* Protects QP's event_handler calls and open_qp list */
2713  	spinlock_t qp_open_list_lock;
2714  
2715  	struct rw_semaphore	      client_data_rwsem;
2716  	struct xarray                 client_data;
2717  	struct mutex                  unregistration_lock;
2718  
2719  	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2720  	rwlock_t cache_lock;
2721  	/**
2722  	 * port_data is indexed by port number
2723  	 */
2724  	struct ib_port_data *port_data;
2725  
2726  	int			      num_comp_vectors;
2727  
2728  	union {
2729  		struct device		dev;
2730  		struct ib_core_device	coredev;
2731  	};
2732  
2733  	/* First group is for device attributes,
2734  	 * second group is for driver-provided attributes (optional),
2735  	 * third group is for the hw_stats.
2736  	 * It is a NULL-terminated array.
2737  	 */
2738  	const struct attribute_group	*groups[4];
2739  
2740  	u64			     uverbs_cmd_mask;
2741  
2742  	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2743  	__be64			     node_guid;
2744  	u32			     local_dma_lkey;
2745  	u16                          is_switch:1;
2746  	/* Indicates kernel verbs support, should not be used in drivers */
2747  	u16                          kverbs_provider:1;
2748  	/* CQ adaptive moderation (RDMA DIM) */
2749  	u16                          use_cq_dim:1;
2750  	u8                           node_type;
2751  	u32			     phys_port_cnt;
2752  	struct ib_device_attr        attrs;
2753  	struct hw_stats_device_data *hw_stats_data;
2754  
2755  #ifdef CONFIG_CGROUP_RDMA
2756  	struct rdmacg_device         cg_device;
2757  #endif
2758  
2759  	u32                          index;
2760  
2761  	spinlock_t                   cq_pools_lock;
2762  	struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2763  
2764  	struct rdma_restrack_root *res;
2765  
2766  	const struct uapi_definition   *driver_def;
2767  
2768  	/*
2769  	 * Positive refcount indicates that the device is currently
2770  	 * registered and cannot be unregistered.
2771  	 */
2772  	refcount_t refcount;
2773  	struct completion unreg_completion;
2774  	struct work_struct unregistration_work;
2775  
2776  	const struct rdma_link_ops *link_ops;
2777  
2778  	/* Protects compat_devs xarray modifications */
2779  	struct mutex compat_devs_mutex;
2780  	/* Maintains compat devices for each net namespace */
2781  	struct xarray compat_devs;
2782  
2783  	/* Used by iWarp CM */
2784  	char iw_ifname[IFNAMSIZ];
2785  	u32 iw_driver_flags;
2786  	u32 lag_flags;
2787  };
2788  
2789  static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
2790  				    gfp_t gfp, bool is_numa_aware)
2791  {
2792  	if (is_numa_aware && dev->ops.get_numa_node)
2793  		return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
2794  
2795  	return kzalloc(size, gfp);
2796  }
2797  
2798  struct ib_client_nl_info;
2799  struct ib_client {
2800  	const char *name;
2801  	int (*add)(struct ib_device *ibdev);
2802  	void (*remove)(struct ib_device *, void *client_data);
2803  	void (*rename)(struct ib_device *dev, void *client_data);
2804  	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2805  			   struct ib_client_nl_info *res);
2806  	int (*get_global_nl_info)(struct ib_client_nl_info *res);
2807  
2808  	/* Returns the net_dev belonging to this ib_client and matching the
2809  	 * given parameters.
2810  	 * @dev:	 An RDMA device that the net_dev uses for communication.
2811  	 * @port:	 A physical port number on the RDMA device.
2812  	 * @pkey:	 P_Key that the net_dev uses if applicable.
2813  	 * @gid:	 A GID that the net_dev uses to communicate.
2814  	 * @addr:	 An IP address the net_dev is configured with.
2815  	 * @client_data: The device's client data set by ib_set_client_data().
2816  	 *
2817  	 * An ib_client that implements a net_dev on top of RDMA devices
2818  	 * (such as IP over IB) should implement this callback, allowing the
2819  	 * rdma_cm module to find the right net_dev for a given request.
2820  	 *
2821  	 * The caller is responsible for calling dev_put on the returned
2822  	 * netdev. */
2823  	struct net_device *(*get_net_dev_by_params)(
2824  			struct ib_device *dev,
2825  			u32 port,
2826  			u16 pkey,
2827  			const union ib_gid *gid,
2828  			const struct sockaddr *addr,
2829  			void *client_data);
2830  
2831  	refcount_t uses;
2832  	struct completion uses_zero;
2833  	u32 client_id;
2834  
2835  	/* kverbs are not required by the client */
2836  	u8 no_kverbs_req:1;
2837  };
2838  
2839  /*
2840   * IB block DMA iterator
2841   *
2842   * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2843   * to a HW supported page size.
2844   */
2845  struct ib_block_iter {
2846  	/* internal states */
2847  	struct scatterlist *__sg;	/* sg holding the current aligned block */
2848  	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
2849  	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
2850  	unsigned int __sg_nents;	/* number of SG entries */
2851  	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
2852  	unsigned int __pg_bit;		/* alignment of current block */
2853  };
2854  
2855  struct ib_device *_ib_alloc_device(size_t size);
2856  #define ib_alloc_device(drv_struct, member)                                    \
2857  	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2858  				      BUILD_BUG_ON_ZERO(offsetof(              \
2859  					      struct drv_struct, member))),    \
2860  		     struct drv_struct, member)
2861  
2862  void ib_dealloc_device(struct ib_device *device);
2863  
2864  void ib_get_device_fw_str(struct ib_device *device, char *str);
2865  
2866  int ib_register_device(struct ib_device *device, const char *name,
2867  		       struct device *dma_device);
2868  void ib_unregister_device(struct ib_device *device);
2869  void ib_unregister_driver(enum rdma_driver_id driver_id);
2870  void ib_unregister_device_and_put(struct ib_device *device);
2871  void ib_unregister_device_queued(struct ib_device *ib_dev);
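/*
 * Example (non-normative sketch): the typical device lifecycle for a
 * driver whose hypothetical "struct mydrv_dev" embeds "struct ib_device
 * ibdev" as the member passed to ib_alloc_device():
 *
 *	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
 *	if (!dev)
 *		return -ENOMEM;
 *	...
 *	ret = ib_register_device(&dev->ibdev, "mydrv%d", dma_dev);
 *	if (ret)
 *		ib_dealloc_device(&dev->ibdev);
 *	...
 *	ib_unregister_device(&dev->ibdev);
 */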
2872  
2873  int ib_register_client(struct ib_client *client);
2874  void ib_unregister_client(struct ib_client *client);
2875  
2876  void __rdma_block_iter_start(struct ib_block_iter *biter,
2877  			     struct scatterlist *sglist,
2878  			     unsigned int nents,
2879  			     unsigned long pgsz);
2880  bool __rdma_block_iter_next(struct ib_block_iter *biter);
2881  
2882  /**
2883   * rdma_block_iter_dma_address - get the aligned dma address of the current
2884   * block held by the block iterator.
2885   * @biter: block iterator holding the memory block
2886   */
2887  static inline dma_addr_t
2888  rdma_block_iter_dma_address(struct ib_block_iter *biter)
2889  {
2890  	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2891  }
2892  
2893  /**
2894   * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2895   * @sglist: sglist to iterate over
2896   * @biter: block iterator holding the memory block
2897   * @nents: maximum number of sg entries to iterate over
2898   * @pgsz: best HW supported page size to use
2899   *
2900   * Callers may use rdma_block_iter_dma_address() to get each
2901   * block's aligned DMA address.
2902   */
2903  #define rdma_for_each_block(sglist, biter, nents, pgsz)		\
2904  	for (__rdma_block_iter_start(biter, sglist, nents,	\
2905  				     pgsz);			\
2906  	     __rdma_block_iter_next(biter);)
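/*
 * Example (non-normative sketch): programming HW page entries from a
 * DMA-mapped SGL.  sglist/nents describe the mapped region, best_pgsz
 * is a HW-supported page size, and mydrv_set_page() is hypothetical.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, best_pgsz)
 *		mydrv_set_page(mr, rdma_block_iter_dma_address(&biter));
 */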
2907  
2908  /**
2909   * ib_get_client_data - Get IB client context
2910   * @device:Device to get context for
2911   * @client:Client to get context for
2912   *
2913   * ib_get_client_data() returns the client context data set with
2914   * ib_set_client_data(). This can only be called while the client is
2915   * registered to the device, once the ib_client remove() callback returns this
2916   * cannot be called.
2917   */
2918  static inline void *ib_get_client_data(struct ib_device *device,
2919  				       struct ib_client *client)
2920  {
2921  	return xa_load(&device->client_data, client->client_id);
2922  }
2923  void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2924  			 void *data);
2925  void ib_set_device_ops(struct ib_device *device,
2926  		       const struct ib_device_ops *ops);
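/*
 * Example (non-normative sketch): a minimal ib_client.  The core calls
 * ->add() for each registered device; per-device state is stashed with
 * ib_set_client_data() and fetched later with ib_get_client_data().
 * "myclient_remove" and "struct myclient_state" are hypothetical.
 *
 *	static struct ib_client myclient;
 *
 *	static int myclient_add(struct ib_device *ibdev)
 *	{
 *		struct myclient_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &myclient, st);
 *		return 0;
 *	}
 *
 *	static struct ib_client myclient = {
 *		.name = "myclient",
 *		.add = myclient_add,
 *		.remove = myclient_remove,
 *	};
 */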
2927  
2928  int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2929  		      unsigned long pfn, unsigned long size, pgprot_t prot,
2930  		      struct rdma_user_mmap_entry *entry);
2931  int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2932  				struct rdma_user_mmap_entry *entry,
2933  				size_t length);
2934  int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2935  				      struct rdma_user_mmap_entry *entry,
2936  				      size_t length, u32 min_pgoff,
2937  				      u32 max_pgoff);
2938  
2939  static inline int
2940  rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
2941  				  struct rdma_user_mmap_entry *entry,
2942  				  size_t length, u32 pgoff)
2943  {
2944  	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
2945  						 pgoff);
2946  }
2947  
2948  struct rdma_user_mmap_entry *
2949  rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2950  			       unsigned long pgoff);
2951  struct rdma_user_mmap_entry *
2952  rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2953  			 struct vm_area_struct *vma);
2954  void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2955  
2956  void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
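/*
 * Example (non-normative sketch): exposing one page (e.g. a doorbell)
 * to user space.  "struct mydrv_mmap_entry" wrapping rdma_entry and the
 * response field are hypothetical.
 *
 *	struct mydrv_mmap_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
 *
 *	if (!e)
 *		return -ENOMEM;
 *	ret = rdma_user_mmap_entry_insert(ucontext, &e->rdma_entry,
 *					  PAGE_SIZE);
 *	if (ret) {
 *		kfree(e);
 *		return ret;
 *	}
 *	resp.db_offset = rdma_user_mmap_get_offset(&e->rdma_entry);
 */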
2957  
2958  static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2959  {
2960  	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2961  }
2962  
2963  static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2964  {
2965  	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2966  }
2967  
2968  static inline bool ib_is_buffer_cleared(const void __user *p,
2969  					size_t len)
2970  {
2971  	bool ret;
2972  	u8 *buf;
2973  
2974  	if (len > USHRT_MAX)
2975  		return false;
2976  
2977  	buf = memdup_user(p, len);
2978  	if (IS_ERR(buf))
2979  		return false;
2980  
2981  	ret = !memchr_inv(buf, 0, len);
2982  	kfree(buf);
2983  	return ret;
2984  }
2985  
2986  static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2987  				       size_t offset,
2988  				       size_t len)
2989  {
2990  	return ib_is_buffer_cleared(udata->inbuf + offset, len);
2991  }
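/*
 * Example (non-normative sketch): a driver accepting a possibly larger
 * (newer) user command struct, but only if the bytes it does not
 * understand are zero:
 *
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 *
 *	ret = ib_copy_from_udata(&cmd, udata,
 *				 min(sizeof(cmd), udata->inlen));
 */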
2992  
2993  /**
2994   * ib_modify_qp_is_ok - Check that the supplied attribute mask
2995   * contains all required attributes and no attributes not allowed for
2996   * the given QP state transition.
2997   * @cur_state: Current QP state
2998   * @next_state: Next QP state
2999   * @type: QP type
3000   * @mask: Mask of supplied QP attributes
3001   *
3002   * This function is a helper function that a low-level driver's
3003   * modify_qp method can use to validate the consumer's input.  It
3004   * checks that cur_state and next_state are valid QP states, that a
3005   * transition from cur_state to next_state is allowed by the IB spec,
3006   * and that the attribute mask supplied is allowed for the transition.
3007   */
3008  bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
3009  			enum ib_qp_type type, enum ib_qp_attr_mask mask);
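/*
 * Example (non-normative sketch): the usual gate at the top of a
 * driver's ->modify_qp(), where qp->state is the driver's cached state.
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state :
 *						  qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */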
3010  
3011  void ib_register_event_handler(struct ib_event_handler *event_handler);
3012  void ib_unregister_event_handler(struct ib_event_handler *event_handler);
3013  void ib_dispatch_event(const struct ib_event *event);
3014  
3015  int ib_query_port(struct ib_device *device,
3016  		  u32 port_num, struct ib_port_attr *port_attr);
3017  
3018  enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3019  					       u32 port_num);
3020  
3021  /**
3022   * rdma_cap_ib_switch - Check if the device is IB switch
3023   * @device: Device to check
3024   *
3025   * The device driver is responsible for setting the is_switch bit
3026   * in the ib_device structure at init time.
3027   *
3028   * Return: true if the device is IB switch.
3029   */
3030  static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3031  {
3032  	return device->is_switch;
3033  }
3034  
3035  /**
3036   * rdma_start_port - Return the first valid port number for the device
3037   * specified
3038   *
3039   * @device: Device to be checked
3040   *
3041   * Return start port number
3042   */
3043  static inline u32 rdma_start_port(const struct ib_device *device)
3044  {
3045  	return rdma_cap_ib_switch(device) ? 0 : 1;
3046  }
3047  
3048  /**
3049   * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3050   * @device: The struct ib_device * to iterate over
3051   * @iter: The unsigned int to store the port number
3052   */
3053  #define rdma_for_each_port(device, iter)                                       \
3054  	for (iter = rdma_start_port(device +				       \
3055  				    BUILD_BUG_ON_ZERO(!__same_type(u32,	       \
3056  								   iter)));    \
3057  	     iter <= rdma_end_port(device); iter++)
3058  
3059  /**
3060   * rdma_end_port - Return the last valid port number for the device
3061   * specified
3062   *
3063   * @device: Device to be checked
3064   *
3065   * Return last port number
3066   */
3067  static inline u32 rdma_end_port(const struct ib_device *device)
3068  {
3069  	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3070  }
3071  
3072  static inline int rdma_is_port_valid(const struct ib_device *device,
3073  				     unsigned int port)
3074  {
3075  	return (port >= rdma_start_port(device) &&
3076  		port <= rdma_end_port(device));
3077  }
3078  
3079  static inline bool rdma_is_grh_required(const struct ib_device *device,
3080  					u32 port_num)
3081  {
3082  	return device->port_data[port_num].immutable.core_cap_flags &
3083  	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
3084  }
3085  
3086  static inline bool rdma_protocol_ib(const struct ib_device *device,
3087  				    u32 port_num)
3088  {
3089  	return device->port_data[port_num].immutable.core_cap_flags &
3090  	       RDMA_CORE_CAP_PROT_IB;
3091  }
3092  
3093  static inline bool rdma_protocol_roce(const struct ib_device *device,
3094  				      u32 port_num)
3095  {
3096  	return device->port_data[port_num].immutable.core_cap_flags &
3097  	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3098  }
3099  
3100  static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3101  						u32 port_num)
3102  {
3103  	return device->port_data[port_num].immutable.core_cap_flags &
3104  	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3105  }
3106  
3107  static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3108  						u32 port_num)
3109  {
3110  	return device->port_data[port_num].immutable.core_cap_flags &
3111  	       RDMA_CORE_CAP_PROT_ROCE;
3112  }
3113  
3114  static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3115  				       u32 port_num)
3116  {
3117  	return device->port_data[port_num].immutable.core_cap_flags &
3118  	       RDMA_CORE_CAP_PROT_IWARP;
3119  }
3120  
3121  static inline bool rdma_ib_or_roce(const struct ib_device *device,
3122  				   u32 port_num)
3123  {
3124  	return rdma_protocol_ib(device, port_num) ||
3125  		rdma_protocol_roce(device, port_num);
3126  }
3127  
3128  static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3129  					    u32 port_num)
3130  {
3131  	return device->port_data[port_num].immutable.core_cap_flags &
3132  	       RDMA_CORE_CAP_PROT_RAW_PACKET;
3133  }
3134  
3135  static inline bool rdma_protocol_usnic(const struct ib_device *device,
3136  				       u32 port_num)
3137  {
3138  	return device->port_data[port_num].immutable.core_cap_flags &
3139  	       RDMA_CORE_CAP_PROT_USNIC;
3140  }
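/*
 * Example (non-normative sketch): walking every port and branching on
 * its protocol; the setup_*() helpers are hypothetical.
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(ibdev, port) {
 *		if (rdma_protocol_roce(ibdev, port))
 *			setup_roce_gids(ibdev, port);
 *		else if (rdma_protocol_ib(ibdev, port))
 *			setup_ib_port(ibdev, port);
 *	}
 */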
3141  
3142  /**
3143   * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3144   * Management Datagrams.
3145   * @device: Device to check
3146   * @port_num: Port number to check
3147   *
3148   * Management Datagrams (MAD) are a required part of the InfiniBand
3149   * specification and are supported on all InfiniBand devices.  A slightly
3150   * extended version is also supported on OPA interfaces.
3151   *
3152   * Return: true if the port supports sending/receiving of MAD packets.
3153   */
3154  static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3155  {
3156  	return device->port_data[port_num].immutable.core_cap_flags &
3157  	       RDMA_CORE_CAP_IB_MAD;
3158  }
3159  
3160  /**
3161   * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3162   * Management Datagrams.
3163   * @device: Device to check
3164   * @port_num: Port number to check
3165   *
3166   * Intel OmniPath devices extend and/or replace the InfiniBand Management
3167   * datagrams with their own versions.  These OPA MADs share many but not all of
3168   * the characteristics of InfiniBand MADs.
3169   *
3170   * OPA MADs differ in the following ways:
3171   *
3172   *    1) MADs are variable size up to 2K
3173   *       IBTA defined MADs remain fixed at 256 bytes
3174   *    2) OPA SMPs must carry valid PKeys
3175   *    3) OPA SMP packets are a different format
3176   *
3177   * Return: true if the port supports OPA MAD packet formats.
3178   */
3179  static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3180  {
3181  	return device->port_data[port_num].immutable.core_cap_flags &
3182  		RDMA_CORE_CAP_OPA_MAD;
3183  }
3184  
3185  /**
3186   * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3187   * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3188   * @device: Device to check
3189   * @port_num: Port number to check
3190   *
3191   * Each InfiniBand node is required to provide a Subnet Management Agent
3192   * that the subnet manager can access.  Prior to the fabric being fully
3193   * configured by the subnet manager, the SMA is accessed via a well known
3194   * interface called the Subnet Management Interface (SMI).  This interface
3195   * uses directed route packets to communicate with the SM to get around the
3196   * chicken and egg problem of the SM needing to know what's on the fabric
3197   * in order to configure the fabric, and needing to configure the fabric in
3198   * order to send packets to the devices on the fabric.  These directed
3199   * route packets do not need the fabric fully configured in order to reach
3200   * their destination.  The SMI is the only method allowed to send
3201   * directed route packets on an InfiniBand fabric.
3202   *
3203   * Return: true if the port provides an SMI.
3204   */
3205  static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3206  {
3207  	return device->port_data[port_num].immutable.core_cap_flags &
3208  	       RDMA_CORE_CAP_IB_SMI;
3209  }
3210  
3211  /**
3212   * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3213   * Communication Manager.
3214   * @device: Device to check
3215   * @port_num: Port number to check
3216   *
3217   * The InfiniBand Communication Manager is one of many pre-defined General
3218   * Service Agents (GSA) that are accessed via the General Service
3219   * Interface (GSI).  Its role is to facilitate establishment of connections
3220   * between nodes as well as other management related tasks for established
3221   * connections.
3222   *
3223   * Return: true if the port supports an IB CM (this does not guarantee that
3224   * a CM is actually running however).
3225   */
3226  static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3227  {
3228  	return device->port_data[port_num].immutable.core_cap_flags &
3229  	       RDMA_CORE_CAP_IB_CM;
3230  }
3231  
3232  /**
3233   * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3234   * Communication Manager.
3235   * @device: Device to check
3236   * @port_num: Port number to check
3237   *
3238   * Similar to above, but specific to iWARP connections which have a different
3239   * managment protocol than InfiniBand.
3240   *
3241   * Return: true if the port supports an iWARP CM (this does not guarantee that
3242   * a CM is actually running however).
3243   */
3244  static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3245  {
3246  	return device->port_data[port_num].immutable.core_cap_flags &
3247  	       RDMA_CORE_CAP_IW_CM;
3248  }
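
/*
 * Example (illustrative sketch, not part of the original header): a ULP
 * would typically branch on these capabilities when choosing a connection
 * manager.  'device' and 'port' come from the caller; the setup helpers
 * are hypothetical placeholders.
 *
 *	if (rdma_cap_ib_cm(device, port))
 *		ret = setup_ib_cm(device, port);
 *	else if (rdma_cap_iw_cm(device, port))
 *		ret = setup_iw_cm(device, port);
 *	else
 *		ret = -EPROTONOSUPPORT;
 */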
3249  
3250  /**
3251   * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
3252   * Subnet Administration.
3253   * @device: Device to check
3254   * @port_num: Port number to check
3255   *
3256   * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3257   * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3258   * fabrics, devices should resolve routes to other hosts by contacting the
3259   * SA to query the proper route.
3260   *
3261   * Return: true if the port should act as a client to the fabric Subnet
3262   * Administration interface.  This does not imply that the SA service is
3263   * running locally.
3264   */
3265  static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3266  {
3267  	return device->port_data[port_num].immutable.core_cap_flags &
3268  	       RDMA_CORE_CAP_IB_SA;
3269  }
3270  
3271  /**
3272   * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
3273   * Multicast.
3274   * @device: Device to check
3275   * @port_num: Port number to check
3276   *
3277   * InfiniBand multicast registration is more complex than normal IPv4 or
3278   * IPv6 multicast registration.  Each Host Channel Adapter must register
3279   * with the Subnet Manager when it wishes to join a multicast group.  It
3280   * should do so only once regardless of how many queue pairs it subscribes
3281   * to this group.  And it should leave the group only after all queue pairs
3282   * attached to the group have been detached.
3283   *
3284   * Return: true if the port must undertake the additional administrative
3285   * overhead of registering/unregistering with the SM and tracking of the
3286   * total number of queue pairs attached to the multicast group.
3287   */
3288  static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3289  				     u32 port_num)
3290  {
3291  	return rdma_cap_ib_sa(device, port_num);
3292  }
3293  
3294  /**
3295   * rdma_cap_af_ib - Check if the port of a device supports
3296   * Native InfiniBand Addressing.
3297   * @device: Device to check
3298   * @port_num: Port number to check
3299   *
3300   * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3301   * GID.  RoCE uses a different mechanism, but still generates a GID via
3302   * a prescribed mechanism and port specific data.
3303   *
3304   * Return: true if the port uses a GID address to identify devices on the
3305   * network.
3306   */
3307  static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3308  {
3309  	return device->port_data[port_num].immutable.core_cap_flags &
3310  	       RDMA_CORE_CAP_AF_IB;
3311  }
3312  
3313  /**
3314   * rdma_cap_eth_ah - Check if the port of device has the capability
3315   * Ethernet Address Handle.
3316   * @device: Device to check
3317   * @port_num: Port number to check
3318   *
3319   * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3320   * to fabricate GIDs over Ethernet/IP specific addresses native to the
3321   * port.  Normally, packet headers are generated by the sending host
3322   * adapter, but when sending connectionless datagrams, we must manually
3323   * inject the proper headers for the fabric we are communicating over.
3324   *
3325   * Return: true if we are running as a RoCE port and must force the
3326   * addition of a Global Route Header built from our Ethernet Address
3327   * Handle into our header list for connectionless packets.
3328   */
3329  static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3330  {
3331  	return device->port_data[port_num].immutable.core_cap_flags &
3332  	       RDMA_CORE_CAP_ETH_AH;
3333  }
3334  
3335  /**
3336   * rdma_cap_opa_ah - Check if the port of device supports
3337   * OPA Address handles
3338   * @device: Device to check
3339   * @port_num: Port number to check
3340   *
3341   * Return: true if we are running on an OPA device which supports
3342   * the extended OPA addressing.
3343   */
3344  static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3345  {
3346  	return (device->port_data[port_num].immutable.core_cap_flags &
3347  		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3348  }
3349  
3350  /**
3351   * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3352   *
3353   * @device: Device
3354   * @port_num: Port number
3355   *
3356   * This MAD size includes the MAD headers and MAD payload.  No other headers
3357   * are included.
3358   *
3359   * Return the max MAD size required by the port.  Will return 0 if the port
3360   * does not support MADs.
3361   */
3362  static inline size_t rdma_max_mad_size(const struct ib_device *device,
3363  				       u32 port_num)
3364  {
3365  	return device->port_data[port_num].immutable.max_mad_size;
3366  }
3367  
3368  /**
3369   * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3370   * @device: Device to check
3371   * @port_num: Port number to check
3372   *
3373   * The RoCE GID table mechanism manages the various GIDs for a device.
3374   *
3375   * NOTE: if allocating the port's GID table has failed, this call will still
3376   * return true, but any RoCE GID table API will fail.
3377   *
3378   * Return: true if the port uses RoCE GID table mechanism in order to manage
3379   * its GIDs.
3380   */
3381  static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3382  					   u32 port_num)
3383  {
3384  	return rdma_protocol_roce(device, port_num) &&
3385  		device->ops.add_gid && device->ops.del_gid;
3386  }
3387  
3388  /*
3389   * Check if the device supports READ W/ INVALIDATE.
3390   */
3391  static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3392  {
3393  	/*
3394  	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3395  	 * has support for it yet.
3396  	 */
3397  	return rdma_protocol_iwarp(dev, port_num);
3398  }
3399  
3400  /**
3401   * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3402   * @device: Device
3403   * @port_num: 1-based port number
3404   *
3405   * Return true if the port is an Intel OPA port, false if not.
3406   */
3407  static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3408  					  u32 port_num)
3409  {
3410  	return (device->port_data[port_num].immutable.core_cap_flags &
3411  		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3412  }
3413  
3414  /**
3415   * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3416   * @device: Device
3417   * @port: Port number
3418   * @mtu: enum value of MTU
3419   *
3420   * Return the MTU size supported by the port as an integer value. Will return
3421   * -1 if enum value of mtu is not supported.
3422   */
3423  static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3424  				       int mtu)
3425  {
3426  	if (rdma_core_cap_opa_port(device, port))
3427  		return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3428  	else
3429  		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3430  }
3431  
3432  /**
3433   * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3434   * @device: Device
3435   * @port: Port number
3436   * @attr: port attribute
3437   *
3438   * Return the MTU size supported by the port as an integer value.
3439   */
3440  static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3441  				     struct ib_port_attr *attr)
3442  {
3443  	if (rdma_core_cap_opa_port(device, port))
3444  		return attr->phys_mtu;
3445  	else
3446  		return ib_mtu_enum_to_int(attr->max_mtu);
3447  }
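
/*
 * Example (illustrative sketch, not part of the original header): querying
 * a port and deriving its MTU in bytes.  ib_query_port() is the standard
 * core helper; error handling is elided.
 *
 *	struct ib_port_attr attr;
 *	int mtu = -1;
 *
 *	if (!ib_query_port(device, port, &attr))
 *		mtu = rdma_mtu_from_attr(device, port, &attr);
 */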
3448  
3449  int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3450  			 int state);
3451  int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3452  		     struct ifla_vf_info *info);
3453  int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3454  		    struct ifla_vf_stats *stats);
3455  int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3456  		    struct ifla_vf_guid *node_guid,
3457  		    struct ifla_vf_guid *port_guid);
3458  int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3459  		   int type);
3460  
3461  int ib_query_pkey(struct ib_device *device,
3462  		  u32 port_num, u16 index, u16 *pkey);
3463  
3464  int ib_modify_device(struct ib_device *device,
3465  		     int device_modify_mask,
3466  		     struct ib_device_modify *device_modify);
3467  
3468  int ib_modify_port(struct ib_device *device,
3469  		   u32 port_num, int port_modify_mask,
3470  		   struct ib_port_modify *port_modify);
3471  
3472  int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3473  		u32 *port_num, u16 *index);
3474  
3475  int ib_find_pkey(struct ib_device *device,
3476  		 u32 port_num, u16 pkey, u16 *index);
3477  
3478  enum ib_pd_flags {
3479  	/*
3480  	 * Create a memory registration for all memory in the system and place
3481  	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3482  	 * ULPs to avoid the overhead of dynamic MRs.
3483  	 *
3484  	 * This flag is generally considered unsafe and must only be used in
3485   * extremely trusted environments.  Every use of it will log a warning
3486  	 * in the kernel log.
3487  	 */
3488  	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3489  };
3490  
3491  struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3492  		const char *caller);
3493  
3494  /**
3495   * ib_alloc_pd - Allocates an unused protection domain.
3496   * @device: The device on which to allocate the protection domain.
3497   * @flags: protection domain flags
3498   *
3499   * A protection domain object provides an association between QPs, shared
3500   * receive queues, address handles, memory regions, and memory windows.
3501   *
3502   * Every PD has a local_dma_lkey which can be used as the lkey value for local
3503   * memory operations.
3504   */
3505  #define ib_alloc_pd(device, flags) \
3506  	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3507  
3508  int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3509  
3510  /**
3511   * ib_dealloc_pd - Deallocate kernel PD
3512   * @pd: The protection domain
3513   *
3514   * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3515   */
3516  static inline void ib_dealloc_pd(struct ib_pd *pd)
3517  {
3518  	int ret = ib_dealloc_pd_user(pd, NULL);
3519  
3520  	WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3521  }
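
/*
 * Example (illustrative sketch, not part of the original header): the
 * usual kernel PD lifecycle.  No flags are requested;
 * IB_PD_UNSAFE_GLOBAL_RKEY is deliberately avoided.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */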
3522  
3523  enum rdma_create_ah_flags {
3524  	/* In a sleepable context */
3525  	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3526  };
3527  
3528  /**
3529   * rdma_create_ah - Creates an address handle for the given address vector.
3530   * @pd: The protection domain associated with the address handle.
3531   * @ah_attr: The attributes of the address vector.
3532   * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3533   *
3534   * The address handle is used to reference a local or global destination
3535   * in all UD QP post sends.
3536   */
3537  struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3538  			     u32 flags);
3539  
3540  /**
3541   * rdma_create_user_ah - Creates an address handle for the given address vector.
3542   * It resolves the destination MAC address for an ah attribute of RoCE type.
3543   * @pd: The protection domain associated with the address handle.
3544   * @ah_attr: The attributes of the address vector.
3545   * @udata: pointer to the user's input/output buffer information needed by
3546   *         the provider driver.
3547   *
3548   * It returns a valid address handle on success and an ERR_PTR on error.
3549   * The address handle is used to reference a local or global destination
3550   * in all UD QP post sends.
3551   */
3552  struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3553  				  struct rdma_ah_attr *ah_attr,
3554  				  struct ib_udata *udata);
3555  /**
3556   * ib_get_gids_from_rdma_hdr - Get the sgid and dgid from a GRH or IPv4
3557   *   header.
3558   * @hdr: the L3 header to parse
3559   * @net_type: type of header to parse
3560   * @sgid: place to store source gid
3561   * @dgid: place to store destination gid
3562   */
3563  int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3564  			      enum rdma_network_type net_type,
3565  			      union ib_gid *sgid, union ib_gid *dgid);
3566  
3567  /**
3568   * ib_get_rdma_header_version - Get the header version
3569   * @hdr: the L3 header to parse
3570   */
3571  int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3572  
3573  /**
3574   * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3575   *   work completion.
3576   * @device: Device on which the received message arrived.
3577   * @port_num: Port on which the received message arrived.
3578   * @wc: Work completion associated with the received message.
3579   * @grh: References the received global route header.  This parameter is
3580   *   ignored unless the work completion indicates that the GRH is valid.
3581   * @ah_attr: Returned attributes that can be used when creating an address
3582   *   handle for replying to the message.
3583   * When ib_init_ah_attr_from_wc() returns success, ah_attr
3584   * (a) optionally contains a reference to the SGID attribute when a GRH is
3585   *     present (IB link layer), and
3586   * (b) contains a reference to the SGID attribute (RoCE link layer).
3587   * The user must invoke rdma_cleanup_ah_attr_gid_attr() to release the
3588   * reference to SGID attributes initialized by ib_init_ah_attr_from_wc().
3590   */
3591  int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3592  			    const struct ib_wc *wc, const struct ib_grh *grh,
3593  			    struct rdma_ah_attr *ah_attr);
3594  
3595  /**
3596   * ib_create_ah_from_wc - Creates an address handle associated with the
3597   *   sender of the specified work completion.
3598   * @pd: The protection domain associated with the address handle.
3599   * @wc: Work completion information associated with a received message.
3600   * @grh: References the received global route header.  This parameter is
3601   *   ignored unless the work completion indicates that the GRH is valid.
3602   * @port_num: The outbound port number to associate with the address.
3603   *
3604   * The address handle is used to reference a local or global destination
3605   * in all UD QP post sends.
3606   */
3607  struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3608  				   const struct ib_grh *grh, u32 port_num);
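
/*
 * Example (illustrative sketch, not part of the original header): building
 * a reply AH from a received UD work completion.  'wc' and 'grh' come from
 * the receive buffer; the AH must later be released with rdma_destroy_ah().
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */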
3609  
3610  /**
3611   * rdma_modify_ah - Modifies the address vector associated with an address
3612   *   handle.
3613   * @ah: The address handle to modify.
3614   * @ah_attr: The new address vector attributes to associate with the
3615   *   address handle.
3616   */
3617  int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3618  
3619  /**
3620   * rdma_query_ah - Queries the address vector associated with an address
3621   *   handle.
3622   * @ah: The address handle to query.
3623   * @ah_attr: The address vector attributes associated with the address
3624   *   handle.
3625   */
3626  int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3627  
3628  enum rdma_destroy_ah_flags {
3629  	/* In a sleepable context */
3630  	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3631  };
3632  
3633  /**
3634   * rdma_destroy_ah_user - Destroys an address handle.
3635   * @ah: The address handle to destroy.
3636   * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3637   * @udata: Valid user data or NULL for kernel objects
3638   */
3639  int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3640  
3641  /**
3642   * rdma_destroy_ah - Destroys a kernel address handle.
3643   * @ah: The address handle to destroy.
3644   * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3645   *
3646   * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3647   */
3648  static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3649  {
3650  	int ret = rdma_destroy_ah_user(ah, flags, NULL);
3651  
3652  	WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3653  }
3654  
3655  struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3656  				  struct ib_srq_init_attr *srq_init_attr,
3657  				  struct ib_usrq_object *uobject,
3658  				  struct ib_udata *udata);
3659  static inline struct ib_srq *
3660  ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3661  {
3662  	if (!pd->device->ops.create_srq)
3663  		return ERR_PTR(-EOPNOTSUPP);
3664  
3665  	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3666  }
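
/*
 * Example (illustrative sketch, sizes are arbitrary assumptions): creating
 * a kernel SRQ with room for 256 receives of one SGE each.
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */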
3667  
3668  /**
3669   * ib_modify_srq - Modifies the attributes for the specified SRQ.
3670   * @srq: The SRQ to modify.
3671   * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3672   *   the current values of selected SRQ attributes are returned.
3673   * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3674   *   are being modified.
3675   *
3676   * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3677   * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3678   * the number of receives queued drops below the limit.
3679   */
3680  int ib_modify_srq(struct ib_srq *srq,
3681  		  struct ib_srq_attr *srq_attr,
3682  		  enum ib_srq_attr_mask srq_attr_mask);
3683  
3684  /**
3685   * ib_query_srq - Returns the attribute list and current values for the
3686   *   specified SRQ.
3687   * @srq: The SRQ to query.
3688   * @srq_attr: The attributes of the specified SRQ.
3689   */
3690  int ib_query_srq(struct ib_srq *srq,
3691  		 struct ib_srq_attr *srq_attr);
3692  
3693  /**
3694   * ib_destroy_srq_user - Destroys the specified SRQ.
3695   * @srq: The SRQ to destroy.
3696   * @udata: Valid user data or NULL for kernel objects
3697   */
3698  int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3699  
3700  /**
3701   * ib_destroy_srq - Destroys the specified kernel SRQ.
3702   * @srq: The SRQ to destroy.
3703   *
3704   * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3705   */
3706  static inline void ib_destroy_srq(struct ib_srq *srq)
3707  {
3708  	int ret = ib_destroy_srq_user(srq, NULL);
3709  
3710  	WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3711  }
3712  
3713  /**
3714   * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3715   * @srq: The SRQ to post the work request on.
3716   * @recv_wr: A list of work requests to post on the receive queue.
3717   * @bad_recv_wr: On an immediate failure, this parameter will reference
3718   *   the work request that failed to be posted on the QP.
3719   */
3720  static inline int ib_post_srq_recv(struct ib_srq *srq,
3721  				   const struct ib_recv_wr *recv_wr,
3722  				   const struct ib_recv_wr **bad_recv_wr)
3723  {
3724  	const struct ib_recv_wr *dummy;
3725  
3726  	return srq->device->ops.post_srq_recv(srq, recv_wr,
3727  					      bad_recv_wr ? : &dummy);
3728  }
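
/*
 * Example (illustrative sketch, not part of the original header): posting
 * one receive to an SRQ.  'dma_addr' is assumed to be an already-mapped DMA
 * address and pd->local_dma_lkey is used as the lkey.
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id = (uintptr_t)ctx, .sg_list = &sge, .num_sge = 1,
 *	};
 *
 *	int ret = ib_post_srq_recv(srq, &wr, NULL);
 */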
3729  
3730  struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
3731  				  struct ib_qp_init_attr *qp_init_attr,
3732  				  const char *caller);
3733  /**
3734   * ib_create_qp - Creates a kernel QP associated with the specific protection
3735   * domain.
3736   * @pd: The protection domain associated with the QP.
3737   * @init_attr: A list of initial attributes required to create the
3738   *   QP.  If QP creation succeeds, then the attributes are updated to
3739   *   the actual capabilities of the created QP.
3740   */
3741  static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3742  					 struct ib_qp_init_attr *init_attr)
3743  {
3744  	return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
3745  }
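
/*
 * Example (illustrative sketch, sizes are arbitrary assumptions): creating
 * an RC QP whose send and receive queues share one CQ.
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq = cq, .recv_cq = cq, .qp_type = IB_QPT_RC,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_attr);
 */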
3746  
3747  /**
3748   * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3749   * @qp: The QP to modify.
3750   * @attr: On input, specifies the QP attributes to modify.  On output,
3751   *   the current values of selected QP attributes are returned.
3752   * @attr_mask: A bit-mask used to specify which attributes of the QP
3753   *   are being modified.
3754   * @udata: pointer to the user's input/output buffer information.
3756   * It returns 0 on success and returns appropriate error code on error.
3757   */
3758  int ib_modify_qp_with_udata(struct ib_qp *qp,
3759  			    struct ib_qp_attr *attr,
3760  			    int attr_mask,
3761  			    struct ib_udata *udata);
3762  
3763  /**
3764   * ib_modify_qp - Modifies the attributes for the specified QP and then
3765   *   transitions the QP to the given state.
3766   * @qp: The QP to modify.
3767   * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3768   *   the current values of selected QP attributes are returned.
3769   * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3770   *   are being modified.
3771   */
3772  int ib_modify_qp(struct ib_qp *qp,
3773  		 struct ib_qp_attr *qp_attr,
3774  		 int qp_attr_mask);
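
/*
 * Example (illustrative sketch, not part of the original header): moving a
 * fresh QP to INIT.  The later INIT->RTR->RTS transitions need peer
 * information (or use the RDMA CM, which drives them for you).
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	int ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *					  IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */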
3775  
3776  /**
3777   * ib_query_qp - Returns the attribute list and current values for the
3778   *   specified QP.
3779   * @qp: The QP to query.
3780   * @qp_attr: The attributes of the specified QP.
3781   * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3782   * @qp_init_attr: Additional attributes of the selected QP.
3783   *
3784   * The qp_attr_mask may be used to limit the query to gathering only the
3785   * selected attributes.
3786   */
3787  int ib_query_qp(struct ib_qp *qp,
3788  		struct ib_qp_attr *qp_attr,
3789  		int qp_attr_mask,
3790  		struct ib_qp_init_attr *qp_init_attr);
3791  
3792  /**
3793   * ib_destroy_qp_user - Destroys the specified QP.
3794   * @qp: The QP to destroy.
3795   * @udata: Valid udata or NULL for kernel objects
3796   */
3797  int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3798  
3799  /**
3800   * ib_destroy_qp - Destroys the specified kernel QP.
3801   * @qp: The QP to destroy.
3802   *
3803   * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3804   */
3805  static inline int ib_destroy_qp(struct ib_qp *qp)
3806  {
3807  	return ib_destroy_qp_user(qp, NULL);
3808  }
3809  
3810  /**
3811   * ib_open_qp - Obtain a reference to an existing sharable QP.
3812   * @xrcd: XRC domain
3813   * @qp_open_attr: Attributes identifying the QP to open.
3814   *
3815   * Returns a reference to a sharable QP.
3816   */
3817  struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3818  			 struct ib_qp_open_attr *qp_open_attr);
3819  
3820  /**
3821   * ib_close_qp - Release an external reference to a QP.
3822   * @qp: The QP handle to release
3823   *
3824   * The opened QP handle is released by the caller.  The underlying
3825   * shared QP is not destroyed until all internal references are released.
3826   */
3827  int ib_close_qp(struct ib_qp *qp);
3828  
3829  /**
3830   * ib_post_send - Posts a list of work requests to the send queue of
3831   *   the specified QP.
3832   * @qp: The QP to post the work request on.
3833   * @send_wr: A list of work requests to post on the send queue.
3834   * @bad_send_wr: On an immediate failure, this parameter will reference
3835   *   the work request that failed to be posted on the QP.
3836   *
3837   * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3838   * error is returned, the QP state shall not be affected,
3839   * ib_post_send() will return an immediate error after queueing any
3840   * earlier work requests in the list.
3841   */
3842  static inline int ib_post_send(struct ib_qp *qp,
3843  			       const struct ib_send_wr *send_wr,
3844  			       const struct ib_send_wr **bad_send_wr)
3845  {
3846  	const struct ib_send_wr *dummy;
3847  
3848  	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3849  }
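
/*
 * Example (illustrative sketch, not part of the original header): posting
 * one signaled SEND carrying a single SGE; the completion surfaces on the
 * QP's send CQ.  'dma_addr', 'len' and 'ctx' are assumed caller state.
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id = (uintptr_t)ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *
 *	int ret = ib_post_send(qp, &wr, NULL);
 */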
3850  
3851  /**
3852   * ib_post_recv - Posts a list of work requests to the receive queue of
3853   *   the specified QP.
3854   * @qp: The QP to post the work request on.
3855   * @recv_wr: A list of work requests to post on the receive queue.
3856   * @bad_recv_wr: On an immediate failure, this parameter will reference
3857   *   the work request that failed to be posted on the QP.
3858   */
3859  static inline int ib_post_recv(struct ib_qp *qp,
3860  			       const struct ib_recv_wr *recv_wr,
3861  			       const struct ib_recv_wr **bad_recv_wr)
3862  {
3863  	const struct ib_recv_wr *dummy;
3864  
3865  	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3866  }
3867  
3868  struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3869  			    int comp_vector, enum ib_poll_context poll_ctx,
3870  			    const char *caller);
3871  static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3872  					int nr_cqe, int comp_vector,
3873  					enum ib_poll_context poll_ctx)
3874  {
3875  	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3876  			     KBUILD_MODNAME);
3877  }
3878  
3879  struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3880  				int nr_cqe, enum ib_poll_context poll_ctx,
3881  				const char *caller);
3882  
3883  /**
3884   * ib_alloc_cq_any - Allocate a kernel CQ
3885   * @dev: The IB device
3886   * @private: Private data attached to the CQE
3887   * @nr_cqe: Number of CQEs in the CQ
3888   * @poll_ctx: Context used for polling the CQ
3889   */
3890  static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3891  					    void *private, int nr_cqe,
3892  					    enum ib_poll_context poll_ctx)
3893  {
3894  	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3895  				 KBUILD_MODNAME);
3896  }
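
/*
 * Example (illustrative sketch, sizes are arbitrary assumptions):
 * allocating a softirq-polled kernel CQ and releasing it with ib_free_cq()
 * when done.
 *
 *	struct ib_cq *cq = ib_alloc_cq_any(dev, priv, 128, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */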
3897  
3898  void ib_free_cq(struct ib_cq *cq);
3899  int ib_process_cq_direct(struct ib_cq *cq, int budget);
3900  
3901  /**
3902   * ib_create_cq - Creates a CQ on the specified device.
3903   * @device: The device on which to create the CQ.
3904   * @comp_handler: A user-specified callback that is invoked when a
3905   *   completion event occurs on the CQ.
3906   * @event_handler: A user-specified callback that is invoked when an
3907   *   asynchronous event not associated with a completion occurs on the CQ.
3908   * @cq_context: Context associated with the CQ returned to the user via
3909   *   the associated completion and event handlers.
3910   * @cq_attr: The attributes the CQ should be created upon.
3911   *
3912   * Users can examine the cq structure to determine the actual CQ size.
3913   */
3914  struct ib_cq *__ib_create_cq(struct ib_device *device,
3915  			     ib_comp_handler comp_handler,
3916  			     void (*event_handler)(struct ib_event *, void *),
3917  			     void *cq_context,
3918  			     const struct ib_cq_init_attr *cq_attr,
3919  			     const char *caller);
3920  #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3921  	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3922  
3923  /**
3924   * ib_resize_cq - Modifies the capacity of the CQ.
3925   * @cq: The CQ to resize.
3926   * @cqe: The minimum size of the CQ.
3927   *
3928   * Users can examine the cq structure to determine the actual CQ size.
3929   */
3930  int ib_resize_cq(struct ib_cq *cq, int cqe);
3931  
3932  /**
3933   * rdma_set_cq_moderation - Modifies moderation params of the CQ
3934   * @cq: The CQ to modify.
3935   * @cq_count: number of CQEs that will trigger an event
3936   * @cq_period: max period of time in usec before triggering an event
3937   *
3938   */
3939  int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3940  
3941  /**
3942   * ib_destroy_cq_user - Destroys the specified CQ.
3943   * @cq: The CQ to destroy.
3944   * @udata: Valid user data or NULL for kernel objects
3945   */
3946  int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3947  
3948  /**
3949   * ib_destroy_cq - Destroys the specified kernel CQ.
3950   * @cq: The CQ to destroy.
3951   *
3952   * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3953   */
3954  static inline void ib_destroy_cq(struct ib_cq *cq)
3955  {
3956  	int ret = ib_destroy_cq_user(cq, NULL);
3957  
3958  	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3959  }
3960  
3961  /**
3962   * ib_poll_cq - poll a CQ for completion(s)
3963   * @cq: the CQ being polled
3964   * @num_entries: maximum number of completions to return
3965   * @wc: array of at least @num_entries &struct ib_wc where completions
3966   *   will be returned
3967   *
3968   * Poll a CQ for (possibly multiple) completions.  If the return value
3969   * is < 0, an error occurred.  If the return value is >= 0, it is the
3970   * number of completions returned.  If the return value is
3971   * non-negative and < num_entries, then the CQ was emptied.
3972   */
3973  static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3974  			     struct ib_wc *wc)
3975  {
3976  	return cq->device->ops.poll_cq(cq, num_entries, wc);
3977  }
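
/*
 * Example (illustrative sketch, not part of the original header): draining
 * up to 16 completions per call; handle_error() is a hypothetical
 * placeholder.
 *
 *	struct ib_wc wc[16];
 *	int i, n = ib_poll_cq(cq, 16, wc);
 *
 *	for (i = 0; i < n; i++)
 *		if (wc[i].status != IB_WC_SUCCESS)
 *			handle_error(&wc[i]);
 */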
3978  
3979  /**
3980   * ib_req_notify_cq - Request completion notification on a CQ.
3981   * @cq: The CQ to generate an event for.
3982   * @flags:
3983   *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3984   *   to request an event on the next solicited event or next work
3985   *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3986   *   may also be |ed in to request a hint about missed events, as
3987   *   described below.
3988   *
3989   * Return Value:
3990   *    < 0 means an error occurred while requesting notification
3991   *   == 0 means notification was requested successfully, and if
3992   *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3993   *        were missed and it is safe to wait for another event.  In
3994   *        this case it is guaranteed that any work completions added
3995   *        to the CQ since the last CQ poll will trigger a completion
3996   *        notification event.
3997   *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3998   *        in.  It means that the consumer must poll the CQ again to
3999   *        make sure it is empty to avoid missing an event because of a
4000   *        race between requesting notification and an entry being
4001   *        added to the CQ.  This return value means it is possible
4002   *        (but not guaranteed) that a work completion has been added
4003   *        to the CQ since the last poll without triggering a
4004   *        completion notification event.
4005   */
4006  static inline int ib_req_notify_cq(struct ib_cq *cq,
4007  				   enum ib_cq_notify_flags flags)
4008  {
4009  	return cq->device->ops.req_notify_cq(cq, flags);
4010  }
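
/*
 * Example (illustrative sketch, not part of the original header): the
 * race-free re-arm loop implied by the return values above; process() is a
 * hypothetical placeholder.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */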
4011  
4012  struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4013  			     int comp_vector_hint,
4014  			     enum ib_poll_context poll_ctx);
4015  
4016  void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4017  
4018  /*
4019   * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
4020   * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
4021   * address into the dma address.
4022   */
4023  static inline bool ib_uses_virt_dma(struct ib_device *dev)
4024  {
4025  	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
4026  }
4027  
4028  /*
4029   * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
4030   */
4031  static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
4032  {
4033  	if (ib_uses_virt_dma(dev))
4034  		return false;
4035  
4036  	return dma_pci_p2pdma_supported(dev->dma_device);
4037  }
4038  
4039  /**
4040   * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
4041   * @dma_addr: The DMA address
4042   *
4043   * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
4044   * going through the dma_addr marshalling.
4045   */
4046  static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
4047  {
4048  	/* virt_dma mode maps kernel virtual addresses directly into the dma addr */
4049  	return (void *)(uintptr_t)dma_addr;
4050  }
4051  
4052  /**
4053   * ib_virt_dma_to_page - Convert a dma_addr to a struct page
4054   * @dma_addr: The DMA address
4055   *
4056   * Used by ib_uses_virt_dma() device to get back to the struct page after going
4057   * through the dma_addr marshalling.
4058   */
4059  static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
4060  {
4061  	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
4062  }
4063  
4064  /**
4065   * ib_dma_mapping_error - check a DMA addr for error
4066   * @dev: The device for which the dma_addr was created
4067   * @dma_addr: The DMA address to check
4068   */
4069  static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4070  {
4071  	if (ib_uses_virt_dma(dev))
4072  		return 0;
4073  	return dma_mapping_error(dev->dma_device, dma_addr);
4074  }
4075  
4076  /**
4077   * ib_dma_map_single - Map a kernel virtual address to DMA address
4078   * @dev: The device for which the dma_addr is to be created
4079   * @cpu_addr: The kernel virtual address
4080   * @size: The size of the region in bytes
4081   * @direction: The direction of the DMA
4082   */
4083  static inline u64 ib_dma_map_single(struct ib_device *dev,
4084  				    void *cpu_addr, size_t size,
4085  				    enum dma_data_direction direction)
4086  {
4087  	if (ib_uses_virt_dma(dev))
4088  		return (uintptr_t)cpu_addr;
4089  	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4090  }
4091  
4092  /**
4093   * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4094   * @dev: The device for which the DMA address was created
4095   * @addr: The DMA address
4096   * @size: The size of the region in bytes
4097   * @direction: The direction of the DMA
4098   */
4099  static inline void ib_dma_unmap_single(struct ib_device *dev,
4100  				       u64 addr, size_t size,
4101  				       enum dma_data_direction direction)
4102  {
4103  	if (!ib_uses_virt_dma(dev))
4104  		dma_unmap_single(dev->dma_device, addr, size, direction);
4105  }
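
/*
 * Example (illustrative sketch, not part of the original header): the
 * usual map / check / use / unmap cycle for a single kernel buffer.
 *
 *	u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... post a WR referencing 'dma' ...
 *	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */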
4106  
4107  /**
4108   * ib_dma_map_page - Map a physical page to DMA address
4109   * @dev: The device for which the dma_addr is to be created
4110   * @page: The page to be mapped
4111   * @offset: The offset within the page
4112   * @size: The size of the region in bytes
4113   * @direction: The direction of the DMA
4114   */
4115  static inline u64 ib_dma_map_page(struct ib_device *dev,
4116  				  struct page *page,
4117  				  unsigned long offset,
4118  				  size_t size,
4119  				  enum dma_data_direction direction)
4120  {
4121  	if (ib_uses_virt_dma(dev))
4122  		return (uintptr_t)(page_address(page) + offset);
4123  	return dma_map_page(dev->dma_device, page, offset, size, direction);
4124  }
4125  
4126  /**
4127   * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4128   * @dev: The device for which the DMA address was created
4129   * @addr: The DMA address
4130   * @size: The size of the region in bytes
4131   * @direction: The direction of the DMA
4132   */
4133  static inline void ib_dma_unmap_page(struct ib_device *dev,
4134  				     u64 addr, size_t size,
4135  				     enum dma_data_direction direction)
4136  {
4137  	if (!ib_uses_virt_dma(dev))
4138  		dma_unmap_page(dev->dma_device, addr, size, direction);
4139  }
4140  
4141  int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4142  static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4143  				      struct scatterlist *sg, int nents,
4144  				      enum dma_data_direction direction,
4145  				      unsigned long dma_attrs)
4146  {
4147  	if (ib_uses_virt_dma(dev))
4148  		return ib_dma_virt_map_sg(dev, sg, nents);
4149  	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4150  				dma_attrs);
4151  }
4152  
4153  static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4154  					 struct scatterlist *sg, int nents,
4155  					 enum dma_data_direction direction,
4156  					 unsigned long dma_attrs)
4157  {
4158  	if (!ib_uses_virt_dma(dev))
4159  		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4160  				   dma_attrs);
4161  }
4162  
4163  /**
4164   * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
4165   * @dev: The device for which the DMA addresses are to be created
4166   * @sgt: The sg_table object describing the buffer
4167   * @direction: The direction of the DMA
4168   * @dma_attrs: Optional DMA attributes for the map operation
4169   */
4170  static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
4171  					   struct sg_table *sgt,
4172  					   enum dma_data_direction direction,
4173  					   unsigned long dma_attrs)
4174  {
4175  	int nents;
4176  
4177  	if (ib_uses_virt_dma(dev)) {
4178  		nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4179  		if (!nents)
4180  			return -EIO;
4181  		sgt->nents = nents;
4182  		return 0;
4183  	}
4184  	return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4185  }
4186  
4187  static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
4188  					      struct sg_table *sgt,
4189  					      enum dma_data_direction direction,
4190  					      unsigned long dma_attrs)
4191  {
4192  	if (!ib_uses_virt_dma(dev))
4193  		dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4194  }
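
/*
 * Example (illustrative sketch, not part of the original header): mapping
 * an already-populated struct sg_table 'sgt' for device access and tearing
 * the mapping down afterwards.
 *
 *	int ret = ib_dma_map_sgtable_attrs(dev, &sgt, DMA_BIDIRECTIONAL, 0);
 *
 *	if (ret)
 *		return ret;
 *	... use sgt.sgl over sgt.nents entries ...
 *	ib_dma_unmap_sgtable_attrs(dev, &sgt, DMA_BIDIRECTIONAL, 0);
 */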
4195  
4196  /**
4197   * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4198   * @dev: The device for which the DMA addresses are to be created
4199   * @sg: The array of scatter/gather entries
4200   * @nents: The number of scatter/gather entries
4201   * @direction: The direction of the DMA
4202   */
4203  static inline int ib_dma_map_sg(struct ib_device *dev,
4204  				struct scatterlist *sg, int nents,
4205  				enum dma_data_direction direction)
4206  {
4207  	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4208  }
4209  
4210  /**
4211   * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4212   * @dev: The device for which the DMA addresses were created
4213   * @sg: The array of scatter/gather entries
4214   * @nents: The number of scatter/gather entries
4215   * @direction: The direction of the DMA
4216   */
4217  static inline void ib_dma_unmap_sg(struct ib_device *dev,
4218  				   struct scatterlist *sg, int nents,
4219  				   enum dma_data_direction direction)
4220  {
4221  	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4222  }
4223  
4224  /**
4225   * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4226   * @dev: The device to query
4227   *
4228   * The returned value represents a size in bytes.
4229   */
4230  static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4231  {
4232  	if (ib_uses_virt_dma(dev))
4233  		return UINT_MAX;
4234  	return dma_get_max_seg_size(dev->dma_device);
4235  }
4236  
4237  /**
4238   * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4239   * @dev: The device for which the DMA address was created
4240   * @addr: The DMA address
4241   * @size: The size of the region in bytes
4242   * @dir: The direction of the DMA
4243   */
4244  static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4245  					      u64 addr,
4246  					      size_t size,
4247  					      enum dma_data_direction dir)
4248  {
4249  	if (!ib_uses_virt_dma(dev))
4250  		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4251  }
4252  
4253  /**
4254   * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4255   * @dev: The device for which the DMA address was created
4256   * @addr: The DMA address
4257   * @size: The size of the region in bytes
4258   * @dir: The direction of the DMA
4259   */
4260  static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4261  						 u64 addr,
4262  						 size_t size,
4263  						 enum dma_data_direction dir)
4264  {
4265  	if (!ib_uses_virt_dma(dev))
4266  		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4267  }
4268  
4269  /* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4270   * space. This function should be called when 'current' is the owning MM.
4271   */
4272  struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4273  			     u64 virt_addr, int mr_access_flags);
4274  
4275  /* ib_advise_mr -  give an advice about an address range in a memory region */
4276  int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4277  		 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4278  /**
4279   * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4280   *   HCA translation table.
4281   * @mr: The memory region to deregister.
4282   * @udata: Valid user data or NULL for kernel object
4283   *
4284   * This function can fail, if the memory region has memory windows bound to it.
4285   */
4286  int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4287  
4288  /**
4289   * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4290   *   HCA translation table.
4291   * @mr: The memory region to deregister.
4292   *
4293   * This function can fail, if the memory region has memory windows bound to it.
4294   *
4295   * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4296   */
4297  static inline int ib_dereg_mr(struct ib_mr *mr)
4298  {
4299  	return ib_dereg_mr_user(mr, NULL);
4300  }
4301  
4302  struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4303  			  u32 max_num_sg);
4304  
4305  struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4306  				    u32 max_num_data_sg,
4307  				    u32 max_num_meta_sg);
4308  
4309  /**
4310   * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4311   *   R_Key and L_Key.
4312   * @mr: struct ib_mr pointer to be updated.
4313   * @newkey: new key to be used.
4314   */
4315  static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4316  {
4317  	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4318  	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4319  }
4320  
4321  /**
4322   * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4323   * for calculating a new rkey for type 2 memory windows.
4324   * @rkey: the rkey to increment.
4325   */
4326  static inline u32 ib_inc_rkey(u32 rkey)
4327  {
4328  	const u32 mask = 0x000000ff;
4329  	return ((rkey + 1) & mask) | (rkey & ~mask);
4330  }
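
/*
 * Example (illustrative sketch, not part of the original header):
 * refreshing a fast-reg MR's key before re-use so stale remote references
 * cannot match the new registration.  Only the low byte of the incremented
 * rkey is consumed by ib_update_fast_reg_key().
 *
 *	u32 next = ib_inc_rkey(mr->rkey);
 *
 *	ib_update_fast_reg_key(mr, next & 0xff);
 */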
4331  
4332  /**
4333   * ib_attach_mcast - Attaches the specified QP to a multicast group.
4334   * @qp: QP to attach to the multicast group.  The QP must be type
4335   *   IB_QPT_UD.
4336   * @gid: Multicast group GID.
4337   * @lid: Multicast group LID in host byte order.
4338   *
4339   * In order to send and receive multicast packets, subnet
4340   * administration must have created the multicast group and configured
4341   * the fabric appropriately.  The port associated with the specified
4342   * QP must also be a member of the multicast group.
4343   */
4344  int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4345  
4346  /**
4347   * ib_detach_mcast - Detaches the specified QP from a multicast group.
4348   * @qp: QP to detach from the multicast group.
4349   * @gid: Multicast group GID.
4350   * @lid: Multicast group LID in host byte order.
4351   */
4352  int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
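
/*
 * Example (illustrative sketch, not part of the original header): joining
 * and leaving a multicast group with a UD QP once the SA has created the
 * group; 'gid' and 'lid' are assumed to come from SA resolution.
 *
 *	int ret = ib_attach_mcast(qp, &gid, lid);
 *	...
 *	ret = ib_detach_mcast(qp, &gid, lid);
 */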
4353  
4354  struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4355  				   struct inode *inode, struct ib_udata *udata);
4356  int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4357  
4358  static inline int ib_check_mr_access(struct ib_device *ib_dev,
4359  				     unsigned int flags)
4360  {
4361  	u64 device_cap = ib_dev->attrs.device_cap_flags;
4362  
4363  	/*
4364  	 * Local write permission is required if remote write or
4365  	 * remote atomic permission is also requested.
4366  	 */
4367  	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4368  	    !(flags & IB_ACCESS_LOCAL_WRITE))
4369  		return -EINVAL;
4370  
4371  	if (flags & ~IB_ACCESS_SUPPORTED)
4372  		return -EINVAL;
4373  
4374  	if (flags & IB_ACCESS_ON_DEMAND &&
4375  	    !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
4376  		return -EOPNOTSUPP;
4377  
4378  	if ((flags & IB_ACCESS_FLUSH_GLOBAL &&
4379  	    !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) ||
4380  	    (flags & IB_ACCESS_FLUSH_PERSISTENT &&
4381  	    !(device_cap & IB_DEVICE_FLUSH_PERSISTENT)))
4382  		return -EOPNOTSUPP;
4383  
4384  	return 0;
4385  }
4386  
4387  static inline bool ib_access_writable(int access_flags)
4388  {
4389  	/*
4390  	 * We have writable memory backing the MR if any of the following
4391  	 * access flags are set.  "Local write" and "remote write" obviously
4392  	 * require write access.  "Remote atomic" can do things like fetch and
4393  	 * add, which will modify memory, and "MW bind" can change permissions
4394  	 * by binding a window.
4395  	 */
4396  	return access_flags &
4397  		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4398  		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4399  }
4400  
4401  /**
4402   * ib_check_mr_status - lightweight check of MR status.
4403   *     This routine may provide status checks on a selected
4404   *     ib_mr.  The first use is for signature status checks.
4405   *
4406   * @mr: A memory region.
4407   * @check_mask: Bitmask of which checks to perform from
4408   *     ib_mr_status_check enumeration.
4409   * @mr_status: The container of relevant status checks.
4410   *     failed checks will be indicated in the status bitmask
4411   *     and the relevant info shall be in the error item.
4412   */
4413  int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4414  		       struct ib_mr_status *mr_status);
4415  
4416  /**
4417   * ib_device_try_get - Hold a registration lock
4418   * @device: The device to lock
4419   *
4420   * A device under an active registration lock cannot become unregistered. It
4421   * is only possible to obtain a registration lock on a device that is fully
4422   * registered, otherwise this function returns false.
4423   *
4424   * The registration lock is only necessary for actions which require the
4425   * device to still be registered. Uses that only require the device pointer to
4426   * be valid should use get_device(&ibdev->dev) to hold the memory.
4427   *
4428   */
4429  static inline bool ib_device_try_get(struct ib_device *dev)
4430  {
4431  	return refcount_inc_not_zero(&dev->refcount);
4432  }
4433  
4434  void ib_device_put(struct ib_device *device);
4435  struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4436  					  enum rdma_driver_id driver_id);
4437  struct ib_device *ib_device_get_by_name(const char *name,
4438  					enum rdma_driver_id driver_id);
4439  struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4440  					    u16 pkey, const union ib_gid *gid,
4441  					    const struct sockaddr *addr);
4442  int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4443  			 unsigned int port);
4444  struct ib_wq *ib_create_wq(struct ib_pd *pd,
4445  			   struct ib_wq_init_attr *init_attr);
4446  int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4447  
4448  int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4449  		 unsigned int *sg_offset, unsigned int page_size);
4450  int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4451  		    int data_sg_nents, unsigned int *data_sg_offset,
4452  		    struct scatterlist *meta_sg, int meta_sg_nents,
4453  		    unsigned int *meta_sg_offset, unsigned int page_size);
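
/*
 * Example (illustrative sketch, not part of the original header): mapping
 * an SG list onto a fast-reg MR that was allocated with
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents).
 *
 *	int n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *
 *	if (n < nents)
 *		return n < 0 ? n : -EINVAL;
 */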
4454  
4455  static inline int
4456  ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4457  		  unsigned int *sg_offset, unsigned int page_size)
4458  {
4459  	int n;
4460  
4461  	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4462  	mr->iova = 0;
4463  
4464  	return n;
4465  }
4466  
4467  int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4468  		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4469  
4470  void ib_drain_rq(struct ib_qp *qp);
4471  void ib_drain_sq(struct ib_qp *qp);
4472  void ib_drain_qp(struct ib_qp *qp);
4473  
4474  int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4475  		     u8 *width);
4476  
4477  static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4478  {
4479  	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4480  		return attr->roce.dmac;
4481  	return NULL;
4482  }
4483  
4484  static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4485  {
4486  	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4487  		attr->ib.dlid = (u16)dlid;
4488  	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4489  		attr->opa.dlid = dlid;
4490  }
4491  
4492  static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4493  {
4494  	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4495  		return attr->ib.dlid;
4496  	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4497  		return attr->opa.dlid;
4498  	return 0;
4499  }
4500  
4501  static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4502  {
4503  	attr->sl = sl;
4504  }
4505  
4506  static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4507  {
4508  	return attr->sl;
4509  }
4510  
4511  static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4512  					 u8 src_path_bits)
4513  {
4514  	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4515  		attr->ib.src_path_bits = src_path_bits;
4516  	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4517  		attr->opa.src_path_bits = src_path_bits;
4518  }
4519  
4520  static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4521  {
4522  	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4523  		return attr->ib.src_path_bits;
4524  	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4525  		return attr->opa.src_path_bits;
4526  	return 0;
4527  }
4528  
4529  static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4530  					bool make_grd)
4531  {
4532  	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4533  		attr->opa.make_grd = make_grd;
4534  }
4535  
4536  static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4537  {
4538  	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4539  		return attr->opa.make_grd;
4540  	return false;
4541  }
4542  
4543  static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4544  {
4545  	attr->port_num = port_num;
4546  }
4547  
4548  static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4549  {
4550  	return attr->port_num;
4551  }
4552  
4553  static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4554  					   u8 static_rate)
4555  {
4556  	attr->static_rate = static_rate;
4557  }
4558  
4559  static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4560  {
4561  	return attr->static_rate;
4562  }
4563  
4564  static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4565  					enum ib_ah_flags flag)
4566  {
4567  	attr->ah_flags = flag;
4568  }
4569  
4570  static inline enum ib_ah_flags
4571  		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4572  {
4573  	return attr->ah_flags;
4574  }
4575  
4576  static inline const struct ib_global_route
4577  		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4578  {
4579  	return &attr->grh;
4580  }
4581  
4582  /*To retrieve and modify the grh */
4583  static inline struct ib_global_route
rdma_ah_retrieve_grh(struct rdma_ah_attr * attr)4584  		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4585  {
4586  	return &attr->grh;
4587  }
4588  
rdma_ah_set_dgid_raw(struct rdma_ah_attr * attr,void * dgid)4589  static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4590  {
4591  	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4592  
4593  	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4594  }
4595  
rdma_ah_set_subnet_prefix(struct rdma_ah_attr * attr,__be64 prefix)4596  static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4597  					     __be64 prefix)
4598  {
4599  	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4600  
4601  	grh->dgid.global.subnet_prefix = prefix;
4602  }
4603  
rdma_ah_set_interface_id(struct rdma_ah_attr * attr,__be64 if_id)4604  static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4605  					    __be64 if_id)
4606  {
4607  	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4608  
4609  	grh->dgid.global.interface_id = if_id;
4610  }
4611  
rdma_ah_set_grh(struct rdma_ah_attr * attr,union ib_gid * dgid,u32 flow_label,u8 sgid_index,u8 hop_limit,u8 traffic_class)4612  static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4613  				   union ib_gid *dgid, u32 flow_label,
4614  				   u8 sgid_index, u8 hop_limit,
4615  				   u8 traffic_class)
4616  {
4617  	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4618  
4619  	attr->ah_flags = IB_AH_GRH;
4620  	if (dgid)
4621  		grh->dgid = *dgid;
4622  	grh->flow_label = flow_label;
4623  	grh->sgid_index = sgid_index;
4624  	grh->hop_limit = hop_limit;
4625  	grh->traffic_class = traffic_class;
4626  	grh->sgid_attr = NULL;
4627  }
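
/*
 * Example (an illustrative sketch, not upstream code; remote_gid and
 * sgid_index are hypothetical caller-provided values): populating the GRH
 * of an address handle attribute with a flow_label of 0, a hop_limit of 64
 * and a traffic_class of 0. Note that rdma_ah_set_grh() raises IB_AH_GRH
 * and leaves grh->sgid_attr NULL, so a resolved sgid_attr must be attached
 * separately (e.g. via rdma_move_grh_sgid_attr() below).
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	rdma_ah_set_grh(&attr, &remote_gid, 0, sgid_index, 64, 0);
 */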

void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr);
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src);
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new);
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u32 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
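
/*
 * Example (illustrative sketch; dev, port_num and dlid are assumed to come
 * from the caller): the attribute type should be chosen before the typed
 * accessors above are used, since rdma_ah_set_dlid() and friends dispatch
 * on it.
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(dev, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_dlid(&attr, dlid);
 */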

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation, a 32bit lid can
 *     only be obtained from other sources, and only
 *     for OPA. For IB, lids are always 16 bits, so
 *     cast the value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device:         the rdma device
 * @comp_vector:    index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * implement get_vector_affinity).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);
}
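
/*
 * Example (illustrative sketch; the queue index i is assumed to be the
 * ULP's): a ULP can use the affinity mask to place its queues near the
 * CPUs that service each completion vector's interrupt.
 *
 *	const struct cpumask *mask;
 *	int cpu;
 *
 *	mask = ib_get_vector_affinity(device, i % device->num_comp_vectors);
 *	cpu = mask ? cpumask_first(mask) : raw_smp_processor_id();
 */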

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ibdev:         the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);

struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *));

int rdma_init_netdev(struct ib_device *device, u32 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev);

/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device: device pointer from which to retrieve the ib_device pointer
 *
 * rdma_device_to_ibdev() retrieves the ib_device pointer from a device.
 */
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
	struct ib_core_device *coredev =
		container_of(device, struct ib_core_device, dev);

	return coredev->owner;
}

/**
 * ibdev_to_node - return the NUMA node for a given ib_device
 * @ibdev:	device to get the NUMA node for.
 */
static inline int ibdev_to_node(struct ib_device *ibdev)
{
	struct device *parent = ibdev->dev.parent;

	if (!parent)
		return NUMA_NO_NODE;
	return dev_to_node(parent);
}
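
/*
 * Example (illustrative sketch; struct my_ctx is a hypothetical caller
 * type): allocating per-device state on the NUMA node closest to the
 * underlying hardware.
 *
 *	struct my_ctx *ctx;
 *
 *	ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, ibdev_to_node(ibdev));
 *	if (!ctx)
 *		return -ENOMEM;
 */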

/**
 * rdma_device_to_drv_device - Helper macro to reach back to driver's
 *			       ib_device holder structure from device pointer.
 *
 * NOTE: New drivers should not make use of this API; this API is only for
 * existing drivers that have exposed sysfs entries using
 * ops->device_group.
 */
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
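
/*
 * Example (illustrative sketch; struct my_dev, its ibdev member and the
 * hca_type field are hypothetical): a legacy sysfs show function reaching
 * back to the driver structure that embeds the ib_device.
 *
 *	static ssize_t hca_type_show(struct device *device,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct my_dev *mdev =
 *			rdma_device_to_drv_device(device, struct my_dev, ibdev);
 *
 *		return sysfs_emit(buf, "%s\n", mdev->hca_type);
 *	}
 */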

bool rdma_dev_access_netns(const struct ib_device *device,
			   const struct net *net);

#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)

/**
 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
 *                               on the flow_label
 *
 * This function will convert the 20 bit flow_label input to a valid 14 bit
 * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same
 * convention.
 */
static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
{
	u32 fl_low = fl & 0x03FFF, fl_high = fl & 0xFC000;

	fl_low ^= fl_high >> 14;
	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
}
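
/*
 * A worked example of the folding above (the flow label value is chosen
 * purely for illustration): for fl = 0xABCDE, fl_low = 0x3CDE and
 * fl_high >> 14 = 0x2A, so the XOR yields fl_low = 0x3CF4; OR-ing in the
 * 0xC000 base gives a UDP source port of 0xFCF4, which always lands in
 * [IB_ROCE_UDP_ENCAP_VALID_PORT_MIN, IB_ROCE_UDP_ENCAP_VALID_PORT_MAX].
 */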

/**
 * rdma_calc_flow_label - generate an RDMA symmetric flow label value based on
 *                        local and remote qpn values
 *
 * This function folds the multiplication result of the two qpn fields,
 * 24 bits each, into a 20 bit result.
 *
 * The flow_label created is symmetric in the local and remote qpn values,
 * which allows both the requester and the responder to calculate the same
 * flow_label for a given connection.
 *
 * This helper function should be used by drivers in case the upper layer
 * provides a zero flow_label value. This is to improve entropy of RDMA
 * traffic in the network.
 */
static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
{
	u64 v = (u64)lqpn * rqpn;

	v ^= v >> 20;
	v ^= v >> 40;

	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}
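
/*
 * Note on the construction above: multiplication commutes, so
 * rdma_calc_flow_label(a, b) == rdma_calc_flow_label(b, a) by design, and
 * the two shift-XOR steps fold the (up to 48 bit) product down into the
 * low bits before masking with IB_GRH_FLOWLABEL_MASK.
 */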

/**
 * rdma_get_udp_sport - Calculate the UDP source port based on the flow
 *                      label. If the flow label is not defined in the GRH
 *                      then calculate it based on lqpn/rqpn.
 *
 * @fl:                 flow label from GRH
 * @lqpn:               local qp number
 * @rqpn:               remote qp number
 */
static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
	if (!fl)
		fl = rdma_calc_flow_label(lqpn, rqpn);

	return rdma_flow_label_to_udp_sport(fl);
}
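
/*
 * Example (illustrative sketch; ah_attr, qp and remote_qpn are assumed to
 * be the driver's): a RoCE v2 driver deriving the UDP source port while
 * building packet headers for a connected QP.
 *
 *	u16 sport;
 *
 *	sport = rdma_get_udp_sport(rdma_ah_read_grh(ah_attr)->flow_label,
 *				   qp->qp_num, remote_qpn);
 */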

const struct ib_port_immutable*
ib_port_immutable_read(struct ib_device *dev, unsigned int port);
#endif /* IB_VERBS_H */