/* xref: /openbmc/linux/include/rdma/ib_verbs.h (revision 206204a1) */
1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38 
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41 
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/mm.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/list.h>
48 #include <linux/rwsem.h>
49 #include <linux/scatterlist.h>
50 #include <linux/workqueue.h>
51 #include <uapi/linux/if_ether.h>
52 
53 #include <linux/atomic.h>
54 #include <asm/uaccess.h>
55 
56 extern struct workqueue_struct *ib_wq;
57 
58 union ib_gid {
59 	u8	raw[16];
60 	struct {
61 		__be64	subnet_prefix;
62 		__be64	interface_id;
63 	} global;
64 };
65 
66 enum rdma_node_type {
67 	/* IB values map to NodeInfo:NodeType. */
68 	RDMA_NODE_IB_CA 	= 1,
69 	RDMA_NODE_IB_SWITCH,
70 	RDMA_NODE_IB_ROUTER,
71 	RDMA_NODE_RNIC,
72 	RDMA_NODE_USNIC,
73 	RDMA_NODE_USNIC_UDP,
74 };
75 
76 enum rdma_transport_type {
77 	RDMA_TRANSPORT_IB,
78 	RDMA_TRANSPORT_IWARP,
79 	RDMA_TRANSPORT_USNIC,
80 	RDMA_TRANSPORT_USNIC_UDP
81 };
82 
83 __attribute_const__ enum rdma_transport_type
84 rdma_node_get_transport(enum rdma_node_type node_type);
85 
86 enum rdma_link_layer {
87 	IB_LINK_LAYER_UNSPECIFIED,
88 	IB_LINK_LAYER_INFINIBAND,
89 	IB_LINK_LAYER_ETHERNET,
90 };
91 
92 enum ib_device_cap_flags {
93 	IB_DEVICE_RESIZE_MAX_WR		= 1,
94 	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
95 	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
96 	IB_DEVICE_RAW_MULTI		= (1<<3),
97 	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
98 	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
99 	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
100 	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
101 	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
102 	IB_DEVICE_INIT_TYPE		= (1<<9),
103 	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
104 	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
105 	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
106 	IB_DEVICE_SRQ_RESIZE		= (1<<13),
107 	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
108 	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
109 	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
110 	IB_DEVICE_MEM_WINDOW		= (1<<17),
111 	/*
112 	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
113 	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
114 	 * messages and can verify the validity of checksum for
115 	 * incoming messages.  Setting this flag implies that the
116 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
117 	 */
118 	IB_DEVICE_UD_IP_CSUM		= (1<<18),
119 	IB_DEVICE_UD_TSO		= (1<<19),
120 	IB_DEVICE_XRC			= (1<<20),
121 	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
122 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
123 	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
124 	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
125 	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
126 	IB_DEVICE_SIGNATURE_HANDOVER	= (1<<30)
127 };
128 
129 enum ib_signature_prot_cap {
130 	IB_PROT_T10DIF_TYPE_1 = 1,
131 	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
132 	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
133 };
134 
135 enum ib_signature_guard_cap {
136 	IB_GUARD_T10DIF_CRC	= 1,
137 	IB_GUARD_T10DIF_CSUM	= 1 << 1,
138 };
139 
140 enum ib_atomic_cap {
141 	IB_ATOMIC_NONE,
142 	IB_ATOMIC_HCA,
143 	IB_ATOMIC_GLOB
144 };
145 
146 struct ib_device_attr {
147 	u64			fw_ver;
148 	__be64			sys_image_guid;
149 	u64			max_mr_size;
150 	u64			page_size_cap;
151 	u32			vendor_id;
152 	u32			vendor_part_id;
153 	u32			hw_ver;
154 	int			max_qp;
155 	int			max_qp_wr;
156 	int			device_cap_flags;
157 	int			max_sge;
158 	int			max_sge_rd;
159 	int			max_cq;
160 	int			max_cqe;
161 	int			max_mr;
162 	int			max_pd;
163 	int			max_qp_rd_atom;
164 	int			max_ee_rd_atom;
165 	int			max_res_rd_atom;
166 	int			max_qp_init_rd_atom;
167 	int			max_ee_init_rd_atom;
168 	enum ib_atomic_cap	atomic_cap;
169 	enum ib_atomic_cap	masked_atomic_cap;
170 	int			max_ee;
171 	int			max_rdd;
172 	int			max_mw;
173 	int			max_raw_ipv6_qp;
174 	int			max_raw_ethy_qp;
175 	int			max_mcast_grp;
176 	int			max_mcast_qp_attach;
177 	int			max_total_mcast_qp_attach;
178 	int			max_ah;
179 	int			max_fmr;
180 	int			max_map_per_fmr;
181 	int			max_srq;
182 	int			max_srq_wr;
183 	int			max_srq_sge;
184 	unsigned int		max_fast_reg_page_list_len;
185 	u16			max_pkeys;
186 	u8			local_ca_ack_delay;
187 	int			sig_prot_cap;
188 	int			sig_guard_cap;
189 };
190 
191 enum ib_mtu {
192 	IB_MTU_256  = 1,
193 	IB_MTU_512  = 2,
194 	IB_MTU_1024 = 3,
195 	IB_MTU_2048 = 4,
196 	IB_MTU_4096 = 5
197 };
198 
199 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
200 {
201 	switch (mtu) {
202 	case IB_MTU_256:  return  256;
203 	case IB_MTU_512:  return  512;
204 	case IB_MTU_1024: return 1024;
205 	case IB_MTU_2048: return 2048;
206 	case IB_MTU_4096: return 4096;
207 	default: 	  return -1;
208 	}
209 }
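
/*
 * Example (editorial sketch): converting a port's active MTU code to a
 * byte count, assuming "attr" was filled in by ib_query_port(), which
 * is declared later in this file.  A negative return means the code
 * was not recognized.
 *
 *	int mtu = ib_mtu_enum_to_int(attr.active_mtu);
 *
 *	if (mtu < 0)
 *		return -EINVAL;
 */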
210 
211 enum ib_port_state {
212 	IB_PORT_NOP		= 0,
213 	IB_PORT_DOWN		= 1,
214 	IB_PORT_INIT		= 2,
215 	IB_PORT_ARMED		= 3,
216 	IB_PORT_ACTIVE		= 4,
217 	IB_PORT_ACTIVE_DEFER	= 5
218 };
219 
220 enum ib_port_cap_flags {
221 	IB_PORT_SM				= 1 <<  1,
222 	IB_PORT_NOTICE_SUP			= 1 <<  2,
223 	IB_PORT_TRAP_SUP			= 1 <<  3,
224 	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
225 	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
226 	IB_PORT_SL_MAP_SUP			= 1 <<  6,
227 	IB_PORT_MKEY_NVRAM			= 1 <<  7,
228 	IB_PORT_PKEY_NVRAM			= 1 <<  8,
229 	IB_PORT_LED_INFO_SUP			= 1 <<  9,
230 	IB_PORT_SM_DISABLED			= 1 << 10,
231 	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
232 	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
233 	IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
234 	IB_PORT_CM_SUP				= 1 << 16,
235 	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
236 	IB_PORT_REINIT_SUP			= 1 << 18,
237 	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
238 	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
239 	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
240 	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
241 	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
242 	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
243 	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
244 	IB_PORT_IP_BASED_GIDS			= 1 << 26
245 };
246 
247 enum ib_port_width {
248 	IB_WIDTH_1X	= 1,
249 	IB_WIDTH_4X	= 2,
250 	IB_WIDTH_8X	= 4,
251 	IB_WIDTH_12X	= 8
252 };
253 
254 static inline int ib_width_enum_to_int(enum ib_port_width width)
255 {
256 	switch (width) {
257 	case IB_WIDTH_1X:  return  1;
258 	case IB_WIDTH_4X:  return  4;
259 	case IB_WIDTH_8X:  return  8;
260 	case IB_WIDTH_12X: return 12;
261 	default: 	  return -1;
262 	}
263 }
264 
265 enum ib_port_speed {
266 	IB_SPEED_SDR	= 1,
267 	IB_SPEED_DDR	= 2,
268 	IB_SPEED_QDR	= 4,
269 	IB_SPEED_FDR10	= 8,
270 	IB_SPEED_FDR	= 16,
271 	IB_SPEED_EDR	= 32
272 };
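
/*
 * Example (editorial sketch): deriving the lane count of an active
 * link from a previously queried struct ib_port_attr.  Note that
 * attr.active_speed carries one of the ib_port_speed codes above,
 * which are discrete per-lane rate codes rather than multipliers.
 *
 *	int lanes = ib_width_enum_to_int(attr.active_width);
 *
 *	if (lanes < 0)
 *		return -EINVAL;
 */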
273 
274 struct ib_protocol_stats {
275 	/* TBD... */
276 };
277 
278 struct iw_protocol_stats {
279 	u64	ipInReceives;
280 	u64	ipInHdrErrors;
281 	u64	ipInTooBigErrors;
282 	u64	ipInNoRoutes;
283 	u64	ipInAddrErrors;
284 	u64	ipInUnknownProtos;
285 	u64	ipInTruncatedPkts;
286 	u64	ipInDiscards;
287 	u64	ipInDelivers;
288 	u64	ipOutForwDatagrams;
289 	u64	ipOutRequests;
290 	u64	ipOutDiscards;
291 	u64	ipOutNoRoutes;
292 	u64	ipReasmTimeout;
293 	u64	ipReasmReqds;
294 	u64	ipReasmOKs;
295 	u64	ipReasmFails;
296 	u64	ipFragOKs;
297 	u64	ipFragFails;
298 	u64	ipFragCreates;
299 	u64	ipInMcastPkts;
300 	u64	ipOutMcastPkts;
301 	u64	ipInBcastPkts;
302 	u64	ipOutBcastPkts;
303 
304 	u64	tcpRtoAlgorithm;
305 	u64	tcpRtoMin;
306 	u64	tcpRtoMax;
307 	u64	tcpMaxConn;
308 	u64	tcpActiveOpens;
309 	u64	tcpPassiveOpens;
310 	u64	tcpAttemptFails;
311 	u64	tcpEstabResets;
312 	u64	tcpCurrEstab;
313 	u64	tcpInSegs;
314 	u64	tcpOutSegs;
315 	u64	tcpRetransSegs;
316 	u64	tcpInErrs;
317 	u64	tcpOutRsts;
318 };
319 
320 union rdma_protocol_stats {
321 	struct ib_protocol_stats	ib;
322 	struct iw_protocol_stats	iw;
323 };
324 
325 struct ib_port_attr {
326 	enum ib_port_state	state;
327 	enum ib_mtu		max_mtu;
328 	enum ib_mtu		active_mtu;
329 	int			gid_tbl_len;
330 	u32			port_cap_flags;
331 	u32			max_msg_sz;
332 	u32			bad_pkey_cntr;
333 	u32			qkey_viol_cntr;
334 	u16			pkey_tbl_len;
335 	u16			lid;
336 	u16			sm_lid;
337 	u8			lmc;
338 	u8			max_vl_num;
339 	u8			sm_sl;
340 	u8			subnet_timeout;
341 	u8			init_type_reply;
342 	u8			active_width;
343 	u8			active_speed;
344 	u8                      phys_state;
345 };
346 
347 enum ib_device_modify_flags {
348 	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
349 	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
350 };
351 
352 struct ib_device_modify {
353 	u64	sys_image_guid;
354 	char	node_desc[64];
355 };
356 
357 enum ib_port_modify_flags {
358 	IB_PORT_SHUTDOWN		= 1,
359 	IB_PORT_INIT_TYPE		= (1<<2),
360 	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
361 };
362 
363 struct ib_port_modify {
364 	u32	set_port_cap_mask;
365 	u32	clr_port_cap_mask;
366 	u8	init_type;
367 };
368 
369 enum ib_event_type {
370 	IB_EVENT_CQ_ERR,
371 	IB_EVENT_QP_FATAL,
372 	IB_EVENT_QP_REQ_ERR,
373 	IB_EVENT_QP_ACCESS_ERR,
374 	IB_EVENT_COMM_EST,
375 	IB_EVENT_SQ_DRAINED,
376 	IB_EVENT_PATH_MIG,
377 	IB_EVENT_PATH_MIG_ERR,
378 	IB_EVENT_DEVICE_FATAL,
379 	IB_EVENT_PORT_ACTIVE,
380 	IB_EVENT_PORT_ERR,
381 	IB_EVENT_LID_CHANGE,
382 	IB_EVENT_PKEY_CHANGE,
383 	IB_EVENT_SM_CHANGE,
384 	IB_EVENT_SRQ_ERR,
385 	IB_EVENT_SRQ_LIMIT_REACHED,
386 	IB_EVENT_QP_LAST_WQE_REACHED,
387 	IB_EVENT_CLIENT_REREGISTER,
388 	IB_EVENT_GID_CHANGE,
389 };
390 
391 struct ib_event {
392 	struct ib_device	*device;
393 	union {
394 		struct ib_cq	*cq;
395 		struct ib_qp	*qp;
396 		struct ib_srq	*srq;
397 		u8		port_num;
398 	} element;
399 	enum ib_event_type	event;
400 };
401 
402 struct ib_event_handler {
403 	struct ib_device *device;
404 	void            (*handler)(struct ib_event_handler *, struct ib_event *);
405 	struct list_head  list;
406 };
407 
408 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
409 	do {							\
410 		(_ptr)->device  = _device;			\
411 		(_ptr)->handler = _handler;			\
412 		INIT_LIST_HEAD(&(_ptr)->list);			\
413 	} while (0)
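
/*
 * Example (editorial sketch): registering an asynchronous event
 * handler on a device.  "my_event_handler" is a hypothetical callback;
 * ib_register_event_handler() is declared later in this file.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %d is active\n",
 *				event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler ev_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&ev_handler, device, my_event_handler);
 *	ib_register_event_handler(&ev_handler);
 */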
414 
415 struct ib_global_route {
416 	union ib_gid	dgid;
417 	u32		flow_label;
418 	u8		sgid_index;
419 	u8		hop_limit;
420 	u8		traffic_class;
421 };
422 
423 struct ib_grh {
424 	__be32		version_tclass_flow;
425 	__be16		paylen;
426 	u8		next_hdr;
427 	u8		hop_limit;
428 	union ib_gid	sgid;
429 	union ib_gid	dgid;
430 };
431 
432 enum {
433 	IB_MULTICAST_QPN = 0xffffff
434 };
435 
436 #define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
437 
438 enum ib_ah_flags {
439 	IB_AH_GRH	= 1
440 };
441 
442 enum ib_rate {
443 	IB_RATE_PORT_CURRENT = 0,
444 	IB_RATE_2_5_GBPS = 2,
445 	IB_RATE_5_GBPS   = 5,
446 	IB_RATE_10_GBPS  = 3,
447 	IB_RATE_20_GBPS  = 6,
448 	IB_RATE_30_GBPS  = 4,
449 	IB_RATE_40_GBPS  = 7,
450 	IB_RATE_60_GBPS  = 8,
451 	IB_RATE_80_GBPS  = 9,
452 	IB_RATE_120_GBPS = 10,
453 	IB_RATE_14_GBPS  = 11,
454 	IB_RATE_56_GBPS  = 12,
455 	IB_RATE_112_GBPS = 13,
456 	IB_RATE_168_GBPS = 14,
457 	IB_RATE_25_GBPS  = 15,
458 	IB_RATE_100_GBPS = 16,
459 	IB_RATE_200_GBPS = 17,
460 	IB_RATE_300_GBPS = 18
461 };
462 
463 /**
464  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
465  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
466  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
467  * @rate: rate to convert.
468  */
469 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
470 
471 /**
472  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
473  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
474  * @rate: rate to convert.
475  */
476 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
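
/*
 * Example (editorial note): ib_rate_to_mult(IB_RATE_20_GBPS) returns 8,
 * since 20 Gbit/sec is 8 * 2.5 Gbit/sec, while
 * ib_rate_to_mbps(IB_RATE_20_GBPS) returns 20000.
 */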
477 
478 enum ib_mr_create_flags {
479 	IB_MR_SIGNATURE_EN = 1,
480 };
481 
482 /**
483  * ib_mr_init_attr - Memory region init attributes passed to routine
484  *     ib_create_mr.
485  * @max_reg_descriptors: max number of registration descriptors that
486  *     may be used with registration work requests.
487  * @flags: MR creation flags bit mask.
488  */
489 struct ib_mr_init_attr {
490 	int	    max_reg_descriptors;
491 	u32	    flags;
492 };
493 
494 enum ib_signature_type {
495 	IB_SIG_TYPE_T10_DIF,
496 };
497 
498 /**
499  * T10-DIF Signature types
500  * T10-DIF types are defined by SCSI
501  * specifications.
502  */
503 enum ib_t10_dif_type {
504 	IB_T10DIF_NONE,
505 	IB_T10DIF_TYPE1,
506 	IB_T10DIF_TYPE2,
507 	IB_T10DIF_TYPE3
508 };
509 
510 /**
511  * Signature T10-DIF block-guard types
512  * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
513  * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
514  */
515 enum ib_t10_dif_bg_type {
516 	IB_T10DIF_CRC,
517 	IB_T10DIF_CSUM
518 };
519 
520 /**
521  * struct ib_t10_dif_domain - Parameters specific for T10-DIF
522  *     domain.
523  * @type: T10-DIF type (0|1|2|3)
524  * @bg_type: T10-DIF block guard type (CRC|CSUM)
525  * @pi_interval: protection information interval.
526  * @bg: seed of guard computation.
527  * @app_tag: application tag of guard block
528  * @ref_tag: initial guard block reference tag.
529  * @type3_inc_reftag: T10-DIF type 3 does not define
530  *     reference tag behavior; it is the user's
531  *     choice whether to increment it.
532  */
533 struct ib_t10_dif_domain {
534 	enum ib_t10_dif_type	type;
535 	enum ib_t10_dif_bg_type bg_type;
536 	u16			pi_interval;
537 	u16			bg;
538 	u16			app_tag;
539 	u32			ref_tag;
540 	bool			type3_inc_reftag;
541 };
542 
543 /**
544  * struct ib_sig_domain - Parameters for signature domain
545  * @sig_type: specific signature type
546  * @sig: union of all signature domain attributes that may
547  *     be used to set domain layout.
548  */
549 struct ib_sig_domain {
550 	enum ib_signature_type sig_type;
551 	union {
552 		struct ib_t10_dif_domain dif;
553 	} sig;
554 };
555 
556 /**
557  * struct ib_sig_attrs - Parameters for signature handover operation
558  * @check_mask: bitmask for signature byte check (8 bytes)
559  * @mem: memory domain layout descriptor.
560  * @wire: wire domain layout descriptor.
561  */
562 struct ib_sig_attrs {
563 	u8			check_mask;
564 	struct ib_sig_domain	mem;
565 	struct ib_sig_domain	wire;
566 };
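
/*
 * Example (editorial sketch; field values are illustrative, not
 * prescriptive): describing a T10-DIF type 1 handover with a CRC block
 * guard and a 512-byte protection interval on both domains:
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.check_mask = 0xff,
 *		.mem = {
 *			.sig_type = IB_SIG_TYPE_T10_DIF,
 *			.sig.dif = {
 *				.type		= IB_T10DIF_TYPE1,
 *				.bg_type	= IB_T10DIF_CRC,
 *				.pi_interval	= 512,
 *			},
 *		},
 *	};
 *
 *	sig_attrs.wire = sig_attrs.mem;
 */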
567 
568 enum ib_sig_err_type {
569 	IB_SIG_BAD_GUARD,
570 	IB_SIG_BAD_REFTAG,
571 	IB_SIG_BAD_APPTAG,
572 };
573 
574 /**
575  * struct ib_sig_err - signature error descriptor
576  */
577 struct ib_sig_err {
578 	enum ib_sig_err_type	err_type;
579 	u32			expected;
580 	u32			actual;
581 	u64			sig_err_offset;
582 	u32			key;
583 };
584 
585 enum ib_mr_status_check {
586 	IB_MR_CHECK_SIG_STATUS = 1,
587 };
588 
589 /**
590  * struct ib_mr_status - Memory region status container
591  *
592  * @fail_status: Bitmask of MR check results.  For each
593  *     failed check, a corresponding status bit is set.
594  * @sig_err: Additional info for an IB_MR_CHECK_SIG_STATUS
595  *     failure.
596  */
597 struct ib_mr_status {
598 	u32		    fail_status;
599 	struct ib_sig_err   sig_err;
600 };
601 
602 /**
603  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
604  * enum.
605  * @mult: multiple to convert.
606  */
607 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
608 
609 struct ib_ah_attr {
610 	struct ib_global_route	grh;
611 	u16			dlid;
612 	u8			sl;
613 	u8			src_path_bits;
614 	u8			static_rate;
615 	u8			ah_flags;
616 	u8			port_num;
617 	u8			dmac[ETH_ALEN];
618 	u16			vlan_id;
619 };
620 
621 enum ib_wc_status {
622 	IB_WC_SUCCESS,
623 	IB_WC_LOC_LEN_ERR,
624 	IB_WC_LOC_QP_OP_ERR,
625 	IB_WC_LOC_EEC_OP_ERR,
626 	IB_WC_LOC_PROT_ERR,
627 	IB_WC_WR_FLUSH_ERR,
628 	IB_WC_MW_BIND_ERR,
629 	IB_WC_BAD_RESP_ERR,
630 	IB_WC_LOC_ACCESS_ERR,
631 	IB_WC_REM_INV_REQ_ERR,
632 	IB_WC_REM_ACCESS_ERR,
633 	IB_WC_REM_OP_ERR,
634 	IB_WC_RETRY_EXC_ERR,
635 	IB_WC_RNR_RETRY_EXC_ERR,
636 	IB_WC_LOC_RDD_VIOL_ERR,
637 	IB_WC_REM_INV_RD_REQ_ERR,
638 	IB_WC_REM_ABORT_ERR,
639 	IB_WC_INV_EECN_ERR,
640 	IB_WC_INV_EEC_STATE_ERR,
641 	IB_WC_FATAL_ERR,
642 	IB_WC_RESP_TIMEOUT_ERR,
643 	IB_WC_GENERAL_ERR
644 };
645 
646 enum ib_wc_opcode {
647 	IB_WC_SEND,
648 	IB_WC_RDMA_WRITE,
649 	IB_WC_RDMA_READ,
650 	IB_WC_COMP_SWAP,
651 	IB_WC_FETCH_ADD,
652 	IB_WC_BIND_MW,
653 	IB_WC_LSO,
654 	IB_WC_LOCAL_INV,
655 	IB_WC_FAST_REG_MR,
656 	IB_WC_MASKED_COMP_SWAP,
657 	IB_WC_MASKED_FETCH_ADD,
658 /*
659  * Set value of IB_WC_RECV so consumers can test if a completion is a
660  * receive by testing (opcode & IB_WC_RECV).
661  */
662 	IB_WC_RECV			= 1 << 7,
663 	IB_WC_RECV_RDMA_WITH_IMM
664 };
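
/*
 * Example (editorial sketch): the receive test described above, for a
 * polled work completion "wc"; handle_receive() is a hypothetical
 * consumer routine.
 *
 *	if (wc.opcode & IB_WC_RECV)
 *		handle_receive(&wc);
 */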
665 
666 enum ib_wc_flags {
667 	IB_WC_GRH		= 1,
668 	IB_WC_WITH_IMM		= (1<<1),
669 	IB_WC_WITH_INVALIDATE	= (1<<2),
670 	IB_WC_IP_CSUM_OK	= (1<<3),
671 	IB_WC_WITH_SMAC		= (1<<4),
672 	IB_WC_WITH_VLAN		= (1<<5),
673 };
674 
675 struct ib_wc {
676 	u64			wr_id;
677 	enum ib_wc_status	status;
678 	enum ib_wc_opcode	opcode;
679 	u32			vendor_err;
680 	u32			byte_len;
681 	struct ib_qp	       *qp;
682 	union {
683 		__be32		imm_data;
684 		u32		invalidate_rkey;
685 	} ex;
686 	u32			src_qp;
687 	int			wc_flags;
688 	u16			pkey_index;
689 	u16			slid;
690 	u8			sl;
691 	u8			dlid_path_bits;
692 	u8			port_num;	/* valid only for DR SMPs on switches */
693 	u8			smac[ETH_ALEN];
694 	u16			vlan_id;
695 };
696 
697 enum ib_cq_notify_flags {
698 	IB_CQ_SOLICITED			= 1 << 0,
699 	IB_CQ_NEXT_COMP			= 1 << 1,
700 	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
701 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
702 };
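
/*
 * Example (editorial sketch): the usual poll/re-arm loop that avoids
 * losing completions which race with re-arming.  ib_poll_cq() and
 * ib_req_notify_cq() are declared later in this file; process_wc() is
 * a hypothetical consumer routine.
 *
 *	repoll:
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process_wc(&wc);
 *		if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *			goto repoll;
 */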
703 
704 enum ib_srq_type {
705 	IB_SRQT_BASIC,
706 	IB_SRQT_XRC
707 };
708 
709 enum ib_srq_attr_mask {
710 	IB_SRQ_MAX_WR	= 1 << 0,
711 	IB_SRQ_LIMIT	= 1 << 1,
712 };
713 
714 struct ib_srq_attr {
715 	u32	max_wr;
716 	u32	max_sge;
717 	u32	srq_limit;
718 };
719 
720 struct ib_srq_init_attr {
721 	void		      (*event_handler)(struct ib_event *, void *);
722 	void		       *srq_context;
723 	struct ib_srq_attr	attr;
724 	enum ib_srq_type	srq_type;
725 
726 	union {
727 		struct {
728 			struct ib_xrcd *xrcd;
729 			struct ib_cq   *cq;
730 		} xrc;
731 	} ext;
732 };
733 
734 struct ib_qp_cap {
735 	u32	max_send_wr;
736 	u32	max_recv_wr;
737 	u32	max_send_sge;
738 	u32	max_recv_sge;
739 	u32	max_inline_data;
740 };
741 
742 enum ib_sig_type {
743 	IB_SIGNAL_ALL_WR,
744 	IB_SIGNAL_REQ_WR
745 };
746 
747 enum ib_qp_type {
748 	/*
749 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
750 	 * here (and in that order) since the MAD layer uses them as
751 	 * indices into a 2-entry table.
752 	 */
753 	IB_QPT_SMI,
754 	IB_QPT_GSI,
755 
756 	IB_QPT_RC,
757 	IB_QPT_UC,
758 	IB_QPT_UD,
759 	IB_QPT_RAW_IPV6,
760 	IB_QPT_RAW_ETHERTYPE,
761 	IB_QPT_RAW_PACKET = 8,
762 	IB_QPT_XRC_INI = 9,
763 	IB_QPT_XRC_TGT,
764 	IB_QPT_MAX,
765 	/* Reserve a range for qp types internal to the low level driver.
766 	 * These qp types are not visible at the IB core layer, so uses of
767 	 * IB_QPT_MAX in the core layer are unaffected.
768 	 */
769 	IB_QPT_RESERVED1 = 0x1000,
770 	IB_QPT_RESERVED2,
771 	IB_QPT_RESERVED3,
772 	IB_QPT_RESERVED4,
773 	IB_QPT_RESERVED5,
774 	IB_QPT_RESERVED6,
775 	IB_QPT_RESERVED7,
776 	IB_QPT_RESERVED8,
777 	IB_QPT_RESERVED9,
778 	IB_QPT_RESERVED10,
779 };
780 
781 enum ib_qp_create_flags {
782 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
783 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
784 	IB_QP_CREATE_NETIF_QP			= 1 << 5,
785 	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
786 	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
787 	/* reserve bits 26-31 for low level drivers' internal use */
788 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
789 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
790 };
791 
792 
793 /*
794  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
795  * callback to destroy the passed in QP.
796  */
797 
798 struct ib_qp_init_attr {
799 	void                  (*event_handler)(struct ib_event *, void *);
800 	void		       *qp_context;
801 	struct ib_cq	       *send_cq;
802 	struct ib_cq	       *recv_cq;
803 	struct ib_srq	       *srq;
804 	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
805 	struct ib_qp_cap	cap;
806 	enum ib_sig_type	sq_sig_type;
807 	enum ib_qp_type		qp_type;
808 	enum ib_qp_create_flags	create_flags;
809 	u8			port_num; /* special QP types only */
810 };
811 
812 struct ib_qp_open_attr {
813 	void                  (*event_handler)(struct ib_event *, void *);
814 	void		       *qp_context;
815 	u32			qp_num;
816 	enum ib_qp_type		qp_type;
817 };
818 
819 enum ib_rnr_timeout {
820 	IB_RNR_TIMER_655_36 =  0,
821 	IB_RNR_TIMER_000_01 =  1,
822 	IB_RNR_TIMER_000_02 =  2,
823 	IB_RNR_TIMER_000_03 =  3,
824 	IB_RNR_TIMER_000_04 =  4,
825 	IB_RNR_TIMER_000_06 =  5,
826 	IB_RNR_TIMER_000_08 =  6,
827 	IB_RNR_TIMER_000_12 =  7,
828 	IB_RNR_TIMER_000_16 =  8,
829 	IB_RNR_TIMER_000_24 =  9,
830 	IB_RNR_TIMER_000_32 = 10,
831 	IB_RNR_TIMER_000_48 = 11,
832 	IB_RNR_TIMER_000_64 = 12,
833 	IB_RNR_TIMER_000_96 = 13,
834 	IB_RNR_TIMER_001_28 = 14,
835 	IB_RNR_TIMER_001_92 = 15,
836 	IB_RNR_TIMER_002_56 = 16,
837 	IB_RNR_TIMER_003_84 = 17,
838 	IB_RNR_TIMER_005_12 = 18,
839 	IB_RNR_TIMER_007_68 = 19,
840 	IB_RNR_TIMER_010_24 = 20,
841 	IB_RNR_TIMER_015_36 = 21,
842 	IB_RNR_TIMER_020_48 = 22,
843 	IB_RNR_TIMER_030_72 = 23,
844 	IB_RNR_TIMER_040_96 = 24,
845 	IB_RNR_TIMER_061_44 = 25,
846 	IB_RNR_TIMER_081_92 = 26,
847 	IB_RNR_TIMER_122_88 = 27,
848 	IB_RNR_TIMER_163_84 = 28,
849 	IB_RNR_TIMER_245_76 = 29,
850 	IB_RNR_TIMER_327_68 = 30,
851 	IB_RNR_TIMER_491_52 = 31
852 };
853 
854 enum ib_qp_attr_mask {
855 	IB_QP_STATE			= 1,
856 	IB_QP_CUR_STATE			= (1<<1),
857 	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
858 	IB_QP_ACCESS_FLAGS		= (1<<3),
859 	IB_QP_PKEY_INDEX		= (1<<4),
860 	IB_QP_PORT			= (1<<5),
861 	IB_QP_QKEY			= (1<<6),
862 	IB_QP_AV			= (1<<7),
863 	IB_QP_PATH_MTU			= (1<<8),
864 	IB_QP_TIMEOUT			= (1<<9),
865 	IB_QP_RETRY_CNT			= (1<<10),
866 	IB_QP_RNR_RETRY			= (1<<11),
867 	IB_QP_RQ_PSN			= (1<<12),
868 	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
869 	IB_QP_ALT_PATH			= (1<<14),
870 	IB_QP_MIN_RNR_TIMER		= (1<<15),
871 	IB_QP_SQ_PSN			= (1<<16),
872 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
873 	IB_QP_PATH_MIG_STATE		= (1<<18),
874 	IB_QP_CAP			= (1<<19),
875 	IB_QP_DEST_QPN			= (1<<20),
876 	IB_QP_SMAC			= (1<<21),
877 	IB_QP_ALT_SMAC			= (1<<22),
878 	IB_QP_VID			= (1<<23),
879 	IB_QP_ALT_VID			= (1<<24),
880 };
881 
882 enum ib_qp_state {
883 	IB_QPS_RESET,
884 	IB_QPS_INIT,
885 	IB_QPS_RTR,
886 	IB_QPS_RTS,
887 	IB_QPS_SQD,
888 	IB_QPS_SQE,
889 	IB_QPS_ERR
890 };
891 
892 enum ib_mig_state {
893 	IB_MIG_MIGRATED,
894 	IB_MIG_REARM,
895 	IB_MIG_ARMED
896 };
897 
898 enum ib_mw_type {
899 	IB_MW_TYPE_1 = 1,
900 	IB_MW_TYPE_2 = 2
901 };
902 
903 struct ib_qp_attr {
904 	enum ib_qp_state	qp_state;
905 	enum ib_qp_state	cur_qp_state;
906 	enum ib_mtu		path_mtu;
907 	enum ib_mig_state	path_mig_state;
908 	u32			qkey;
909 	u32			rq_psn;
910 	u32			sq_psn;
911 	u32			dest_qp_num;
912 	int			qp_access_flags;
913 	struct ib_qp_cap	cap;
914 	struct ib_ah_attr	ah_attr;
915 	struct ib_ah_attr	alt_ah_attr;
916 	u16			pkey_index;
917 	u16			alt_pkey_index;
918 	u8			en_sqd_async_notify;
919 	u8			sq_draining;
920 	u8			max_rd_atomic;
921 	u8			max_dest_rd_atomic;
922 	u8			min_rnr_timer;
923 	u8			port_num;
924 	u8			timeout;
925 	u8			retry_cnt;
926 	u8			rnr_retry;
927 	u8			alt_port_num;
928 	u8			alt_timeout;
929 	u8			smac[ETH_ALEN];
930 	u8			alt_smac[ETH_ALEN];
931 	u16			vlan_id;
932 	u16			alt_vlan_id;
933 };
934 
935 enum ib_wr_opcode {
936 	IB_WR_RDMA_WRITE,
937 	IB_WR_RDMA_WRITE_WITH_IMM,
938 	IB_WR_SEND,
939 	IB_WR_SEND_WITH_IMM,
940 	IB_WR_RDMA_READ,
941 	IB_WR_ATOMIC_CMP_AND_SWP,
942 	IB_WR_ATOMIC_FETCH_AND_ADD,
943 	IB_WR_LSO,
944 	IB_WR_SEND_WITH_INV,
945 	IB_WR_RDMA_READ_WITH_INV,
946 	IB_WR_LOCAL_INV,
947 	IB_WR_FAST_REG_MR,
948 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
949 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
950 	IB_WR_BIND_MW,
951 	IB_WR_REG_SIG_MR,
952 	/* reserve values for low level drivers' internal use.
953 	 * These values will not be used at all in the ib core layer.
954 	 */
955 	IB_WR_RESERVED1 = 0xf0,
956 	IB_WR_RESERVED2,
957 	IB_WR_RESERVED3,
958 	IB_WR_RESERVED4,
959 	IB_WR_RESERVED5,
960 	IB_WR_RESERVED6,
961 	IB_WR_RESERVED7,
962 	IB_WR_RESERVED8,
963 	IB_WR_RESERVED9,
964 	IB_WR_RESERVED10,
965 };
966 
967 enum ib_send_flags {
968 	IB_SEND_FENCE		= 1,
969 	IB_SEND_SIGNALED	= (1<<1),
970 	IB_SEND_SOLICITED	= (1<<2),
971 	IB_SEND_INLINE		= (1<<3),
972 	IB_SEND_IP_CSUM		= (1<<4),
973 
974 	/* reserve bits 26-31 for low level drivers' internal use */
975 	IB_SEND_RESERVED_START	= (1 << 26),
976 	IB_SEND_RESERVED_END	= (1 << 31),
977 };
978 
979 struct ib_sge {
980 	u64	addr;
981 	u32	length;
982 	u32	lkey;
983 };
984 
985 struct ib_fast_reg_page_list {
986 	struct ib_device       *device;
987 	u64		       *page_list;
988 	unsigned int		max_page_list_len;
989 };
990 
991 /**
992  * struct ib_mw_bind_info - Parameters for a memory window bind operation.
993  * @mr: A memory region to bind the memory window to.
994  * @addr: The address where the memory window should begin.
995  * @length: The length of the memory window, in bytes.
996  * @mw_access_flags: Access flags from enum ib_access_flags for the window.
997  *
998  * This struct contains the shared parameters for type 1 and type 2
999  * memory window bind operations.
1000  */
1001 struct ib_mw_bind_info {
1002 	struct ib_mr   *mr;
1003 	u64		addr;
1004 	u64		length;
1005 	int		mw_access_flags;
1006 };
1007 
1008 struct ib_send_wr {
1009 	struct ib_send_wr      *next;
1010 	u64			wr_id;
1011 	struct ib_sge	       *sg_list;
1012 	int			num_sge;
1013 	enum ib_wr_opcode	opcode;
1014 	int			send_flags;
1015 	union {
1016 		__be32		imm_data;
1017 		u32		invalidate_rkey;
1018 	} ex;
1019 	union {
1020 		struct {
1021 			u64	remote_addr;
1022 			u32	rkey;
1023 		} rdma;
1024 		struct {
1025 			u64	remote_addr;
1026 			u64	compare_add;
1027 			u64	swap;
1028 			u64	compare_add_mask;
1029 			u64	swap_mask;
1030 			u32	rkey;
1031 		} atomic;
1032 		struct {
1033 			struct ib_ah *ah;
1034 			void   *header;
1035 			int     hlen;
1036 			int     mss;
1037 			u32	remote_qpn;
1038 			u32	remote_qkey;
1039 			u16	pkey_index; /* valid for GSI only */
1040 			u8	port_num;   /* valid for DR SMPs on switch only */
1041 		} ud;
1042 		struct {
1043 			u64				iova_start;
1044 			struct ib_fast_reg_page_list   *page_list;
1045 			unsigned int			page_shift;
1046 			unsigned int			page_list_len;
1047 			u32				length;
1048 			int				access_flags;
1049 			u32				rkey;
1050 		} fast_reg;
1051 		struct {
1052 			struct ib_mw            *mw;
1053 			/* The new rkey for the memory window. */
1054 			u32                      rkey;
1055 			struct ib_mw_bind_info   bind_info;
1056 		} bind_mw;
1057 		struct {
1058 			struct ib_sig_attrs    *sig_attrs;
1059 			struct ib_mr	       *sig_mr;
1060 			int			access_flags;
1061 			struct ib_sge	       *prot;
1062 		} sig_handover;
1063 	} wr;
1064 	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
1065 };
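
/*
 * Example (editorial sketch): filling an RDMA WRITE work request over
 * one local buffer.  All values (keys, addresses, lengths) are
 * illustrative.
 *
 *	struct ib_sge sge = {
 *		.addr	= local_dma_addr,
 *		.length	= len,
 *		.lkey	= mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id		= (u64)(uintptr_t)ctx,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.opcode		= IB_WR_RDMA_WRITE,
 *		.send_flags	= IB_SEND_SIGNALED,
 *		.wr.rdma = {
 *			.remote_addr	= remote_addr,
 *			.rkey		= remote_rkey,
 *		},
 *	};
 */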
1066 
1067 struct ib_recv_wr {
1068 	struct ib_recv_wr      *next;
1069 	u64			wr_id;
1070 	struct ib_sge	       *sg_list;
1071 	int			num_sge;
1072 };
1073 
1074 enum ib_access_flags {
1075 	IB_ACCESS_LOCAL_WRITE	= 1,
1076 	IB_ACCESS_REMOTE_WRITE	= (1<<1),
1077 	IB_ACCESS_REMOTE_READ	= (1<<2),
1078 	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
1079 	IB_ACCESS_MW_BIND	= (1<<4),
1080 	IB_ZERO_BASED		= (1<<5)
1081 };
1082 
1083 struct ib_phys_buf {
1084 	u64      addr;
1085 	u64      size;
1086 };
1087 
1088 struct ib_mr_attr {
1089 	struct ib_pd	*pd;
1090 	u64		device_virt_addr;
1091 	u64		size;
1092 	int		mr_access_flags;
1093 	u32		lkey;
1094 	u32		rkey;
1095 };
1096 
1097 enum ib_mr_rereg_flags {
1098 	IB_MR_REREG_TRANS	= 1,
1099 	IB_MR_REREG_PD		= (1<<1),
1100 	IB_MR_REREG_ACCESS	= (1<<2)
1101 };
1102 
1103 /**
1104  * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
1105  * @wr_id:      Work request id.
1106  * @send_flags: Flags from ib_send_flags enum.
1107  * @bind_info:  More parameters of the bind operation.
1108  */
1109 struct ib_mw_bind {
1110 	u64                    wr_id;
1111 	int                    send_flags;
1112 	struct ib_mw_bind_info bind_info;
1113 };
1114 
1115 struct ib_fmr_attr {
1116 	int	max_pages;
1117 	int	max_maps;
1118 	u8	page_shift;
1119 };
1120 
1121 struct ib_ucontext {
1122 	struct ib_device       *device;
1123 	struct list_head	pd_list;
1124 	struct list_head	mr_list;
1125 	struct list_head	mw_list;
1126 	struct list_head	cq_list;
1127 	struct list_head	qp_list;
1128 	struct list_head	srq_list;
1129 	struct list_head	ah_list;
1130 	struct list_head	xrcd_list;
1131 	struct list_head	rule_list;
1132 	int			closing;
1133 };
1134 
1135 struct ib_uobject {
1136 	u64			user_handle;	/* handle given to us by userspace */
1137 	struct ib_ucontext     *context;	/* associated user context */
1138 	void		       *object;		/* containing object */
1139 	struct list_head	list;		/* link to context's list */
1140 	int			id;		/* index into kernel idr */
1141 	struct kref		ref;
1142 	struct rw_semaphore	mutex;		/* protects .live */
1143 	int			live;
1144 };
1145 
1146 struct ib_udata {
1147 	const void __user *inbuf;
1148 	void __user *outbuf;
1149 	size_t       inlen;
1150 	size_t       outlen;
1151 };
1152 
1153 struct ib_pd {
1154 	struct ib_device       *device;
1155 	struct ib_uobject      *uobject;
1156 	atomic_t          	usecnt; /* count all resources */
1157 };
1158 
1159 struct ib_xrcd {
1160 	struct ib_device       *device;
1161 	atomic_t		usecnt; /* count all exposed resources */
1162 	struct inode	       *inode;
1163 
1164 	struct mutex		tgt_qp_mutex;
1165 	struct list_head	tgt_qp_list;
1166 };
1167 
1168 struct ib_ah {
1169 	struct ib_device	*device;
1170 	struct ib_pd		*pd;
1171 	struct ib_uobject	*uobject;
1172 };
1173 
1174 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1175 
1176 struct ib_cq {
1177 	struct ib_device       *device;
1178 	struct ib_uobject      *uobject;
1179 	ib_comp_handler   	comp_handler;
1180 	void                  (*event_handler)(struct ib_event *, void *);
1181 	void                   *cq_context;
1182 	int               	cqe;
1183 	atomic_t          	usecnt; /* count number of work queues */
1184 };
1185 
1186 struct ib_srq {
1187 	struct ib_device       *device;
1188 	struct ib_pd	       *pd;
1189 	struct ib_uobject      *uobject;
1190 	void		      (*event_handler)(struct ib_event *, void *);
1191 	void		       *srq_context;
1192 	enum ib_srq_type	srq_type;
1193 	atomic_t		usecnt;
1194 
1195 	union {
1196 		struct {
1197 			struct ib_xrcd *xrcd;
1198 			struct ib_cq   *cq;
1199 			u32		srq_num;
1200 		} xrc;
1201 	} ext;
1202 };
1203 
1204 struct ib_qp {
1205 	struct ib_device       *device;
1206 	struct ib_pd	       *pd;
1207 	struct ib_cq	       *send_cq;
1208 	struct ib_cq	       *recv_cq;
1209 	struct ib_srq	       *srq;
1210 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1211 	struct list_head	xrcd_list;
1212 	/* count times opened, mcast attaches, flow attaches */
1213 	atomic_t		usecnt;
1214 	struct list_head	open_list;
1215 	struct ib_qp           *real_qp;
1216 	struct ib_uobject      *uobject;
1217 	void                  (*event_handler)(struct ib_event *, void *);
1218 	void		       *qp_context;
1219 	u32			qp_num;
1220 	enum ib_qp_type		qp_type;
1221 };
1222 
1223 struct ib_mr {
1224 	struct ib_device  *device;
1225 	struct ib_pd	  *pd;
1226 	struct ib_uobject *uobject;
1227 	u32		   lkey;
1228 	u32		   rkey;
1229 	atomic_t	   usecnt; /* count number of MWs */
1230 };
1231 
1232 struct ib_mw {
1233 	struct ib_device	*device;
1234 	struct ib_pd		*pd;
1235 	struct ib_uobject	*uobject;
1236 	u32			rkey;
1237 	enum ib_mw_type         type;
1238 };
1239 
1240 struct ib_fmr {
1241 	struct ib_device	*device;
1242 	struct ib_pd		*pd;
1243 	struct list_head	list;
1244 	u32			lkey;
1245 	u32			rkey;
1246 };
1247 
1248 /* Supported steering options */
1249 enum ib_flow_attr_type {
1250 	/* steering according to rule specifications */
1251 	IB_FLOW_ATTR_NORMAL		= 0x0,
1252 	/* default unicast and multicast rule -
1253 	 * receive all Eth traffic which isn't steered to any QP
1254 	 */
1255 	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1256 	/* default multicast rule -
1257 	 * receive all Eth multicast traffic which isn't steered to any QP
1258 	 */
1259 	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1260 	/* sniffer rule - receive all port traffic */
1261 	IB_FLOW_ATTR_SNIFFER		= 0x3
1262 };
1263 
1264 /* Supported steering header types */
1265 enum ib_flow_spec_type {
1266 	/* L2 headers*/
1267 	IB_FLOW_SPEC_ETH	= 0x20,
1268 	IB_FLOW_SPEC_IB		= 0x22,
1269 	/* L3 header*/
1270 	IB_FLOW_SPEC_IPV4	= 0x30,
1271 	/* L4 headers*/
1272 	IB_FLOW_SPEC_TCP	= 0x40,
1273 	IB_FLOW_SPEC_UDP	= 0x41
1274 };
1275 #define IB_FLOW_SPEC_LAYER_MASK	0xF0
1276 #define IB_FLOW_SPEC_SUPPORT_LAYERS 4
1277 
1278 /* Flow steering rule priority is set according to its domain.
1279  * Lower domain value means higher priority.
1280  */
1281 enum ib_flow_domain {
1282 	IB_FLOW_DOMAIN_USER,
1283 	IB_FLOW_DOMAIN_ETHTOOL,
1284 	IB_FLOW_DOMAIN_RFS,
1285 	IB_FLOW_DOMAIN_NIC,
1286 	IB_FLOW_DOMAIN_NUM /* Must be last */
1287 };
1288 
1289 struct ib_flow_eth_filter {
1290 	u8	dst_mac[6];
1291 	u8	src_mac[6];
1292 	__be16	ether_type;
1293 	__be16	vlan_tag;
1294 };
1295 
1296 struct ib_flow_spec_eth {
1297 	enum ib_flow_spec_type	  type;
1298 	u16			  size;
1299 	struct ib_flow_eth_filter val;
1300 	struct ib_flow_eth_filter mask;
1301 };
1302 
1303 struct ib_flow_ib_filter {
1304 	__be16 dlid;
1305 	__u8   sl;
1306 };
1307 
1308 struct ib_flow_spec_ib {
1309 	enum ib_flow_spec_type	 type;
1310 	u16			 size;
1311 	struct ib_flow_ib_filter val;
1312 	struct ib_flow_ib_filter mask;
1313 };
1314 
1315 struct ib_flow_ipv4_filter {
1316 	__be32	src_ip;
1317 	__be32	dst_ip;
1318 };
1319 
1320 struct ib_flow_spec_ipv4 {
1321 	enum ib_flow_spec_type	   type;
1322 	u16			   size;
1323 	struct ib_flow_ipv4_filter val;
1324 	struct ib_flow_ipv4_filter mask;
1325 };
1326 
1327 struct ib_flow_tcp_udp_filter {
1328 	__be16	dst_port;
1329 	__be16	src_port;
1330 };
1331 
1332 struct ib_flow_spec_tcp_udp {
1333 	enum ib_flow_spec_type	      type;
1334 	u16			      size;
1335 	struct ib_flow_tcp_udp_filter val;
1336 	struct ib_flow_tcp_udp_filter mask;
1337 };
1338 
1339 union ib_flow_spec {
1340 	struct {
1341 		enum ib_flow_spec_type	type;
1342 		u16			size;
1343 	};
1344 	struct ib_flow_spec_eth		eth;
1345 	struct ib_flow_spec_ib		ib;
1346 	struct ib_flow_spec_ipv4        ipv4;
1347 	struct ib_flow_spec_tcp_udp	tcp_udp;
1348 };
1349 
1350 struct ib_flow_attr {
1351 	enum ib_flow_attr_type type;
1352 	u16	     size;
1353 	u16	     priority;
1354 	u32	     flags;
1355 	u8	     num_of_specs;
1356 	u8	     port;
1357 	/* The optional spec layers follow, according to the user's request:
1358 	 * struct ib_flow_spec_xxx
1359 	 * struct ib_flow_spec_yyy
1360 	 */
1361 };
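
/*
 * Example (editorial sketch): the spec layers are laid out contiguously
 * after the attribute header, so a rule with a single L2 spec is
 * typically built from one allocation and then handed to
 * ib_create_flow(), declared later in this file:
 *
 *	struct ib_flow_attr *attr;
 *	struct ib_flow_spec_eth *eth;
 *
 *	attr = kzalloc(sizeof(*attr) + sizeof(*eth), GFP_KERNEL);
 *	if (!attr)
 *		return -ENOMEM;
 *	attr->type = IB_FLOW_ATTR_NORMAL;
 *	attr->size = sizeof(*attr) + sizeof(*eth);
 *	attr->num_of_specs = 1;
 *	attr->port = port_num;
 *	eth = (struct ib_flow_spec_eth *)(attr + 1);
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 */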
1362 
1363 struct ib_flow {
1364 	struct ib_qp		*qp;
1365 	struct ib_uobject	*uobject;
1366 };
1367 
1368 struct ib_mad;
1369 struct ib_grh;
1370 
1371 enum ib_process_mad_flags {
1372 	IB_MAD_IGNORE_MKEY	= 1,
1373 	IB_MAD_IGNORE_BKEY	= 2,
1374 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1375 };
1376 
1377 enum ib_mad_result {
1378 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
1379 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
1380 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
1381 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
1382 };
1383 
1384 #define IB_DEVICE_NAME_MAX 64
1385 
1386 struct ib_cache {
1387 	rwlock_t                lock;
1388 	struct ib_event_handler event_handler;
1389 	struct ib_pkey_cache  **pkey_cache;
1390 	struct ib_gid_cache   **gid_cache;
1391 	u8                     *lmc_cache;
1392 };
1393 
1394 struct ib_dma_mapping_ops {
1395 	int		(*mapping_error)(struct ib_device *dev,
1396 					 u64 dma_addr);
1397 	u64		(*map_single)(struct ib_device *dev,
1398 				      void *ptr, size_t size,
1399 				      enum dma_data_direction direction);
1400 	void		(*unmap_single)(struct ib_device *dev,
1401 					u64 addr, size_t size,
1402 					enum dma_data_direction direction);
1403 	u64		(*map_page)(struct ib_device *dev,
1404 				    struct page *page, unsigned long offset,
1405 				    size_t size,
1406 				    enum dma_data_direction direction);
1407 	void		(*unmap_page)(struct ib_device *dev,
1408 				      u64 addr, size_t size,
1409 				      enum dma_data_direction direction);
1410 	int		(*map_sg)(struct ib_device *dev,
1411 				  struct scatterlist *sg, int nents,
1412 				  enum dma_data_direction direction);
1413 	void		(*unmap_sg)(struct ib_device *dev,
1414 				    struct scatterlist *sg, int nents,
1415 				    enum dma_data_direction direction);
1416 	void		(*sync_single_for_cpu)(struct ib_device *dev,
1417 					       u64 dma_handle,
1418 					       size_t size,
1419 					       enum dma_data_direction dir);
1420 	void		(*sync_single_for_device)(struct ib_device *dev,
1421 						  u64 dma_handle,
1422 						  size_t size,
1423 						  enum dma_data_direction dir);
1424 	void		*(*alloc_coherent)(struct ib_device *dev,
1425 					   size_t size,
1426 					   u64 *dma_handle,
1427 					   gfp_t flag);
1428 	void		(*free_coherent)(struct ib_device *dev,
1429 					 size_t size, void *cpu_addr,
1430 					 u64 dma_handle);
1431 };
1432 
1433 struct iw_cm_verbs;
1434 
1435 struct ib_device {
1436 	struct device                *dma_device;
1437 
1438 	char                          name[IB_DEVICE_NAME_MAX];
1439 
1440 	struct list_head              event_handler_list;
1441 	spinlock_t                    event_handler_lock;
1442 
1443 	spinlock_t                    client_data_lock;
1444 	struct list_head              core_list;
1445 	struct list_head              client_data_list;
1446 
1447 	struct ib_cache               cache;
1448 	int                          *pkey_tbl_len;
1449 	int                          *gid_tbl_len;
1450 
1451 	int			      num_comp_vectors;
1452 
1453 	struct iw_cm_verbs	     *iwcm;
1454 
1455 	int		           (*get_protocol_stats)(struct ib_device *device,
1456 							 union rdma_protocol_stats *stats);
1457 	int		           (*query_device)(struct ib_device *device,
1458 						   struct ib_device_attr *device_attr);
1459 	int		           (*query_port)(struct ib_device *device,
1460 						 u8 port_num,
1461 						 struct ib_port_attr *port_attr);
1462 	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
1463 						     u8 port_num);
1464 	int		           (*query_gid)(struct ib_device *device,
1465 						u8 port_num, int index,
1466 						union ib_gid *gid);
1467 	int		           (*query_pkey)(struct ib_device *device,
1468 						 u8 port_num, u16 index, u16 *pkey);
1469 	int		           (*modify_device)(struct ib_device *device,
1470 						    int device_modify_mask,
1471 						    struct ib_device_modify *device_modify);
1472 	int		           (*modify_port)(struct ib_device *device,
1473 						  u8 port_num, int port_modify_mask,
1474 						  struct ib_port_modify *port_modify);
1475 	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
1476 						     struct ib_udata *udata);
1477 	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
1478 	int                        (*mmap)(struct ib_ucontext *context,
1479 					   struct vm_area_struct *vma);
1480 	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
1481 					       struct ib_ucontext *context,
1482 					       struct ib_udata *udata);
1483 	int                        (*dealloc_pd)(struct ib_pd *pd);
1484 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
1485 						struct ib_ah_attr *ah_attr);
1486 	int                        (*modify_ah)(struct ib_ah *ah,
1487 						struct ib_ah_attr *ah_attr);
1488 	int                        (*query_ah)(struct ib_ah *ah,
1489 					       struct ib_ah_attr *ah_attr);
1490 	int                        (*destroy_ah)(struct ib_ah *ah);
1491 	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
1492 						 struct ib_srq_init_attr *srq_init_attr,
1493 						 struct ib_udata *udata);
1494 	int                        (*modify_srq)(struct ib_srq *srq,
1495 						 struct ib_srq_attr *srq_attr,
1496 						 enum ib_srq_attr_mask srq_attr_mask,
1497 						 struct ib_udata *udata);
1498 	int                        (*query_srq)(struct ib_srq *srq,
1499 						struct ib_srq_attr *srq_attr);
1500 	int                        (*destroy_srq)(struct ib_srq *srq);
1501 	int                        (*post_srq_recv)(struct ib_srq *srq,
1502 						    struct ib_recv_wr *recv_wr,
1503 						    struct ib_recv_wr **bad_recv_wr);
1504 	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
1505 						struct ib_qp_init_attr *qp_init_attr,
1506 						struct ib_udata *udata);
1507 	int                        (*modify_qp)(struct ib_qp *qp,
1508 						struct ib_qp_attr *qp_attr,
1509 						int qp_attr_mask,
1510 						struct ib_udata *udata);
1511 	int                        (*query_qp)(struct ib_qp *qp,
1512 					       struct ib_qp_attr *qp_attr,
1513 					       int qp_attr_mask,
1514 					       struct ib_qp_init_attr *qp_init_attr);
1515 	int                        (*destroy_qp)(struct ib_qp *qp);
1516 	int                        (*post_send)(struct ib_qp *qp,
1517 						struct ib_send_wr *send_wr,
1518 						struct ib_send_wr **bad_send_wr);
1519 	int                        (*post_recv)(struct ib_qp *qp,
1520 						struct ib_recv_wr *recv_wr,
1521 						struct ib_recv_wr **bad_recv_wr);
1522 	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
1523 						int comp_vector,
1524 						struct ib_ucontext *context,
1525 						struct ib_udata *udata);
1526 	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1527 						u16 cq_period);
1528 	int                        (*destroy_cq)(struct ib_cq *cq);
1529 	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
1530 						struct ib_udata *udata);
1531 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
1532 					      struct ib_wc *wc);
1533 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1534 	int                        (*req_notify_cq)(struct ib_cq *cq,
1535 						    enum ib_cq_notify_flags flags);
1536 	int                        (*req_ncomp_notif)(struct ib_cq *cq,
1537 						      int wc_cnt);
1538 	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
1539 						 int mr_access_flags);
1540 	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
1541 						  struct ib_phys_buf *phys_buf_array,
1542 						  int num_phys_buf,
1543 						  int mr_access_flags,
1544 						  u64 *iova_start);
1545 	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
1546 						  u64 start, u64 length,
1547 						  u64 virt_addr,
1548 						  int mr_access_flags,
1549 						  struct ib_udata *udata);
1550 	int                        (*query_mr)(struct ib_mr *mr,
1551 					       struct ib_mr_attr *mr_attr);
1552 	int                        (*dereg_mr)(struct ib_mr *mr);
1553 	int                        (*destroy_mr)(struct ib_mr *mr);
1554 	struct ib_mr *		   (*create_mr)(struct ib_pd *pd,
1555 						struct ib_mr_init_attr *mr_init_attr);
1556 	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
1557 					       int max_page_list_len);
1558 	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1559 								   int page_list_len);
1560 	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1561 	int                        (*rereg_phys_mr)(struct ib_mr *mr,
1562 						    int mr_rereg_mask,
1563 						    struct ib_pd *pd,
1564 						    struct ib_phys_buf *phys_buf_array,
1565 						    int num_phys_buf,
1566 						    int mr_access_flags,
1567 						    u64 *iova_start);
1568 	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
1569 					       enum ib_mw_type type);
1570 	int                        (*bind_mw)(struct ib_qp *qp,
1571 					      struct ib_mw *mw,
1572 					      struct ib_mw_bind *mw_bind);
1573 	int                        (*dealloc_mw)(struct ib_mw *mw);
1574 	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
1575 						int mr_access_flags,
1576 						struct ib_fmr_attr *fmr_attr);
1577 	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
1578 						   u64 *page_list, int list_len,
1579 						   u64 iova);
1580 	int		           (*unmap_fmr)(struct list_head *fmr_list);
1581 	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
1582 	int                        (*attach_mcast)(struct ib_qp *qp,
1583 						   union ib_gid *gid,
1584 						   u16 lid);
1585 	int                        (*detach_mcast)(struct ib_qp *qp,
1586 						   union ib_gid *gid,
1587 						   u16 lid);
1588 	int                        (*process_mad)(struct ib_device *device,
1589 						  int process_mad_flags,
1590 						  u8 port_num,
1591 						  struct ib_wc *in_wc,
1592 						  struct ib_grh *in_grh,
1593 						  struct ib_mad *in_mad,
1594 						  struct ib_mad *out_mad);
1595 	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
1596 						 struct ib_ucontext *ucontext,
1597 						 struct ib_udata *udata);
1598 	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1599 	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
1600 						  struct ib_flow_attr
1601 						  *flow_attr,
1602 						  int domain);
1603 	int			   (*destroy_flow)(struct ib_flow *flow_id);
1604 	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
1605 						      struct ib_mr_status *mr_status);
1606 
1607 	struct ib_dma_mapping_ops   *dma_ops;
1608 
1609 	struct module               *owner;
1610 	struct device                dev;
1611 	struct kobject               *ports_parent;
1612 	struct list_head             port_list;
1613 
1614 	enum {
1615 		IB_DEV_UNINITIALIZED,
1616 		IB_DEV_REGISTERED,
1617 		IB_DEV_UNREGISTERED
1618 	}                            reg_state;
1619 
1620 	int			     uverbs_abi_ver;
1621 	u64			     uverbs_cmd_mask;
1622 	u64			     uverbs_ex_cmd_mask;
1623 
1624 	char			     node_desc[64];
1625 	__be64			     node_guid;
1626 	u32			     local_dma_lkey;
1627 	u8                           node_type;
1628 	u8                           phys_port_cnt;
1629 };
1630 
1631 struct ib_client {
1632 	char  *name;
1633 	void (*add)   (struct ib_device *);
1634 	void (*remove)(struct ib_device *);
1635 
1636 	struct list_head list;
1637 };
1638 
1639 struct ib_device *ib_alloc_device(size_t size);
1640 void ib_dealloc_device(struct ib_device *device);
1641 
1642 int ib_register_device(struct ib_device *device,
1643 		       int (*port_callback)(struct ib_device *,
1644 					    u8, struct kobject *));
1645 void ib_unregister_device(struct ib_device *device);
1646 
1647 int ib_register_client   (struct ib_client *client);
1648 void ib_unregister_client(struct ib_client *client);
1649 
1650 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1651 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
1652 			 void *data);
1653 
1654 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1655 {
1656 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1657 }
1658 
1659 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1660 {
1661 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1662 }
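
/*
 * Example (editorial sketch): a driver verb copying a hypothetical
 * userspace command structure ("struct my_cmd" is illustrative) before
 * acting on it:
 *
 *	struct my_cmd cmd;
 *
 *	if (udata->inlen < sizeof(cmd))
 *		return ERR_PTR(-EINVAL);
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return ERR_PTR(-EFAULT);
 */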
1663 
1664 /**
1665  * ib_modify_qp_is_ok - Check that the supplied attribute mask
1666  * contains all required attributes and no attributes not allowed for
1667  * the given QP state transition.
1668  * @cur_state: Current QP state
1669  * @next_state: Next QP state
1670  * @type: QP type
1671  * @mask: Mask of supplied QP attributes
1672  * @ll : link layer of port
1673  *
1674  * This function is a helper function that a low-level driver's
1675  * modify_qp method can use to validate the consumer's input.  It
1676  * checks that cur_state and next_state are valid QP states, that a
1677  * transition from cur_state to next_state is allowed by the IB spec,
1678  * and that the attribute mask supplied is allowed for the transition.
1679  */
1680 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1681 		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
1682 		       enum rdma_link_layer ll);
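
/*
 * Example (editorial sketch): typical use at the top of a driver's
 * modify_qp method, rejecting illegal transitions before touching
 * hardware.  Deriving cur_state and new_state is driver-specific.
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
 *		return -EINVAL;
 */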
1683 
1684 int ib_register_event_handler  (struct ib_event_handler *event_handler);
1685 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1686 void ib_dispatch_event(struct ib_event *event);
1687 
1688 int ib_query_device(struct ib_device *device,
1689 		    struct ib_device_attr *device_attr);
1690 
1691 int ib_query_port(struct ib_device *device,
1692 		  u8 port_num, struct ib_port_attr *port_attr);
1693 
1694 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1695 					       u8 port_num);
1696 
1697 int ib_query_gid(struct ib_device *device,
1698 		 u8 port_num, int index, union ib_gid *gid);
1699 
1700 int ib_query_pkey(struct ib_device *device,
1701 		  u8 port_num, u16 index, u16 *pkey);
1702 
1703 int ib_modify_device(struct ib_device *device,
1704 		     int device_modify_mask,
1705 		     struct ib_device_modify *device_modify);
1706 
1707 int ib_modify_port(struct ib_device *device,
1708 		   u8 port_num, int port_modify_mask,
1709 		   struct ib_port_modify *port_modify);
1710 
1711 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1712 		u8 *port_num, u16 *index);
1713 
1714 int ib_find_pkey(struct ib_device *device,
1715 		 u8 port_num, u16 pkey, u16 *index);
1716 
1717 /**
1718  * ib_alloc_pd - Allocates an unused protection domain.
1719  * @device: The device on which to allocate the protection domain.
1720  *
1721  * A protection domain object provides an association between QPs, shared
1722  * receive queues, address handles, memory regions, and memory windows.
1723  */
1724 struct ib_pd *ib_alloc_pd(struct ib_device *device);
1725 
1726 /**
1727  * ib_dealloc_pd - Deallocates a protection domain.
1728  * @pd: The protection domain to deallocate.
1729  */
1730 int ib_dealloc_pd(struct ib_pd *pd);
1731 
1732 /**
1733  * ib_create_ah - Creates an address handle for the given address vector.
1734  * @pd: The protection domain associated with the address handle.
1735  * @ah_attr: The attributes of the address vector.
1736  *
1737  * The address handle is used to reference a local or global destination
1738  * in all UD QP post sends.
1739  */
1740 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1741 
1742 /**
1743  * ib_init_ah_from_wc - Initializes address handle attributes from a
1744  *   work completion.
1745  * @device: Device on which the received message arrived.
1746  * @port_num: Port on which the received message arrived.
1747  * @wc: Work completion associated with the received message.
1748  * @grh: References the received global route header.  This parameter is
1749  *   ignored unless the work completion indicates that the GRH is valid.
1750  * @ah_attr: Returned attributes that can be used when creating an address
1751  *   handle for replying to the message.
1752  */
1753 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1754 		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1755 
1756 /**
1757  * ib_create_ah_from_wc - Creates an address handle associated with the
1758  *   sender of the specified work completion.
1759  * @pd: The protection domain associated with the address handle.
1760  * @wc: Work completion information associated with a received message.
1761  * @grh: References the received global route header.  This parameter is
1762  *   ignored unless the work completion indicates that the GRH is valid.
1763  * @port_num: The outbound port number to associate with the address.
1764  *
1765  * The address handle is used to reference a local or global destination
1766  * in all UD QP post sends.
1767  */
1768 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1769 				   struct ib_grh *grh, u8 port_num);
1770 
1771 /**
1772  * ib_modify_ah - Modifies the address vector associated with an address
1773  *   handle.
1774  * @ah: The address handle to modify.
1775  * @ah_attr: The new address vector attributes to associate with the
1776  *   address handle.
1777  */
1778 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1779 
1780 /**
1781  * ib_query_ah - Queries the address vector associated with an address
1782  *   handle.
1783  * @ah: The address handle to query.
1784  * @ah_attr: The address vector attributes associated with the address
1785  *   handle.
1786  */
1787 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1788 
1789 /**
1790  * ib_destroy_ah - Destroys an address handle.
1791  * @ah: The address handle to destroy.
1792  */
1793 int ib_destroy_ah(struct ib_ah *ah);
1794 
1795 /**
1796  * ib_create_srq - Creates a SRQ associated with the specified protection
1797  *   domain.
1798  * @pd: The protection domain associated with the SRQ.
1799  * @srq_init_attr: A list of initial attributes required to create the
1800  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1801  *   the actual capabilities of the created SRQ.
1802  *
1803  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1804  * requested size of the SRQ, and set to the actual values allocated
1805  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1806  * will always be at least as large as the requested values.
1807  */
1808 struct ib_srq *ib_create_srq(struct ib_pd *pd,
1809 			     struct ib_srq_init_attr *srq_init_attr);
1810 
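
/*
 * Example (editorial sketch): creating a basic SRQ; the requested
 * sizes are illustrative and, as noted above, may be rounded up by
 * the driver.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = {
 *			.max_wr	 = 256,
 *			.max_sge = 1,
 *		},
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */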
1811 /**
1812  * ib_modify_srq - Modifies the attributes for the specified SRQ.
1813  * @srq: The SRQ to modify.
1814  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1815  *   the current values of selected SRQ attributes are returned.
1816  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1817  *   are being modified.
1818  *
1819  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1820  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1821  * the number of receives queued drops below the limit.
1822  */
1823 int ib_modify_srq(struct ib_srq *srq,
1824 		  struct ib_srq_attr *srq_attr,
1825 		  enum ib_srq_attr_mask srq_attr_mask);
1826 
1827 /**
1828  * ib_query_srq - Returns the attribute list and current values for the
1829  *   specified SRQ.
1830  * @srq: The SRQ to query.
1831  * @srq_attr: The attributes of the specified SRQ.
1832  */
1833 int ib_query_srq(struct ib_srq *srq,
1834 		 struct ib_srq_attr *srq_attr);
1835 
1836 /**
1837  * ib_destroy_srq - Destroys the specified SRQ.
1838  * @srq: The SRQ to destroy.
1839  */
1840 int ib_destroy_srq(struct ib_srq *srq);
1841 
1842 /**
1843  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1844  * @srq: The SRQ to post the work request on.
1845  * @recv_wr: A list of work requests to post on the receive queue.
1846  * @bad_recv_wr: On an immediate failure, this parameter will reference
1847  *   the work request that failed to be posted on the SRQ.
1848  */
1849 static inline int ib_post_srq_recv(struct ib_srq *srq,
1850 				   struct ib_recv_wr *recv_wr,
1851 				   struct ib_recv_wr **bad_recv_wr)
1852 {
1853 	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1854 }
1855 
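/*
 * Illustrative sketch only: post one receive buffer to an SRQ.  The
 * helper name is hypothetical; @addr, @length and @lkey are assumed to
 * come from an earlier DMA mapping and memory registration.
 */
static inline int example_post_one_srq_recv(struct ib_srq *srq, u64 wr_id,
					    u64 addr, u32 length, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= addr,
		.length	= length,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id	 = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}
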
1856 /**
1857  * ib_create_qp - Creates a QP associated with the specified protection
1858  *   domain.
1859  * @pd: The protection domain associated with the QP.
1860  * @qp_init_attr: A list of initial attributes required to create the
1861  *   QP.  If QP creation succeeds, then the attributes are updated to
1862  *   the actual capabilities of the created QP.
1863  */
1864 struct ib_qp *ib_create_qp(struct ib_pd *pd,
1865 			   struct ib_qp_init_attr *qp_init_attr);
1866 
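/*
 * Illustrative sketch only: create an RC QP with modest, hypothetical
 * capacities.  @send_cq and @recv_cq are assumed to exist; on success,
 * init_attr.cap is updated with the capabilities actually granted.
 */
static inline struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
						 struct ib_cq *send_cq,
						 struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq = send_cq,
		.recv_cq = recv_cq,
		.cap	 = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &init_attr);
}
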
1867 /**
1868  * ib_modify_qp - Modifies the attributes for the specified QP and then
1869  *   transitions the QP to the given state.
1870  * @qp: The QP to modify.
1871  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1872  *   the current values of selected QP attributes are returned.
1873  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1874  *   are being modified.
1875  */
1876 int ib_modify_qp(struct ib_qp *qp,
1877 		 struct ib_qp_attr *qp_attr,
1878 		 int qp_attr_mask);
1879 
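/*
 * Illustrative sketch only: the RESET->INIT transition of an RC QP.  The
 * helper name and the pkey index/access flag choices are hypothetical;
 * note that every attribute touched must be flagged in the mask.
 */
static inline int example_qp_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
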
1880 /**
1881  * ib_query_qp - Returns the attribute list and current values for the
1882  *   specified QP.
1883  * @qp: The QP to query.
1884  * @qp_attr: The attributes of the specified QP.
1885  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1886  * @qp_init_attr: Additional attributes of the selected QP.
1887  *
1888  * The qp_attr_mask may be used to limit the query to gathering only the
1889  * selected attributes.
1890  */
1891 int ib_query_qp(struct ib_qp *qp,
1892 		struct ib_qp_attr *qp_attr,
1893 		int qp_attr_mask,
1894 		struct ib_qp_init_attr *qp_init_attr);
1895 
1896 /**
1897  * ib_destroy_qp - Destroys the specified QP.
1898  * @qp: The QP to destroy.
1899  */
1900 int ib_destroy_qp(struct ib_qp *qp);
1901 
1902 /**
1903  * ib_open_qp - Obtain a reference to an existing sharable QP.
1904  * @xrcd: XRC domain
1905  * @qp_open_attr: Attributes identifying the QP to open.
1906  *
1907  * Returns a reference to a sharable QP.
1908  */
1909 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1910 			 struct ib_qp_open_attr *qp_open_attr);
1911 
1912 /**
1913  * ib_close_qp - Release an external reference to a QP.
1914  * @qp: The QP handle to release
1915  *
1916  * The opened QP handle is released by the caller.  The underlying
1917  * shared QP is not destroyed until all internal references are released.
1918  */
1919 int ib_close_qp(struct ib_qp *qp);
1920 
1921 /**
1922  * ib_post_send - Posts a list of work requests to the send queue of
1923  *   the specified QP.
1924  * @qp: The QP to post the work request on.
1925  * @send_wr: A list of work requests to post on the send queue.
1926  * @bad_send_wr: On an immediate failure, this parameter will reference
1927  *   the work request that failed to be posted on the QP.
1928  *
1929  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1930  * error is returned, the QP state shall not be affected,
1931  * ib_post_send() will return an immediate error after queueing any
1932  * earlier work requests in the list.
1933  */
1934 static inline int ib_post_send(struct ib_qp *qp,
1935 			       struct ib_send_wr *send_wr,
1936 			       struct ib_send_wr **bad_send_wr)
1937 {
1938 	return qp->device->post_send(qp, send_wr, bad_send_wr);
1939 }
1940 
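/*
 * Illustrative sketch only: post a single signaled SEND of one buffer.
 * The helper name is hypothetical; @addr, @length and @lkey are assumed
 * to come from an earlier DMA mapping and memory registration.
 */
static inline int example_post_one_send(struct ib_qp *qp, u64 wr_id,
					u64 addr, u32 length, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= addr,
		.length	= length,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id	    = wr_id,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}
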
1941 /**
1942  * ib_post_recv - Posts a list of work requests to the receive queue of
1943  *   the specified QP.
1944  * @qp: The QP to post the work request on.
1945  * @recv_wr: A list of work requests to post on the receive queue.
1946  * @bad_recv_wr: On an immediate failure, this parameter will reference
1947  *   the work request that failed to be posted on the QP.
1948  */
1949 static inline int ib_post_recv(struct ib_qp *qp,
1950 			       struct ib_recv_wr *recv_wr,
1951 			       struct ib_recv_wr **bad_recv_wr)
1952 {
1953 	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1954 }
1955 
1956 /**
1957  * ib_create_cq - Creates a CQ on the specified device.
1958  * @device: The device on which to create the CQ.
1959  * @comp_handler: A user-specified callback that is invoked when a
1960  *   completion event occurs on the CQ.
1961  * @event_handler: A user-specified callback that is invoked when an
1962  *   asynchronous event not associated with a completion occurs on the CQ.
1963  * @cq_context: Context associated with the CQ returned to the user via
1964  *   the associated completion and event handlers.
1965  * @cqe: The minimum size of the CQ.
1966  * @comp_vector: Completion vector used to signal completion events.
1967  *   Must be >= 0 and < context->num_comp_vectors.
1968  *
1969  * Users can examine the cq structure to determine the actual CQ size.
1970  */
1971 struct ib_cq *ib_create_cq(struct ib_device *device,
1972 			   ib_comp_handler comp_handler,
1973 			   void (*event_handler)(struct ib_event *, void *),
1974 			   void *cq_context, int cqe, int comp_vector);
1975 
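/*
 * Illustrative sketch only: create a CQ with at least 64 entries on
 * completion vector 0.  The helper name is hypothetical and @comp is a
 * caller-supplied ib_comp_handler; no async event handler is installed.
 */
static inline struct ib_cq *example_create_cq(struct ib_device *device,
					      ib_comp_handler comp,
					      void *cq_context)
{
	return ib_create_cq(device, comp, NULL, cq_context, 64, 0);
}
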
1976 /**
1977  * ib_resize_cq - Modifies the capacity of the CQ.
1978  * @cq: The CQ to resize.
1979  * @cqe: The minimum size of the CQ.
1980  *
1981  * Users can examine the cq structure to determine the actual CQ size.
1982  */
1983 int ib_resize_cq(struct ib_cq *cq, int cqe);
1984 
1985 /**
1986  * ib_modify_cq - Modifies moderation params of the CQ
1987  * @cq: The CQ to modify.
1988  * @cq_count: number of CQEs that will trigger an event
1989  * @cq_period: max period of time in usec before triggering an event
1990  *
1992 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1993 
1994 /**
1995  * ib_destroy_cq - Destroys the specified CQ.
1996  * @cq: The CQ to destroy.
1997  */
1998 int ib_destroy_cq(struct ib_cq *cq);
1999 
2000 /**
2001  * ib_poll_cq - poll a CQ for completion(s)
2002  * @cq: the CQ being polled
2003  * @num_entries: maximum number of completions to return
2004  * @wc: array of at least @num_entries &struct ib_wc where completions
2005  *   will be returned
2006  *
2007  * Poll a CQ for (possibly multiple) completions.  If the return value
2008  * is < 0, an error occurred.  If the return value is >= 0, it is the
2009  * number of completions returned.  If the return value is
2010  * non-negative and < num_entries, then the CQ was emptied.
2011  */
2012 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2013 			     struct ib_wc *wc)
2014 {
2015 	return cq->device->poll_cq(cq, num_entries, wc);
2016 }
2017 
2018 /**
2019  * ib_peek_cq - Returns the number of unreaped completions currently
2020  *   on the specified CQ.
2021  * @cq: The CQ to peek.
2022  * @wc_cnt: A minimum number of unreaped completions to check for.
2023  *
2024  * If the number of unreaped completions is greater than or equal to wc_cnt,
2025  * this function returns wc_cnt; otherwise, it returns the actual number of
2026  * unreaped completions.
2027  */
2028 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2029 
2030 /**
2031  * ib_req_notify_cq - Request completion notification on a CQ.
2032  * @cq: The CQ to generate an event for.
2033  * @flags:
2034  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
2035  *   to request an event on the next solicited event or next work
2036  *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
2037  *   may also be |ed in to request a hint about missed events, as
2038  *   described below.
2039  *
2040  * Return Value:
2041  *    < 0 means an error occurred while requesting notification
2042  *   == 0 means notification was requested successfully, and if
2043  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
2044  *        were missed and it is safe to wait for another event.  In
2045  *        this case it is guaranteed that any work completions added
2046  *        to the CQ since the last CQ poll will trigger a completion
2047  *        notification event.
2048  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
2049  *        in.  It means that the consumer must poll the CQ again to
2050  *        make sure it is empty to avoid missing an event because of a
2051  *        race between requesting notification and an entry being
2052  *        added to the CQ.  This return value means it is possible
2053  *        (but not guaranteed) that a work completion has been added
2054  *        to the CQ since the last poll without triggering a
2055  *        completion notification event.
2056  */
2057 static inline int ib_req_notify_cq(struct ib_cq *cq,
2058 				   enum ib_cq_notify_flags flags)
2059 {
2060 	return cq->device->req_notify_cq(cq, flags);
2061 }
2062 
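/*
 * Illustrative sketch only: drain and re-arm a CQ without missing an
 * event, using the IB_CQ_REPORT_MISSED_EVENTS contract described above.
 * The helper name and the "process wc" placeholder are hypothetical.
 */
static inline void example_drain_and_rearm_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			/* process &wc here */
		}
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
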
2063 /**
2064  * ib_req_ncomp_notif - Request completion notification when there are
2065  *   at least the specified number of unreaped completions on the CQ.
2066  * @cq: The CQ to generate an event for.
2067  * @wc_cnt: The number of unreaped completions that should be on the
2068  *   CQ before an event is generated.
2069  */
2070 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2071 {
2072 	return cq->device->req_ncomp_notif ?
2073 		cq->device->req_ncomp_notif(cq, wc_cnt) :
2074 		-ENOSYS;
2075 }
2076 
2077 /**
2078  * ib_get_dma_mr - Returns a memory region for system memory that is
2079  *   usable for DMA.
2080  * @pd: The protection domain associated with the memory region.
2081  * @mr_access_flags: Specifies the memory access rights.
2082  *
2083  * Note that the ib_dma_*() functions defined below must be used
2084  * to create/destroy addresses used with the Lkey or Rkey returned
2085  * by ib_get_dma_mr().
2086  */
2087 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
2088 
2089 /**
2090  * ib_dma_mapping_error - check a DMA addr for error
2091  * @dev: The device for which the dma_addr was created
2092  * @dma_addr: The DMA address to check
2093  */
2094 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2095 {
2096 	if (dev->dma_ops)
2097 		return dev->dma_ops->mapping_error(dev, dma_addr);
2098 	return dma_mapping_error(dev->dma_device, dma_addr);
2099 }
2100 
2101 /**
2102  * ib_dma_map_single - Map a kernel virtual address to DMA address
2103  * @dev: The device for which the dma_addr is to be created
2104  * @cpu_addr: The kernel virtual address
2105  * @size: The size of the region in bytes
2106  * @direction: The direction of the DMA
2107  */
2108 static inline u64 ib_dma_map_single(struct ib_device *dev,
2109 				    void *cpu_addr, size_t size,
2110 				    enum dma_data_direction direction)
2111 {
2112 	if (dev->dma_ops)
2113 		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
2114 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
2115 }
2116 
2117 /**
2118  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
2119  * @dev: The device for which the DMA address was created
2120  * @addr: The DMA address
2121  * @size: The size of the region in bytes
2122  * @direction: The direction of the DMA
2123  */
2124 static inline void ib_dma_unmap_single(struct ib_device *dev,
2125 				       u64 addr, size_t size,
2126 				       enum dma_data_direction direction)
2127 {
2128 	if (dev->dma_ops)
2129 		dev->dma_ops->unmap_single(dev, addr, size, direction);
2130 	else
2131 		dma_unmap_single(dev->dma_device, addr, size, direction);
2132 }
2133 
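/*
 * Illustrative sketch only: map a buffer for device reads and verify the
 * mapping with ib_dma_mapping_error(), the only supported check.  The
 * helper name and the use of 0 as an "invalid" value are hypothetical.
 */
static inline u64 example_map_buf(struct ib_device *dev, void *buf,
				  size_t len)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return 0;
	/* pair with ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE) */
	return dma_addr;
}
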
2134 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
2135 					  void *cpu_addr, size_t size,
2136 					  enum dma_data_direction direction,
2137 					  struct dma_attrs *attrs)
2138 {
2139 	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
2140 				    direction, attrs);
2141 }
2142 
2143 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
2144 					     u64 addr, size_t size,
2145 					     enum dma_data_direction direction,
2146 					     struct dma_attrs *attrs)
2147 {
2148 	return dma_unmap_single_attrs(dev->dma_device, addr, size,
2149 				      direction, attrs);
2150 }
2151 
2152 /**
2153  * ib_dma_map_page - Map a physical page to DMA address
2154  * @dev: The device for which the dma_addr is to be created
2155  * @page: The page to be mapped
2156  * @offset: The offset within the page
2157  * @size: The size of the region in bytes
2158  * @direction: The direction of the DMA
2159  */
2160 static inline u64 ib_dma_map_page(struct ib_device *dev,
2161 				  struct page *page,
2162 				  unsigned long offset,
2163 				  size_t size,
2164 				  enum dma_data_direction direction)
2165 {
2166 	if (dev->dma_ops)
2167 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
2168 	return dma_map_page(dev->dma_device, page, offset, size, direction);
2169 }
2170 
2171 /**
2172  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
2173  * @dev: The device for which the DMA address was created
2174  * @addr: The DMA address
2175  * @size: The size of the region in bytes
2176  * @direction: The direction of the DMA
2177  */
2178 static inline void ib_dma_unmap_page(struct ib_device *dev,
2179 				     u64 addr, size_t size,
2180 				     enum dma_data_direction direction)
2181 {
2182 	if (dev->dma_ops)
2183 		dev->dma_ops->unmap_page(dev, addr, size, direction);
2184 	else
2185 		dma_unmap_page(dev->dma_device, addr, size, direction);
2186 }
2187 
2188 /**
2189  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
2190  * @dev: The device for which the DMA addresses are to be created
2191  * @sg: The array of scatter/gather entries
2192  * @nents: The number of scatter/gather entries
2193  * @direction: The direction of the DMA
2194  */
2195 static inline int ib_dma_map_sg(struct ib_device *dev,
2196 				struct scatterlist *sg, int nents,
2197 				enum dma_data_direction direction)
2198 {
2199 	if (dev->dma_ops)
2200 		return dev->dma_ops->map_sg(dev, sg, nents, direction);
2201 	return dma_map_sg(dev->dma_device, sg, nents, direction);
2202 }
2203 
2204 /**
2205  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
2206  * @dev: The device for which the DMA addresses were created
2207  * @sg: The array of scatter/gather entries
2208  * @nents: The number of scatter/gather entries
2209  * @direction: The direction of the DMA
2210  */
2211 static inline void ib_dma_unmap_sg(struct ib_device *dev,
2212 				   struct scatterlist *sg, int nents,
2213 				   enum dma_data_direction direction)
2214 {
2215 	if (dev->dma_ops)
2216 		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
2217 	else
2218 		dma_unmap_sg(dev->dma_device, sg, nents, direction);
2219 }
2220 
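/*
 * Illustrative sketch only: map a scatterlist and honor the returned
 * count, which may be smaller than @nents if entries were coalesced;
 * zero means failure.  The helper name is hypothetical.
 */
static inline int example_map_sgl(struct ib_device *dev,
				  struct scatterlist *sgl, int nents)
{
	int mapped;

	mapped = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;
	/* walk the mapped entries with sg_dma_address()/sg_dma_len() */
	return mapped;
}
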
2221 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
2222 				      struct scatterlist *sg, int nents,
2223 				      enum dma_data_direction direction,
2224 				      struct dma_attrs *attrs)
2225 {
2226 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2227 }
2228 
2229 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
2230 					 struct scatterlist *sg, int nents,
2231 					 enum dma_data_direction direction,
2232 					 struct dma_attrs *attrs)
2233 {
2234 	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2235 }

2236 /**
2237  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
2238  * @dev: The device for which the DMA addresses were created
2239  * @sg: The scatter/gather entry
2240  *
2241  * Note: this function is obsolete. TODO: change all occurrences of
2242  * ib_sg_dma_address() into sg_dma_address().
2243  */
2244 static inline u64 ib_sg_dma_address(struct ib_device *dev,
2245 				    struct scatterlist *sg)
2246 {
2247 	return sg_dma_address(sg);
2248 }
2249 
2250 /**
2251  * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
2252  * @dev: The device for which the DMA addresses were created
2253  * @sg: The scatter/gather entry
2254  *
2255  * Note: this function is obsolete. TODO: change all occurrences of
2256  * ib_sg_dma_len() into sg_dma_len().
2257  */
2258 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
2259 					 struct scatterlist *sg)
2260 {
2261 	return sg_dma_len(sg);
2262 }
2263 
2264 /**
2265  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
2266  * @dev: The device for which the DMA address was created
2267  * @addr: The DMA address
2268  * @size: The size of the region in bytes
2269  * @dir: The direction of the DMA
2270  */
2271 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
2272 					      u64 addr,
2273 					      size_t size,
2274 					      enum dma_data_direction dir)
2275 {
2276 	if (dev->dma_ops)
2277 		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
2278 	else
2279 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2280 }
2281 
2282 /**
2283  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
2284  * @dev: The device for which the DMA address was created
2285  * @addr: The DMA address
2286  * @size: The size of the region in bytes
2287  * @dir: The direction of the DMA
2288  */
2289 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
2290 						 u64 addr,
2291 						 size_t size,
2292 						 enum dma_data_direction dir)
2293 {
2294 	if (dev->dma_ops)
2295 		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
2296 	else
2297 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2298 }
2299 
2300 /**
2301  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
2302  * @dev: The device for which the DMA address is requested
2303  * @size: The size of the region to allocate in bytes
2304  * @dma_handle: A pointer for returning the DMA address of the region
2305  * @flag: memory allocator flags
2306  */
2307 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
2308 					   size_t size,
2309 					   u64 *dma_handle,
2310 					   gfp_t flag)
2311 {
2312 	if (dev->dma_ops)
2313 		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
2314 	else {
2315 		dma_addr_t handle;
2316 		void *ret;
2317 
2318 		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
2319 		*dma_handle = handle;
2320 		return ret;
2321 	}
2322 }
2323 
2324 /**
2325  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
2326  * @dev: The device for which the DMA addresses were allocated
2327  * @size: The size of the region
2328  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
2329  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
2330  */
2331 static inline void ib_dma_free_coherent(struct ib_device *dev,
2332 					size_t size, void *cpu_addr,
2333 					u64 dma_handle)
2334 {
2335 	if (dev->dma_ops)
2336 		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
2337 	else
2338 		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
2339 }
2340 
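/*
 * Illustrative sketch only: lifetime pairing for coherent memory.  The
 * helper name and the 4096-byte size are hypothetical; note that the
 * DMA handle is a u64 at this interface, not a dma_addr_t.
 */
static inline void example_coherent_ring(struct ib_device *dev)
{
	u64 dma_handle;
	void *ring;

	ring = ib_dma_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
	if (!ring)
		return;
	/* ... hand dma_handle to the HCA, access ring from the CPU ... */
	ib_dma_free_coherent(dev, 4096, ring, dma_handle);
}
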
2341 /**
2342  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
2343  *   by an HCA.
2344  * @pd: The protection domain assigned to the registered region.
2345  * @phys_buf_array: Specifies a list of physical buffers to use in the
2346  *   memory region.
2347  * @num_phys_buf: Specifies the size of the phys_buf_array.
2348  * @mr_access_flags: Specifies the memory access rights.
2349  * @iova_start: The offset of the region's starting I/O virtual address.
2350  */
2351 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
2352 			     struct ib_phys_buf *phys_buf_array,
2353 			     int num_phys_buf,
2354 			     int mr_access_flags,
2355 			     u64 *iova_start);
2356 
2357 /**
2358  * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
2359  *   Conceptually, this call performs the functions of a deregister memory
2360  *   region followed by a register physical memory region.  Where possible,
2361  *   resources are reused instead of deallocated and reallocated.
2362  * @mr: The memory region to modify.
2363  * @mr_rereg_mask: A bit-mask used to indicate which of the following
2364  *   properties of the memory region are being modified.
2365  * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
2366  *   the new protection domain to associate with the memory region,
2367  *   otherwise, this parameter is ignored.
2368  * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2369  *   field specifies a list of physical buffers to use in the new
2370  *   translation, otherwise, this parameter is ignored.
2371  * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2372  *   field specifies the size of the phys_buf_array, otherwise, this
2373  *   parameter is ignored.
2374  * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
2375  *   field specifies the new memory access rights, otherwise, this
2376  *   parameter is ignored.
2377  * @iova_start: The offset of the region's starting I/O virtual address.
2378  */
2379 int ib_rereg_phys_mr(struct ib_mr *mr,
2380 		     int mr_rereg_mask,
2381 		     struct ib_pd *pd,
2382 		     struct ib_phys_buf *phys_buf_array,
2383 		     int num_phys_buf,
2384 		     int mr_access_flags,
2385 		     u64 *iova_start);
2386 
2387 /**
2388  * ib_query_mr - Retrieves information about a specific memory region.
2389  * @mr: The memory region to retrieve information about.
2390  * @mr_attr: The attributes of the specified memory region.
2391  */
2392 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2393 
2394 /**
2395  * ib_dereg_mr - Deregisters a memory region and removes it from the
2396  *   HCA translation table.
2397  * @mr: The memory region to deregister.
2398  *
2399  * This function can fail, if the memory region has memory windows bound to it.
2400  */
2401 int ib_dereg_mr(struct ib_mr *mr);
2402 
2404 /**
2405  * ib_create_mr - Allocates a memory region that may be used for
2406  *     signature handover operations.
2407  * @pd: The protection domain associated with the region.
2408  * @mr_init_attr: memory region init attributes.
2409  */
2410 struct ib_mr *ib_create_mr(struct ib_pd *pd,
2411 			   struct ib_mr_init_attr *mr_init_attr);
2412 
2413 /**
2414  * ib_destroy_mr - Destroys a memory region that was created using
2415  *     ib_create_mr and removes it from HW translation tables.
2416  * @mr: The memory region to destroy.
2417  *
2418  * This function can fail, if the memory region has memory windows bound to it.
2419  */
2420 int ib_destroy_mr(struct ib_mr *mr);
2421 
2422 /**
2423  * ib_alloc_fast_reg_mr - Allocates memory region usable with the
2424  *   IB_WR_FAST_REG_MR send work request.
2425  * @pd: The protection domain associated with the region.
2426  * @max_page_list_len: requested max physical buffer list length to be
2427  *   used with fast register work requests for this MR.
2428  */
2429 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2430 
2431 /**
2432  * ib_alloc_fast_reg_page_list - Allocates a page list array
2433  * @device: ib device pointer.
2434  * @page_list_len: size of the page list array to be allocated.
2435  *
2436  * This allocates and returns a struct ib_fast_reg_page_list * and a
2437  * page_list array that is at least page_list_len in size.  The actual
2438  * size is returned in max_page_list_len.  The caller is responsible
2439  * for initializing the contents of the page_list array before posting
2440  * a send work request with the IB_WR_FAST_REG_MR opcode.
2441  *
2442  * The page_list array entries must be translated using one of the
2443  * ib_dma_*() functions just like the addresses passed to
2444  * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
2445  * ib_fast_reg_page_list must not be modified by the caller until the
2446  * IB_WC_FAST_REG_MR work request completes.
2447  */
2448 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
2449 				struct ib_device *device, int page_list_len);
2450 
2451 /**
2452  * ib_free_fast_reg_page_list - Deallocates a previously allocated
2453  *   page list array.
2454  * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
2455  */
2456 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2457 
2458 /**
2459  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
2460  *   R_Key and L_Key.
2461  * @mr: struct ib_mr pointer to be updated.
2462  * @newkey: new key to be used.
2463  */
2464 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2465 {
2466 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2467 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2468 }
2469 
2470 /**
2471  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
2472  * for calculating a new rkey for type 2 memory windows.
2473  * @rkey: the rkey to increment.
2474  */
2475 static inline u32 ib_inc_rkey(u32 rkey)
2476 {
2477 	const u32 mask = 0x000000ff;
2478 	return ((rkey + 1) & mask) | (rkey & ~mask);
2479 }
2480 
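/*
 * Illustrative sketch only: a hypothetical helper producing the rkey to
 * advertise for the next binding of a type 2 memory window, per the
 * ib_inc_rkey() description above.
 */
static inline u32 example_next_mw_rkey(struct ib_mw *mw)
{
	/* only the low "key" byte changes; the index bits are preserved */
	return ib_inc_rkey(mw->rkey);
}
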
2481 /**
2482  * ib_alloc_mw - Allocates a memory window.
2483  * @pd: The protection domain associated with the memory window.
2484  * @type: The type of the memory window (1 or 2).
2485  */
2486 struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
2487 
2488 /**
2489  * ib_bind_mw - Posts a work request to the send queue of the specified
2490  *   QP, which binds the memory window to the given address range and
2491  *   remote access attributes.
2492  * @qp: QP to post the bind work request on.
2493  * @mw: The memory window to bind.
2494  * @mw_bind: Specifies information about the memory window, including
2495  *   its address range, remote access rights, and associated memory region.
2496  *
2497  * If there is no immediate error, the function will update the rkey member
2498  * of the mw parameter to its new value. The bind operation can still fail
2499  * asynchronously.
2500  */
2501 static inline int ib_bind_mw(struct ib_qp *qp,
2502 			     struct ib_mw *mw,
2503 			     struct ib_mw_bind *mw_bind)
2504 {
2505 	/* XXX reference counting in corresponding MR? */
2506 	return mw->device->bind_mw ?
2507 		mw->device->bind_mw(qp, mw, mw_bind) :
2508 		-ENOSYS;
2509 }
2510 
2511 /**
2512  * ib_dealloc_mw - Deallocates a memory window.
2513  * @mw: The memory window to deallocate.
2514  */
2515 int ib_dealloc_mw(struct ib_mw *mw);
2516 
2517 /**
2518  * ib_alloc_fmr - Allocates an unmapped fast memory region.
2519  * @pd: The protection domain associated with the unmapped region.
2520  * @mr_access_flags: Specifies the memory access rights.
2521  * @fmr_attr: Attributes of the unmapped region.
2522  *
2523  * A fast memory region must be mapped before it can be used as part of
2524  * a work request.
2525  */
2526 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2527 			    int mr_access_flags,
2528 			    struct ib_fmr_attr *fmr_attr);
2529 
2530 /**
2531  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2532  * @fmr: The fast memory region to associate with the pages.
2533  * @page_list: An array of physical pages to map to the fast memory region.
2534  * @list_len: The number of pages in page_list.
2535  * @iova: The I/O virtual address to use with the mapped region.
2536  */
2537 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2538 				  u64 *page_list, int list_len,
2539 				  u64 iova)
2540 {
2541 	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2542 }
2543 
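/*
 * Illustrative sketch only: map two hypothetical page addresses into an
 * FMR at a chosen I/O virtual address.  The pages must respect the
 * alignment given in the ib_fmr_attr used when the FMR was allocated.
 */
static inline int example_map_fmr(struct ib_fmr *fmr, u64 page0, u64 page1,
				  u64 iova)
{
	u64 pages[2] = { page0, page1 };

	return ib_map_phys_fmr(fmr, pages, 2, iova);
}
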
2544 /**
2545  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2546  * @fmr_list: A linked list of fast memory regions to unmap.
2547  */
2548 int ib_unmap_fmr(struct list_head *fmr_list);
2549 
2550 /**
2551  * ib_dealloc_fmr - Deallocates a fast memory region.
2552  * @fmr: The fast memory region to deallocate.
2553  */
2554 int ib_dealloc_fmr(struct ib_fmr *fmr);
2555 
2556 /**
2557  * ib_attach_mcast - Attaches the specified QP to a multicast group.
2558  * @qp: QP to attach to the multicast group.  The QP must be type
2559  *   IB_QPT_UD.
2560  * @gid: Multicast group GID.
2561  * @lid: Multicast group LID in host byte order.
2562  *
2563  * In order to send and receive multicast packets, subnet
2564  * administration must have created the multicast group and configured
2565  * the fabric appropriately.  The port associated with the specified
2566  * QP must also be a member of the multicast group.
2567  */
2568 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2569 
2570 /**
2571  * ib_detach_mcast - Detaches the specified QP from a multicast group.
2572  * @qp: QP to detach from the multicast group.
2573  * @gid: Multicast group GID.
2574  * @lid: Multicast group LID in host byte order.
2575  */
2576 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2577 
2578 /**
2579  * ib_alloc_xrcd - Allocates an XRC domain.
2580  * @device: The device on which to allocate the XRC domain.
2581  */
2582 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2583 
2584 /**
2585  * ib_dealloc_xrcd - Deallocates an XRC domain.
2586  * @xrcd: The XRC domain to deallocate.
2587  */
2588 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
2589 
2590 struct ib_flow *ib_create_flow(struct ib_qp *qp,
2591 			       struct ib_flow_attr *flow_attr, int domain);
2592 int ib_destroy_flow(struct ib_flow *flow_id);
2593 
2594 static inline int ib_check_mr_access(int flags)
2595 {
2596 	/*
2597 	 * Local write permission is required if remote write or
2598 	 * remote atomic permission is also requested.
2599 	 */
2600 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
2601 	    !(flags & IB_ACCESS_LOCAL_WRITE))
2602 		return -EINVAL;
2603 
2604 	return 0;
2605 }
2606 
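/*
 * Illustrative sketch only: validate a hypothetical flag combination
 * before registering a DMA MR.  Remote write without local write would
 * be rejected by ib_check_mr_access() above.
 */
static inline struct ib_mr *example_get_rw_dma_mr(struct ib_pd *pd)
{
	int flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
	int ret;

	ret = ib_check_mr_access(flags);
	if (ret)
		return ERR_PTR(ret);
	return ib_get_dma_mr(pd, flags);
}
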
2607 /**
2608  * ib_check_mr_status - lightweight check of MR status.
2609  *     This routine may provide status checks on a selected
2610  *     ib_mr.  The first use is for signature status check.
2611  *
2612  * @mr: A memory region.
2613  * @check_mask: Bitmask of which checks to perform from
2614  *     ib_mr_status_check enumeration.
2615  * @mr_status: The container of relevant status checks.
2616  *     Failed checks will be indicated in the status bitmask
2617  *     and the relevant info shall be in the error item.
2618  */
2619 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2620 		       struct ib_mr_status *mr_status);
2621 
2622 #endif /* IB_VERBS_H */
2623