/*
 * iSER transport for the Open iSCSI Initiator & iSER transport internals
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 Mike Christie
 * based on code maintained by open-iscsi@googlegroups.com
 *
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__

#include <linux/types.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/iser.h>

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

#define DRV_NAME	"iser"
#define PFX		DRV_NAME ": "
#define DRV_VER		"1.6"

#define iser_dbg(fmt, arg...)				 \
	do {						 \
		if (unlikely(iser_debug_level > 2))	 \
			printk(KERN_DEBUG PFX "%s: " fmt,\
				__func__ , ## arg);	 \
	} while (0)

#define iser_warn(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 0))	\
			pr_warn(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_info(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 1))	\
			pr_info(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_err(fmt, arg...) \
	pr_err(PFX "%s: " fmt, __func__ , ## arg)
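
/*
 * Typical use of the logging helpers above, for illustration only
 * (iser_debug_level is the module parameter declared further below;
 * the arguments shown are hypothetical):
 *
 *	iser_err("qp creation failed, ret=%d\n", ret);
 *	iser_info("conn %p established\n", iser_conn);
 *	iser_dbg("posting %d rx buffers\n", count);
 *
 * iser_err() always prints; iser_warn(), iser_info() and iser_dbg()
 * require iser_debug_level above 0, 1 and 2 respectively.
 */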

#define SHIFT_4K	12
#define SIZE_4K	(1ULL << SHIFT_4K)
#define MASK_4K	(~(SIZE_4K-1))

/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS		1024
#define ISCSI_ISER_DEF_SG_TABLESIZE	((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
/* Maximum support is 8MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE	((16384 * 512) >> SHIFT_4K)
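
/*
 * Worked example of the sizing above: ISER_DEF_MAX_SECTORS (1024)
 * 512-byte sectors give 1024 * 512 = 512KB per I/O, and shifting by
 * SHIFT_4K yields 512KB / 4KB = 128 scatter-gather entries for
 * ISCSI_ISER_DEF_SG_TABLESIZE. The 8MB maximum works out the same
 * way: (16384 * 512) >> 12 = 2048 entries.
 */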

#define ISER_DEF_XMIT_CMDS_DEFAULT		512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
	#define ISER_DEF_XMIT_CMDS_MAX		ISCSI_DEF_XMIT_CMDS_MAX
#else
	#define ISER_DEF_XMIT_CMDS_MAX		ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN		ISER_DEF_XMIT_CMDS_MAX

/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS		4 /* NOOP_IN(2), ASYNC_EVENT(2)    */

#define ISER_MAX_TX_MISC_PDUS		6 /* NOOP_OUT(2), TEXT(1),         *
					   * SCSI_TMFUNC(2), LOGOUT(1) */

#define ISER_QP_MAX_RECV_DTOS		(ISER_DEF_XMIT_CMDS_MAX)

#define ISER_MIN_POSTED_RX		(ISER_DEF_XMIT_CMDS_MAX >> 2)

/* the max TX (send) WR supported by the iSER QP is defined by                *
 * max_send_wr = T * (1 + D) + C ; D is the maximum number of inflight       *
 * dataouts we expect per SCSI command. The tx posting & completion handling *
 * code supports an -EAGAIN scheme where tx is suspended till the QP has     *
 * room for more send WRs. D=8 comes from 64K/8K                             */

#define ISER_INFLIGHT_DATAOUTS		8

#define ISER_QP_MAX_REQ_DTOS		(ISER_DEF_XMIT_CMDS_MAX *    \
					(1 + ISER_INFLIGHT_DATAOUTS) + \
					ISER_MAX_TX_MISC_PDUS        + \
					ISER_MAX_RX_MISC_PDUS)
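
/*
 * Assuming ISCSI_DEF_XMIT_CMDS_MAX does not exceed
 * ISER_DEF_XMIT_CMDS_DEFAULT, ISER_DEF_XMIT_CMDS_MAX is 512 and the
 * expression above evaluates to 512 * (1 + 8) + 6 + 4 = 4618 send WRs.
 */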

/* Max registration work requests per command */
#define ISER_MAX_REG_WR_PER_CMD		5

/* For Signature we don't support DATAOUTs so no need to make room for them */
#define ISER_QP_SIG_MAX_REQ_DTOS	(ISER_DEF_XMIT_CMDS_MAX	*       \
					(1 + ISER_MAX_REG_WR_PER_CMD) + \
					ISER_MAX_TX_MISC_PDUS         + \
					ISER_MAX_RX_MISC_PDUS)

#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr			\
					 - ISER_MAX_TX_MISC_PDUS	\
					 - ISER_MAX_RX_MISC_PDUS) /	\
					 (1 + ISER_INFLIGHT_DATAOUTS))
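
/*
 * ISER_GET_MAX_XMIT_CMDS() is the inverse of the sizing above: given the
 * send queue depth a device actually grants, it recovers how many SCSI
 * commands can stay in flight. For example, a hypothetical device that
 * caps max_send_wr at 1024 allows (1024 - 6 - 4) / (1 + 8) = 112 commands.
 */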

#define ISER_SIGNAL_CMD_COUNT 32
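
/*
 * ISER_SIGNAL_CMD_COUNT bounds how many consecutive send WRs may be
 * posted unsignaled before one must request a completion, so the send
 * queue does not fill up with unreaped WRs. A minimal sketch of such a
 * policy (illustrative only; the driver's actual helper lives in the
 * .c files and may differ):
 *
 *	static inline bool example_signal_comp(u8 sig_count)
 *	{
 *		return (sig_count % ISER_SIGNAL_CMD_COUNT) == 0;
 *	}
 */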

/* Constant PDU length calculations */
#define ISER_HEADERS_LEN	(sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))

#define ISER_RECV_DATA_SEG_LEN	128
#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
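
/*
 * ISER_RX_PAYLOAD_SIZE covers the iSER and iSCSI headers plus a 128 byte
 * data segment, which is enough for the control-type PDUs received on
 * the regular data path (e.g. sense data). Login responses may carry up
 * to ISCSI_DEF_MAX_RECV_SEG_LEN bytes, so the login buffer is sized
 * separately via ISER_RX_LOGIN_SIZE.
 */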

/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE		    64

enum iser_conn_state {
	ISER_CONN_INIT,		   /* descriptor allocated, no conn       */
	ISER_CONN_PENDING,	   /* in the process of being established */
	ISER_CONN_UP,		   /* up and running                      */
	ISER_CONN_TERMINATING,	   /* in the process of being terminated  */
	ISER_CONN_DOWN,		   /* shut down                           */
	ISER_CONN_STATES_NUM
};

enum iser_task_status {
	ISER_TASK_STATUS_INIT = 0,
	ISER_TASK_STATUS_STARTED,
	ISER_TASK_STATUS_COMPLETED
};

enum iser_data_dir {
	ISER_DIR_IN = 0,	   /* to initiator */
	ISER_DIR_OUT,		   /* from initiator */
	ISER_DIRS_NUM
};

/**
 * struct iser_data_buf - iSER data buffer
 *
 * @sg:           pointer to the sg list
 * @size:         num entries of this sg
 * @data_len:     total buffer byte len
 * @dma_nents:    returned by dma_map_sg
 */
struct iser_data_buf {
	struct scatterlist *sg;
	int                size;
	unsigned long      data_len;
	unsigned int       dma_nents;
};

/* fwd declarations */
struct iser_device;
struct iscsi_iser_task;
struct iscsi_endpoint;
struct iser_reg_resources;

/**
 * struct iser_mem_reg - iSER memory registration info
 *
 * @sge:          memory region sg element
 * @rkey:         memory region remote key
 * @mem_h:        pointer to registration context (FMR/Fastreg)
 */
struct iser_mem_reg {
	struct ib_sge	 sge;
	u32		 rkey;
	void		*mem_h;
};

enum iser_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_SCSI_COMMAND,
	ISCSI_TX_DATAOUT
};

/* Maximum number of work requests per task:
 * Data memory region local invalidate + fast registration
 * Protection memory region local invalidate + fast registration
 * Signature memory region local invalidate + fast registration
 * PDU send
 */
#define ISER_MAX_WRS 7
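/* i.e. three (local invalidate + fast registration) pairs plus one send: 3 * 2 + 1 = 7 */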

/**
 * struct iser_tx_desc - iSER TX descriptor
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @type:          command/control/dataout
 * @dma_addr:      header buffer dma_address
 * @tx_sg:         sg[0] points to iser/iscsi headers
 *                 sg[1] optionally points to either of immediate data
 *                 unsolicited data-out or control
 * @num_sge:       number sges used on this TX task
 * @cqe:           completion handler
 * @mapped:        Is the task header mapped
 * @wr_idx:        Current WR index
 * @wrs:           Array of WRs per task
 * @data_reg:      Data buffer registration details
 * @prot_reg:      Protection buffer registration details
 * @sig_attrs:     Signature attributes
 */
struct iser_tx_desc {
	struct iser_ctrl             iser_header;
	struct iscsi_hdr             iscsi_header;
	enum   iser_desc_type        type;
	u64		             dma_addr;
	struct ib_sge		     tx_sg[2];
	int                          num_sge;
	struct ib_cqe		     cqe;
	bool			     mapped;
	u8                           wr_idx;
	union iser_wr {
		struct ib_send_wr		send;
		struct ib_reg_wr		fast_reg;
		struct ib_sig_handover_wr	sig;
	} wrs[ISER_MAX_WRS];
	struct iser_mem_reg          data_reg;
	struct iser_mem_reg          prot_reg;
	struct ib_sig_attrs          sig_attrs;
};

#define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
				 sizeof(u64) + sizeof(struct ib_sge) + \
				 sizeof(struct ib_cqe)))
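
/*
 * ISER_RX_PAD_SIZE rounds struct iser_rx_desc below up to a fixed 256
 * bytes, so the rx_descs array in struct iser_conn is laid out in
 * constant-size slots.
 */
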
/**
 * struct iser_rx_desc - iSER RX descriptor
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @data:          received data segment
 * @dma_addr:      receive buffer dma address
 * @rx_sg:         ib_sge of receive buffer
 * @cqe:           completion handler
 * @pad:           for sense data TODO: Modify to maximum sense length supported
 */
struct iser_rx_desc {
	struct iser_ctrl             iser_header;
	struct iscsi_hdr             iscsi_header;
	char		             data[ISER_RECV_DATA_SEG_LEN];
	u64		             dma_addr;
	struct ib_sge		     rx_sg;
	struct ib_cqe		     cqe;
	char		             pad[ISER_RX_PAD_SIZE];
} __packed;

/**
 * struct iser_login_desc - iSER login descriptor
 *
 * @req:           pointer to login request buffer
 * @rsp:           pointer to login response buffer
 * @req_dma:       DMA address of login request buffer
 * @rsp_dma:       DMA address of login response buffer
 * @sge:           IB sge for login post recv
 * @cqe:           completion handler
 */
struct iser_login_desc {
	void                         *req;
	void                         *rsp;
	u64                          req_dma;
	u64                          rsp_dma;
	struct ib_sge                sge;
	struct ib_cqe		     cqe;
} __attribute__((packed));

struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;

/**
 * struct iser_comp - iSER completion context
 *
 * @cq:         completion queue
 * @active_qps: Number of active QPs attached
 *              to completion context
 */
struct iser_comp {
	struct ib_cq		*cq;
	int                      active_qps;
};

/**
 * struct iser_reg_ops - Memory registration operations
 *     per-device registration schemes
 *
 * @alloc_reg_res:     Allocate registration resources
 * @free_reg_res:      Free registration resources
 * @reg_mem:           Register memory buffers
 * @unreg_mem:         Un-register memory buffers
 * @reg_desc_get:      Get a registration descriptor from pool
 * @reg_desc_put:      Put a registration descriptor back to pool
 */
struct iser_reg_ops {
	int            (*alloc_reg_res)(struct ib_conn *ib_conn,
					unsigned cmds_max,
					unsigned int size);
	void           (*free_reg_res)(struct ib_conn *ib_conn);
	int            (*reg_mem)(struct iscsi_iser_task *iser_task,
				  struct iser_data_buf *mem,
				  struct iser_reg_resources *rsc,
				  struct iser_mem_reg *reg);
	void           (*unreg_mem)(struct iscsi_iser_task *iser_task,
				    enum iser_data_dir cmd_dir);
	struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
	void           (*reg_desc_put)(struct ib_conn *ib_conn,
				       struct iser_fr_desc *desc);
};

/**
 * struct iser_device - iSER device handle
 *
 * @ib_device:     RDMA device
 * @pd:            Protection Domain for this device
 * @event_handler: IB events handle routine
 * @ig_list:	   entry in devices list
 * @refcount:      Reference counter, driven by open iser connections
 * @comps_used:    Number of completion contexts used, Min between online
 *                 cpus and device max completion vectors
 * @comps:         Dynamically allocated array of completion handlers
 * @reg_ops:       Registration ops
 * @remote_inv_sup: Remote invalidate is supported on this device
 */
struct iser_device {
	struct ib_device             *ib_device;
	struct ib_pd	             *pd;
	struct ib_event_handler      event_handler;
	struct list_head             ig_list;
	int                          refcount;
	int			     comps_used;
	struct iser_comp	     *comps;
	const struct iser_reg_ops    *reg_ops;
	bool                         remote_inv_sup;
};

#define ISER_CHECK_GUARD	0xc0
#define ISER_CHECK_REFTAG	0x0f
#define ISER_CHECK_APPTAG	0x30

/**
 * struct iser_reg_resources - Fast registration resources
 *
 * @mr:         memory region
 * @fmr_pool:   pool of fmrs
 * @page_vec:   fast reg page list used by fmr pool
 * @mr_valid:   is mr valid indicator
 */
struct iser_reg_resources {
	union {
		struct ib_mr             *mr;
		struct ib_fmr_pool       *fmr_pool;
	};
	struct iser_page_vec             *page_vec;
	u8				  mr_valid:1;
};

/**
 * struct iser_pi_context - Protection information context
 *
 * @rsc:             protection buffer registration resources
 * @sig_mr:          signature enable memory region
 * @sig_mr_valid:    is sig_mr valid indicator
 * @sig_protected:   is region protected indicator
 */
struct iser_pi_context {
	struct iser_reg_resources	rsc;
	struct ib_mr                   *sig_mr;
	u8                              sig_mr_valid:1;
	u8                              sig_protected:1;
};

/**
 * struct iser_fr_desc - Fast registration descriptor
 *
 * @list:           entry in connection fastreg pool
 * @rsc:            data buffer registration resources
 * @pi_ctx:         protection information context
 */
struct iser_fr_desc {
	struct list_head		  list;
	struct iser_reg_resources	  rsc;
	struct iser_pi_context		 *pi_ctx;
};

/**
 * struct iser_fr_pool - connection fast registration pool
 *
 * @list:                list of fastreg descriptors
 * @lock:                protects fmr/fastreg pool
 * @size:                size of the pool
 */
struct iser_fr_pool {
	struct list_head        list;
	spinlock_t              lock;
	int                     size;
};

/**
 * struct ib_conn - Infiniband related objects
 *
 * @cma_id:              rdma_cm connection manager handle
 * @qp:                  Connection Queue-pair
 * @post_recv_buf_count: post receive counter
 * @sig_count:           send work request signal count
 * @rx_wr:               receive work request for batch posts
 * @device:              reference to iser device
 * @comp:                iser completion context
 * @fr_pool:             connection fast registration pool
 * @pi_support:          Indicate device T10-PI support
 * @reg_cqe:             completion handler for memory registrations
 */
struct ib_conn {
	struct rdma_cm_id           *cma_id;
	struct ib_qp	            *qp;
	int                          post_recv_buf_count;
	u8                           sig_count;
	struct ib_recv_wr	     rx_wr[ISER_MIN_POSTED_RX];
	struct iser_device          *device;
	struct iser_comp	    *comp;
	struct iser_fr_pool          fr_pool;
	bool			     pi_support;
	struct ib_cqe		     reg_cqe;
};

/**
 * struct iser_conn - iSER connection context
 *
 * @ib_conn:          connection RDMA resources
 * @iscsi_conn:       link to matching iscsi connection
 * @ep:               transport handle
 * @state:            connection logical state
 * @qp_max_recv_dtos: maximum number of data outs, corresponds
 *                    to max number of post recvs
 * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
 * @min_posted_rx:    (qp_max_recv_dtos >> 2)
 * @max_cmds:         maximum cmds allowed for this connection
 * @name:             connection peer portal
 * @release_work:     deferred work for release job
 * @state_mutex:      protects iser connection state
 * @stop_completion:  conn_stop completion
 * @ib_completion:    RDMA cleanup completion
 * @up_completion:    connection establishment completed
 *                    (state is ISER_CONN_UP)
 * @conn_list:        entry in ig conn list
 * @login_desc:       login descriptor
 * @rx_desc_head:     head of rx_descs cyclic buffer
 * @rx_descs:         rx buffers array (cyclic buffer)
 * @num_rx_descs:     number of rx descriptors
 * @scsi_sg_tablesize: scsi host sg_tablesize
 * @scsi_max_sectors: scsi host max sectors
 * @snd_w_inv:        connection uses remote invalidation
 */
struct iser_conn {
	struct ib_conn		     ib_conn;
	struct iscsi_conn	     *iscsi_conn;
	struct iscsi_endpoint	     *ep;
	enum iser_conn_state	     state;
	unsigned		     qp_max_recv_dtos;
	unsigned		     qp_max_recv_dtos_mask;
	unsigned		     min_posted_rx;
	u16                          max_cmds;
	char			     name[ISER_OBJECT_NAME_SIZE];
	struct work_struct	     release_work;
	struct mutex		     state_mutex;
	struct completion	     stop_completion;
	struct completion	     ib_completion;
	struct completion	     up_completion;
	struct list_head	     conn_list;
	struct iser_login_desc       login_desc;
	unsigned int		     rx_desc_head;
	struct iser_rx_desc	     *rx_descs;
	u32                          num_rx_descs;
	unsigned short               scsi_sg_tablesize;
	unsigned int                 scsi_max_sectors;
	bool			     snd_w_inv;
};

/**
 * struct iscsi_iser_task - iser task context
 *
 * @desc:             TX descriptor
 * @iser_conn:        link to iser connection
 * @status:           current task status
 * @sc:               link to scsi command
 * @command_sent:     indicate if command was sent
 * @dir:              iser data direction
 * @rdma_reg:         task rdma registration desc
 * @data:             iser data buffer desc
 * @prot:             iser protection buffer desc
 */
struct iscsi_iser_task {
	struct iser_tx_desc          desc;
	struct iser_conn	     *iser_conn;
	enum iser_task_status	     status;
	struct scsi_cmnd	     *sc;
	int                          command_sent;
	int                          dir[ISER_DIRS_NUM];
	struct iser_mem_reg          rdma_reg[ISER_DIRS_NUM];
	struct iser_data_buf         data[ISER_DIRS_NUM];
	struct iser_data_buf         prot[ISER_DIRS_NUM];
};

struct iser_page_vec {
	u64 *pages;
	int npages;
	struct ib_mr fake_mr;
};

/**
 * struct iser_global - iSER global context
 *
 * @device_list_mutex:    protects device_list
 * @device_list:          iser devices global list
 * @connlist_mutex:       protects connlist
 * @connlist:             iser connections global list
 * @desc_cache:           kmem cache for tx dataout
 */
struct iser_global {
	struct mutex      device_list_mutex;
	struct list_head  device_list;
	struct mutex      connlist_mutex;
	struct list_head  connlist;
	struct kmem_cache *desc_cache;
};

extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;

int iser_assign_reg_ops(struct iser_device *device);

int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr);

void iscsi_iser_recv(struct iscsi_conn *conn,
		     struct iscsi_hdr *hdr,
		     char *rx_data,
		     int rx_data_len);

void iser_conn_init(struct iser_conn *iser_conn);

void iser_conn_release(struct iser_conn *iser_conn);

int iser_conn_terminate(struct iser_conn *iser_conn);

void iser_release_work(struct work_struct *work);

void iser_err_comp(struct ib_wc *wc, const char *type);
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);

void iser_task_rdma_init(struct iscsi_iser_task *task);

void iser_task_rdma_finalize(struct iscsi_iser_task *task);

void iser_free_rx_descriptors(struct iser_conn *iser_conn);

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *mem,
				     enum iser_data_dir cmd_dir);

int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir,
		      bool all_imm);
void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
			 enum iser_data_dir dir);

int  iser_connect(struct iser_conn *iser_conn,
		  struct sockaddr *src_addr,
		  struct sockaddr *dst_addr,
		  int non_blocking);

void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir);

int  iser_post_recvl(struct iser_conn *iser_conn);
int  iser_post_recvm(struct iser_conn *iser_conn, int count);
int  iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		    bool signal);

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir);

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir);

int  iser_initialize_task_headers(struct iscsi_task *task,
			struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session);
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size);
void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector);
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn);
void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc);
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc);

static inline struct ib_send_wr *
iser_tx_next_wr(struct iser_tx_desc *tx_desc)
{
	struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send;
	struct ib_send_wr *last_wr;

	if (tx_desc->wr_idx) {
		last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send;
		last_wr->next = cur_wr;
	}
	tx_desc->wr_idx++;

	return cur_wr;
}
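
/*
 * Illustration only: callers take consecutive WR slots from a task's
 * descriptor and iser_tx_next_wr() links each new WR to the previous
 * one, so e.g. a fast registration WR followed by the PDU send WR can
 * be posted with a single post on wrs[0]. A hypothetical caller, not a
 * driver API:
 *
 *	struct ib_reg_wr *reg_wr =
 *		container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
 *	struct ib_send_wr *send_wr = iser_tx_next_wr(tx_desc);
 *
 *	... fill reg_wr and send_wr, then post tx_desc->wrs[0].send ...
 */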

static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
{
	return container_of(ib_conn, struct iser_conn, ib_conn);
}

static inline struct iser_rx_desc *
iser_rx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_rx_desc, cqe);
}

static inline struct iser_tx_desc *
iser_tx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_tx_desc, cqe);
}

static inline struct iser_login_desc *
iser_login(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_login_desc, cqe);
}
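
/*
 * The accessors above recover the owning descriptor from the ib_cqe a
 * CQ completion callback receives in wc->wr_cqe. A minimal hypothetical
 * handler, for illustration only (the real handlers are the iser_*_comp
 * and iser_*_rsp routines declared above):
 *
 *	void example_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
 *
 *		if (unlikely(wc->status != IB_WC_SUCCESS)) {
 *			iser_err_comp(wc, "task_rsp");
 *			return;
 *		}
 *		... process desc->iscsi_header and desc->data ...
 *	}
 */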

#endif