/*
 * iSER transport for the Open iSCSI Initiator & iSER transport internals
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 Mike Christie
 * based on code maintained by open-iscsi@googlegroups.com
 *
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__

#include <linux/types.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

#define DRV_NAME	"iser"
#define PFX		DRV_NAME ": "
#define DRV_VER		"1.6"

#define iser_dbg(fmt, arg...)				 \
	do {						 \
		if (unlikely(iser_debug_level > 2))	 \
			printk(KERN_DEBUG PFX "%s: " fmt,\
				__func__ , ## arg);	 \
	} while (0)

#define iser_warn(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 0))	\
			pr_warn(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_info(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 1))	\
			pr_info(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_err(fmt, arg...) \
	pr_err(PFX "%s: " fmt, __func__ , ## arg)
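
/*
 * Note: iser_err() always prints, while the macros above only emit output
 * once iser_debug_level (declared further below) is raised: warnings at
 * level 1 and above, info at level 2 and above, debug at level 3 and above.
 */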

#define SHIFT_4K	12
#define SIZE_4K	(1ULL << SHIFT_4K)
#define MASK_4K	(~(SIZE_4K-1))

/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS		1024
#define ISCSI_ISER_DEF_SG_TABLESIZE	((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
/* Maximum support is 8MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE	((16384 * 512) >> SHIFT_4K)
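
/*
 * Worked out: 1024 sectors * 512 bytes = 512KB, and 512KB >> SHIFT_4K gives a
 * default sg_tablesize of 128 entries; the 8MB maximum (16384 * 512 bytes)
 * likewise gives 2048 entries.
 */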

#define ISER_DEF_XMIT_CMDS_DEFAULT		512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
	#define ISER_DEF_XMIT_CMDS_MAX		ISCSI_DEF_XMIT_CMDS_MAX
#else
	#define ISER_DEF_XMIT_CMDS_MAX		ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN		ISER_DEF_XMIT_CMDS_MAX

/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS		4 /* NOOP_IN(2), ASYNC_EVENT(2)    */

#define ISER_MAX_TX_MISC_PDUS		6 /* NOOP_OUT(2), TEXT(1),         *
					   * SCSI_TMFUNC(2), LOGOUT(1) */

#define ISER_QP_MAX_RECV_DTOS		(ISER_DEF_XMIT_CMDS_MAX)

#define ISER_MIN_POSTED_RX		(ISER_DEF_XMIT_CMDS_MAX >> 2)

/* the max TX (send) WR supported by the iSER QP is defined by                 *
 * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect   *
 * to have at max per SCSI command. The tx posting & completion handling code  *
 * supports an -EAGAIN scheme where tx is suspended till the QP has room for   *
 * more send WRs. D=8 comes from 64K/8K                                        */

#define ISER_INFLIGHT_DATAOUTS		8

#define ISER_QP_MAX_REQ_DTOS		(ISER_DEF_XMIT_CMDS_MAX *    \
					(1 + ISER_INFLIGHT_DATAOUTS) + \
					ISER_MAX_TX_MISC_PDUS        + \
					ISER_MAX_RX_MISC_PDUS)
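
/*
 * Example budget, assuming the common case where ISCSI_DEF_XMIT_CMDS_MAX does
 * not exceed 512, so T = ISER_DEF_XMIT_CMDS_MAX = 512:
 * max_send_wr = 512 * (1 + 8) + 6 + 4 = 4618 send work requests.
 */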

/* Max registration work requests per command */
#define ISER_MAX_REG_WR_PER_CMD		5

/* For Signature we don't support DATAOUTs so no need to make room for them */
#define ISER_QP_SIG_MAX_REQ_DTOS	(ISER_DEF_XMIT_CMDS_MAX	*       \
					(1 + ISER_MAX_REG_WR_PER_CMD) + \
					ISER_MAX_TX_MISC_PDUS         + \
					ISER_MAX_RX_MISC_PDUS)

#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr			\
					 - ISER_MAX_TX_MISC_PDUS	\
					 - ISER_MAX_RX_MISC_PDUS) /	\
					 (1 + ISER_INFLIGHT_DATAOUTS))
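
/*
 * ISER_GET_MAX_XMIT_CMDS() is the inverse of the budget above: given the
 * number of send WRs a device can actually support, it derives how many SCSI
 * commands may be kept in flight on the connection.
 */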

#define ISER_SIGNAL_CMD_COUNT 32
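
/*
 * A signaled send completion is requested once every ISER_SIGNAL_CMD_COUNT
 * TX descriptors (see sig_count in struct ib_conn below), presumably to bound
 * completion processing overhead while still draining the send queue
 * regularly.
 */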

#define ISER_VER			0x10
#define ISER_WSV			0x08
#define ISER_RSV			0x04
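
/*
 * Flag bits carried in the first byte of the iSER header (struct iser_hdr
 * below): ISER_VER is the protocol version value, while ISER_WSV and ISER_RSV
 * mark the write and read STags as valid.
 */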

/**
 * struct iser_hdr - iSER header
 *
 * @flags:        flags support (zbva, remote_inv)
 * @rsvd:         reserved
 * @write_stag:   write rkey
 * @write_va:     write virtual address
 * @read_stag:    read rkey
 * @read_va:      read virtual address
 */
struct iser_hdr {
	u8      flags;
	u8      rsvd[3];
	__be32  write_stag;
	__be64  write_va;
	__be32  read_stag;
	__be64  read_va;
} __attribute__((packed));

#define ISER_ZBVA_NOT_SUPPORTED		0x80
#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40

struct iser_cm_hdr {
	u8      flags;
	u8      rsvd[3];
} __packed;

/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))

#define ISER_RECV_DATA_SEG_LEN	128
#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
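
/*
 * With the packed 28-byte iSER header and the 48-byte iSCSI BHS,
 * ISER_HEADERS_LEN is 76 bytes, so the default RX buffer is 76 + 128 = 204
 * bytes; login buffers use the larger ISCSI_DEF_MAX_RECV_SEG_LEN payload
 * instead.
 */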

/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE		    64

enum iser_conn_state {
	ISER_CONN_INIT,		   /* descriptor alloc'd, no conn         */
	ISER_CONN_PENDING,	   /* in the process of being established */
	ISER_CONN_UP,		   /* up and running                      */
	ISER_CONN_TERMINATING,	   /* in the process of being terminated  */
	ISER_CONN_DOWN,		   /* shut down                           */
	ISER_CONN_STATES_NUM
};

enum iser_task_status {
	ISER_TASK_STATUS_INIT = 0,
	ISER_TASK_STATUS_STARTED,
	ISER_TASK_STATUS_COMPLETED
};

enum iser_data_dir {
	ISER_DIR_IN = 0,	   /* to initiator */
	ISER_DIR_OUT,		   /* from initiator */
	ISER_DIRS_NUM
};

/**
 * struct iser_data_buf - iSER data buffer
 *
 * @sg:           pointer to the sg list
 * @size:         num entries of this sg
 * @data_len:     total buffer byte len
 * @dma_nents:    returned by dma_map_sg
 */
struct iser_data_buf {
	struct scatterlist *sg;
	int                size;
	unsigned long      data_len;
	unsigned int       dma_nents;
};

/* fwd declarations */
struct iser_device;
struct iscsi_iser_task;
struct iscsi_endpoint;
struct iser_reg_resources;

/**
 * struct iser_mem_reg - iSER memory registration info
 *
 * @sge:          memory region sg element
 * @rkey:         memory region remote key
 * @mem_h:        pointer to registration context (FMR/Fastreg)
 */
struct iser_mem_reg {
	struct ib_sge	 sge;
	u32		 rkey;
	void		*mem_h;
};

enum iser_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_SCSI_COMMAND,
	ISCSI_TX_DATAOUT
};

/* Maximum number of work requests per task:
 * Data memory region local invalidate + fast registration
 * Protection memory region local invalidate + fast registration
 * Signature memory region local invalidate + fast registration
 * PDU send
 */
#define ISER_MAX_WRS 7

/**
 * struct iser_tx_desc - iSER TX descriptor
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @type:          command/control/dataout
 * @dma_addr:      header buffer dma_address
 * @tx_sg:         sg[0] points to iser/iscsi headers
 *                 sg[1] optionally points to either immediate data,
 *                 unsolicited data-out or control
 * @num_sge:       number of sges used on this TX task
 * @cqe:           completion handler
 * @mapped:        Is the task header mapped
 * @wr_idx:        Current WR index
 * @wrs:           Array of WRs per task
 * @data_reg:      Data buffer registration details
 * @prot_reg:      Protection buffer registration details
 * @sig_attrs:     Signature attributes
 */
struct iser_tx_desc {
	struct iser_hdr              iser_header;
	struct iscsi_hdr             iscsi_header;
	enum   iser_desc_type        type;
	u64		             dma_addr;
	struct ib_sge		     tx_sg[2];
	int                          num_sge;
	struct ib_cqe		     cqe;
	bool			     mapped;
	u8                           wr_idx;
	union iser_wr {
		struct ib_send_wr		send;
		struct ib_reg_wr		fast_reg;
		struct ib_sig_handover_wr	sig;
	} wrs[ISER_MAX_WRS];
	struct iser_mem_reg          data_reg;
	struct iser_mem_reg          prot_reg;
	struct ib_sig_attrs          sig_attrs;
};

#define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
				 sizeof(u64) + sizeof(struct ib_sge) + \
				 sizeof(struct ib_cqe)))
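
/*
 * The pad rounds struct iser_rx_desc below up to 256 bytes on typical 64-bit
 * builds (ISER_RX_PAYLOAD_SIZE of 204 bytes plus the dma_addr, rx_sg and cqe
 * fields), keeping every RX descriptor at a fixed size.
 */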
/**
 * struct iser_rx_desc - iSER RX descriptor
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @data:          received data segment
 * @dma_addr:      receive buffer dma address
 * @rx_sg:         ib_sge of receive buffer
 * @cqe:           completion handler
 * @pad:           for sense data TODO: modify to maximum sense length supported
 */
struct iser_rx_desc {
	struct iser_hdr              iser_header;
	struct iscsi_hdr             iscsi_header;
	char		             data[ISER_RECV_DATA_SEG_LEN];
	u64		             dma_addr;
	struct ib_sge		     rx_sg;
	struct ib_cqe		     cqe;
	char		             pad[ISER_RX_PAD_SIZE];
} __packed;

/**
 * struct iser_login_desc - iSER login descriptor
 *
 * @req:           pointer to login request buffer
 * @rsp:           pointer to login response buffer
 * @req_dma:       DMA address of login request buffer
 * @rsp_dma:       DMA address of login response buffer
 * @sge:           IB sge for login post recv
 * @cqe:           completion handler
 */
struct iser_login_desc {
	void                         *req;
	void                         *rsp;
	u64                          req_dma;
	u64                          rsp_dma;
	struct ib_sge                sge;
	struct ib_cqe		     cqe;
} __attribute__((packed));

struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;

/**
 * struct iser_comp - iSER completion context
 *
 * @cq:         completion queue
 * @active_qps: Number of active QPs attached
 *              to completion context
 */
struct iser_comp {
	struct ib_cq		*cq;
	int                      active_qps;
};

/**
 * struct iser_reg_ops - Memory registration operations
 *     per-device registration schemes
 *
 * @alloc_reg_res:     Allocate registration resources
 * @free_reg_res:      Free registration resources
 * @reg_mem:           Register memory buffers
 * @unreg_mem:         Un-register memory buffers
 * @reg_desc_get:      Get a registration descriptor from the pool
 * @reg_desc_put:      Return a registration descriptor to the pool
 */
struct iser_reg_ops {
	int            (*alloc_reg_res)(struct ib_conn *ib_conn,
					unsigned cmds_max,
					unsigned int size);
	void           (*free_reg_res)(struct ib_conn *ib_conn);
	int            (*reg_mem)(struct iscsi_iser_task *iser_task,
				  struct iser_data_buf *mem,
				  struct iser_reg_resources *rsc,
				  struct iser_mem_reg *reg);
	void           (*unreg_mem)(struct iscsi_iser_task *iser_task,
				    enum iser_data_dir cmd_dir);
	struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
	void           (*reg_desc_put)(struct ib_conn *ib_conn,
				       struct iser_fr_desc *desc);
};

/**
 * struct iser_device - iSER device handle
 *
 * @ib_device:     RDMA device
 * @pd:            Protection Domain for this device
 * @mr:            Global DMA memory region
 * @event_handler: IB events handle routine
 * @ig_list:	   entry in devices list
 * @refcount:      Reference counter, dominated by open iser connections
 * @comps_used:    Number of completion contexts used, Min between online
 *                 cpus and device max completion vectors
 * @comps:         Dynamically allocated array of completion contexts
 * @reg_ops:       Registration ops
 */
struct iser_device {
	struct ib_device             *ib_device;
	struct ib_pd	             *pd;
	struct ib_mr	             *mr;
	struct ib_event_handler      event_handler;
	struct list_head             ig_list;
	int                          refcount;
	int			     comps_used;
	struct iser_comp	     *comps;
	const struct iser_reg_ops    *reg_ops;
};

#define ISER_CHECK_GUARD	0xc0
#define ISER_CHECK_REFTAG	0x0f
#define ISER_CHECK_APPTAG	0x30

/**
 * struct iser_reg_resources - Fast registration resources
 *
 * @mr:         memory region
 * @fmr_pool:   pool of fmrs
 * @page_vec:   fast reg page list used by fmr pool
 * @mr_valid:   is mr valid indicator
 */
struct iser_reg_resources {
	union {
		struct ib_mr             *mr;
		struct ib_fmr_pool       *fmr_pool;
	};
	struct iser_page_vec             *page_vec;
	u8				  mr_valid:1;
};
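
/*
 * Which union member above is live depends on the registration scheme chosen
 * for the device (see iser_assign_reg_ops() and the reg_ops field of
 * struct iser_device): fast registration uses @mr, while the FMR scheme uses
 * @fmr_pool together with @page_vec.
 */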

/**
 * struct iser_pi_context - Protection information context
 *
 * @rsc:             protection buffer registration resources
 * @sig_mr:          signature enable memory region
 * @sig_mr_valid:    is sig_mr valid indicator
 * @sig_protected:   is region protected indicator
 */
struct iser_pi_context {
	struct iser_reg_resources	rsc;
	struct ib_mr                   *sig_mr;
	u8                              sig_mr_valid:1;
	u8                              sig_protected:1;
};

/**
 * struct iser_fr_desc - Fast registration descriptor
 *
 * @list:           entry in connection fastreg pool
 * @rsc:            data buffer registration resources
 * @pi_ctx:         protection information context
 */
struct iser_fr_desc {
	struct list_head		  list;
	struct iser_reg_resources	  rsc;
	struct iser_pi_context		 *pi_ctx;
};

/**
 * struct iser_fr_pool - connection fast registration pool
 *
 * @list:                list of fastreg descriptors
 * @lock:                protects fmr/fastreg pool
 * @size:                size of the pool
 */
struct iser_fr_pool {
	struct list_head        list;
	spinlock_t              lock;
	int                     size;
};

/**
 * struct ib_conn - Infiniband related objects
 *
 * @cma_id:              rdma_cm connection manager handle
 * @qp:                  Connection Queue-pair
 * @post_recv_buf_count: post receive counter
 * @sig_count:           send work request signal count
 * @rx_wr:               receive work request for batch posts
 * @device:              reference to iser device
 * @comp:                iser completion context
 * @fr_pool:             connection fast registration pool
 * @pi_support:          Indicate device T10-PI support
 * @last:                last send wr to signal all flush errors were drained
 * @last_cqe:            cqe handler for last wr
 * @reg_cqe:             cqe handler for memory registration wrs
 * @last_comp:           completes when all connection completions consumed
 */
struct ib_conn {
	struct rdma_cm_id           *cma_id;
	struct ib_qp	            *qp;
	int                          post_recv_buf_count;
	u8                           sig_count;
	struct ib_recv_wr	     rx_wr[ISER_MIN_POSTED_RX];
	struct iser_device          *device;
	struct iser_comp	    *comp;
	struct iser_fr_pool          fr_pool;
	bool			     pi_support;
	struct ib_send_wr	     last;
	struct ib_cqe		     last_cqe;
	struct ib_cqe		     reg_cqe;
	struct completion	     last_comp;
};

/**
 * struct iser_conn - iSER connection context
 *
 * @ib_conn:          connection RDMA resources
 * @iscsi_conn:       link to matching iscsi connection
 * @ep:               transport handle
 * @state:            connection logical state
 * @qp_max_recv_dtos: maximum number of data outs, corresponds
 *                    to max number of post recvs
 * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
 * @min_posted_rx:    (qp_max_recv_dtos >> 2)
 * @max_cmds:         maximum cmds allowed for this connection
 * @name:             connection peer portal
 * @release_work:     deferred work for release job
 * @state_mutex:      protects iser connection state
 * @stop_completion:  conn_stop completion
 * @ib_completion:    RDMA cleanup completion
 * @up_completion:    connection establishment completed
 *                    (state is ISER_CONN_UP)
 * @conn_list:        entry in ig conn list
 * @login_desc:       login descriptor
 * @rx_desc_head:     head of rx_descs cyclic buffer
 * @rx_descs:         rx buffers array (cyclic buffer)
 * @num_rx_descs:     number of rx descriptors
 * @scsi_sg_tablesize: scsi host sg_tablesize
 * @scsi_max_sectors: scsi host max sectors
 */
struct iser_conn {
	struct ib_conn		     ib_conn;
	struct iscsi_conn	     *iscsi_conn;
	struct iscsi_endpoint	     *ep;
	enum iser_conn_state	     state;
	unsigned		     qp_max_recv_dtos;
	unsigned		     qp_max_recv_dtos_mask;
	unsigned		     min_posted_rx;
	u16                          max_cmds;
	char			     name[ISER_OBJECT_NAME_SIZE];
	struct work_struct	     release_work;
	struct mutex		     state_mutex;
	struct completion	     stop_completion;
	struct completion	     ib_completion;
	struct completion	     up_completion;
	struct list_head	     conn_list;
	struct iser_login_desc       login_desc;
	unsigned int		     rx_desc_head;
	struct iser_rx_desc	     *rx_descs;
	u32                          num_rx_descs;
	unsigned short               scsi_sg_tablesize;
	unsigned int                 scsi_max_sectors;
};

/**
 * struct iscsi_iser_task - iser task context
 *
 * @desc:             TX descriptor
 * @iser_conn:        link to iser connection
 * @status:           current task status
 * @sc:               link to scsi command
 * @command_sent:     indicate if command was sent
 * @dir:              iser data direction
 * @rdma_reg:         task rdma registration desc
 * @data:             iser data buffer desc
 * @prot:             iser protection buffer desc
 */
struct iscsi_iser_task {
	struct iser_tx_desc          desc;
	struct iser_conn	     *iser_conn;
	enum iser_task_status	     status;
	struct scsi_cmnd	     *sc;
	int                          command_sent;
	int                          dir[ISER_DIRS_NUM];
	struct iser_mem_reg          rdma_reg[ISER_DIRS_NUM];
	struct iser_data_buf         data[ISER_DIRS_NUM];
	struct iser_data_buf         prot[ISER_DIRS_NUM];
};

struct iser_page_vec {
	u64 *pages;
	int length;
	int offset;
	int data_size;
};

/**
 * struct iser_global - iSER global context
 *
 * @device_list_mutex:    protects device_list
 * @device_list:          iser devices global list
 * @connlist_mutex:       protects connlist
 * @connlist:             iser connections global list
 * @desc_cache:           kmem cache for tx dataout
 */
struct iser_global {
	struct mutex      device_list_mutex;
	struct list_head  device_list;
	struct mutex      connlist_mutex;
	struct list_head  connlist;
	struct kmem_cache *desc_cache;
};

extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;
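
/*
 * The globals above are defined by the iser transport module itself;
 * iser_debug_level, iser_pi_enable, iser_pi_guard, iser_max_sectors and
 * iser_always_reg are presumably exposed as runtime-tunable module parameters
 * rather than compile-time constants.
 */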

int iser_assign_reg_ops(struct iser_device *device);

int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr);

void iscsi_iser_recv(struct iscsi_conn *conn,
		     struct iscsi_hdr *hdr,
		     char *rx_data,
		     int rx_data_len);

void iser_conn_init(struct iser_conn *iser_conn);

void iser_conn_release(struct iser_conn *iser_conn);

int iser_conn_terminate(struct iser_conn *iser_conn);

void iser_release_work(struct work_struct *work);

void iser_err_comp(struct ib_wc *wc, const char *type);
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc);

void iser_task_rdma_init(struct iscsi_iser_task *task);

void iser_task_rdma_finalize(struct iscsi_iser_task *task);

void iser_free_rx_descriptors(struct iser_conn *iser_conn);

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *mem,
				     enum iser_data_dir cmd_dir);

int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir);
void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
			 enum iser_data_dir dir);

int  iser_connect(struct iser_conn *iser_conn,
		  struct sockaddr *src_addr,
		  struct sockaddr *dst_addr,
		  int non_blocking);

void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir);

int  iser_post_recvl(struct iser_conn *iser_conn);
int  iser_post_recvm(struct iser_conn *iser_conn, int count);
int  iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		    bool signal);

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir);

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir);

int  iser_initialize_task_headers(struct iscsi_task *task,
			struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session);
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size);
void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector);
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn);
void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc);
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc);
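
/*
 * iser_tx_next_wr() hands out the next free work-request slot of a TX
 * descriptor and chains it onto the previously taken one, so a task's WRs
 * (memory registrations followed by the final send) end up as one linked
 * list that can be posted together.
 */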
static inline struct ib_send_wr *
iser_tx_next_wr(struct iser_tx_desc *tx_desc)
{
	struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send;
	struct ib_send_wr *last_wr;

	if (tx_desc->wr_idx) {
		last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send;
		last_wr->next = cur_wr;
	}
	tx_desc->wr_idx++;

	return cur_wr;
}

static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
{
	return container_of(ib_conn, struct iser_conn, ib_conn);
}

static inline struct iser_rx_desc *
iser_rx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_rx_desc, cqe);
}

static inline struct iser_tx_desc *
iser_tx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_tx_desc, cqe);
}

static inline struct iser_login_desc *
iser_login(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_login_desc, cqe);
}

#endif