xref: /openbmc/linux/drivers/scsi/libiscsi.c (revision aadaeca4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * iSCSI lib functions
4  *
5  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
6  * Copyright (C) 2004 - 2006 Mike Christie
7  * Copyright (C) 2004 - 2005 Dmitry Yusupov
8  * Copyright (C) 2004 - 2005 Alex Aizman
9  * maintained by open-iscsi@googlegroups.com
10  */
11 #include <linux/types.h>
12 #include <linux/kfifo.h>
13 #include <linux/delay.h>
14 #include <linux/log2.h>
15 #include <linux/slab.h>
16 #include <linux/sched/signal.h>
17 #include <linux/module.h>
18 #include <asm/unaligned.h>
19 #include <net/tcp.h>
20 #include <scsi/scsi_cmnd.h>
21 #include <scsi/scsi_device.h>
22 #include <scsi/scsi_eh.h>
23 #include <scsi/scsi_tcq.h>
24 #include <scsi/scsi_host.h>
25 #include <scsi/scsi.h>
26 #include <scsi/iscsi_proto.h>
27 #include <scsi/scsi_transport.h>
28 #include <scsi/scsi_transport_iscsi.h>
29 #include <scsi/libiscsi.h>
30 #include <trace/events/iscsi.h>
31 
32 static int iscsi_dbg_lib_conn;
33 module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
34 		   S_IRUGO | S_IWUSR);
35 MODULE_PARM_DESC(debug_libiscsi_conn,
36 		 "Turn on debugging for connections in libiscsi module. "
37 		 "Set to 1 to turn on, and zero to turn off. Default is off.");
38 
39 static int iscsi_dbg_lib_session;
40 module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
41 		   S_IRUGO | S_IWUSR);
42 MODULE_PARM_DESC(debug_libiscsi_session,
43 		 "Turn on debugging for sessions in libiscsi module. "
44 		 "Set to 1 to turn on, and zero to turn off. Default is off.");
45 
46 static int iscsi_dbg_lib_eh;
47 module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
48 		   S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(debug_libiscsi_eh,
50 		 "Turn on debugging for error handling in libiscsi module. "
51 		 "Set to 1 to turn on, and zero to turn off. Default is off.");
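
/*
 * Example (illustrative, not part of the driver): because the three debug
 * parameters above are writable (S_IWUSR), they can also be toggled at
 * runtime through sysfs when this code is built as the libiscsi module,
 * e.g.:
 *
 *	# echo 1 > /sys/module/libiscsi/parameters/debug_libiscsi_conn
 *	# echo 0 > /sys/module/libiscsi/parameters/debug_libiscsi_conn
 *
 * The exact path is an assumption based on how module_param_named()
 * normally exposes parameters; verify it against the running kernel.
 */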
52 
53 #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...)			\
54 	do {							\
55 		if (iscsi_dbg_lib_conn)				\
56 			iscsi_conn_printk(KERN_INFO, _conn,	\
57 					     "%s " dbg_fmt,	\
58 					     __func__, ##arg);	\
59 		iscsi_dbg_trace(trace_iscsi_dbg_conn,		\
60 				&(_conn)->cls_conn->dev,	\
61 				"%s " dbg_fmt, __func__, ##arg);\
62 	} while (0)
63 
64 #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...)			\
65 	do {								\
66 		if (iscsi_dbg_lib_session)				\
67 			iscsi_session_printk(KERN_INFO, _session,	\
68 					     "%s " dbg_fmt,		\
69 					     __func__, ##arg);		\
70 		iscsi_dbg_trace(trace_iscsi_dbg_session, 		\
71 				&(_session)->cls_session->dev,		\
72 				"%s " dbg_fmt, __func__, ##arg);	\
73 	} while (0)
74 
75 #define ISCSI_DBG_EH(_session, dbg_fmt, arg...)				\
76 	do {								\
77 		if (iscsi_dbg_lib_eh)					\
78 			iscsi_session_printk(KERN_INFO, _session,	\
79 					     "%s " dbg_fmt,		\
80 					     __func__, ##arg);		\
81 		iscsi_dbg_trace(trace_iscsi_dbg_eh,			\
82 				&(_session)->cls_session->dev,		\
83 				"%s " dbg_fmt, __func__, ##arg);	\
84 	} while (0)
85 
86 inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
87 {
88 	struct Scsi_Host *shost = conn->session->host;
89 	struct iscsi_host *ihost = shost_priv(shost);
90 
91 	if (ihost->workq)
92 		queue_work(ihost->workq, &conn->xmitwork);
93 }
94 EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
95 
96 static void __iscsi_update_cmdsn(struct iscsi_session *session,
97 				 uint32_t exp_cmdsn, uint32_t max_cmdsn)
98 {
99 	/*
100 	 * The iSCSI standard specifies this check for deciding when to
101 	 * update the expected and max sequence numbers
102 	 */
103 	if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
104 		return;
105 
106 	if (exp_cmdsn != session->exp_cmdsn &&
107 	    !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
108 		session->exp_cmdsn = exp_cmdsn;
109 
110 	if (max_cmdsn != session->max_cmdsn &&
111 	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
112 		session->max_cmdsn = max_cmdsn;
113 }
114 
115 void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
116 {
117 	__iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
118 			     be32_to_cpu(hdr->max_cmdsn));
119 }
120 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
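
/*
 * Worked example (illustrative): the comparisons above use 32-bit serial
 * number arithmetic (iscsi_sna_lt(), RFC 1982 style), so the command window
 * keeps advancing across wrap-around. If the session has cached exp_cmdsn
 * 0xfffffffe and a PDU arrives carrying exp_cmdsn 0x00000002 and max_cmdsn
 * 0x00000041, both values are "greater" in SNA terms despite being
 * numerically smaller, and both cached values are updated. A stale PDU
 * carrying SNA-smaller values is ignored rather than shrinking the window.
 */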
121 
122 /**
123  * iscsi_prep_data_out_pdu - initialize Data-Out
124  * @task: scsi command task
125  * @r2t: R2T info
126  * @hdr: iscsi data out pdu
127  *
128  * Notes:
129  *	Initializes a Data-Out within this R2T sequence and finds the
130  *	proper data_offset within this SCSI command.
131  *
132  *	This function is called with connection lock taken.
133  **/
134 void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
135 			   struct iscsi_data *hdr)
136 {
137 	struct iscsi_conn *conn = task->conn;
138 	unsigned int left = r2t->data_length - r2t->sent;
139 
140 	task->hdr_len = sizeof(struct iscsi_data);
141 
142 	memset(hdr, 0, sizeof(struct iscsi_data));
143 	hdr->ttt = r2t->ttt;
144 	hdr->datasn = cpu_to_be32(r2t->datasn);
145 	r2t->datasn++;
146 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
147 	hdr->lun = task->lun;
148 	hdr->itt = task->hdr_itt;
149 	hdr->exp_statsn = r2t->exp_statsn;
150 	hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
151 	if (left > conn->max_xmit_dlength) {
152 		hton24(hdr->dlength, conn->max_xmit_dlength);
153 		r2t->data_count = conn->max_xmit_dlength;
154 		hdr->flags = 0;
155 	} else {
156 		hton24(hdr->dlength, left);
157 		r2t->data_count = left;
158 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
159 	}
160 	conn->dataout_pdus_cnt++;
161 }
162 EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
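
/*
 * Worked example (illustrative, with assumed negotiated values): for an R2T
 * requesting 96KB (data_length = 98304) on a connection with
 * max_xmit_dlength = 65536, the first call above builds a Data-Out with
 * dlength = 65536 and the F bit clear; once the transport has sent it and
 * advanced r2t->sent, the next call builds the remaining 32768 bytes with
 * ISCSI_FLAG_CMD_FINAL set. DataSN is incremented once per Data-Out.
 */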
163 
164 static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
165 {
166 	unsigned exp_len = task->hdr_len + len;
167 
168 	if (exp_len > task->hdr_max) {
169 		WARN_ON(1);
170 		return -EINVAL;
171 	}
172 
173 	WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
174 	task->hdr_len = exp_len;
175 	return 0;
176 }
177 
178 /*
179  * make an extended cdb AHS
180  */
181 static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
182 {
183 	struct scsi_cmnd *cmd = task->sc;
184 	unsigned rlen, pad_len;
185 	unsigned short ahslength;
186 	struct iscsi_ecdb_ahdr *ecdb_ahdr;
187 	int rc;
188 
189 	ecdb_ahdr = iscsi_next_hdr(task);
190 	rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
191 
192 	BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
193 	ahslength = rlen + sizeof(ecdb_ahdr->reserved);
194 
195 	pad_len = iscsi_padding(rlen);
196 
197 	rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
198 	                   sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
199 	if (rc)
200 		return rc;
201 
202 	if (pad_len)
203 		memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
204 
205 	ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
206 	ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
207 	ecdb_ahdr->reserved = 0;
208 	memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
209 
210 	ISCSI_DBG_SESSION(task->conn->session,
211 			  "iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
212 		          "rlen %d pad_len %d ahs_length %d iscsi_headers_size "
213 		          "%u\n", cmd->cmd_len, rlen, pad_len, ahslength,
214 		          task->hdr_len);
215 	return 0;
216 }
217 
218 /**
219  * iscsi_check_tmf_restrictions - check if a task is affected by TMF
220  * @task: iscsi task
221  * @opcode: opcode to check for
222  *
223  * During a TMF each task has to be checked to see if it is affected.
224  * All unrelated I/O can be passed through, but I/O to the
225  * affected LUN should be restricted.
226  * If 'fast_abort' is set we won't be sending any I/O to the
227  * affected LUN.
228  * Otherwise the target is waiting for all TTTs to be completed,
229  * so we have to send all outstanding Data-Out PDUs to the target.
230  */
231 static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
232 {
233 	struct iscsi_session *session = task->conn->session;
234 	struct iscsi_tm *tmf = &session->tmhdr;
235 	u64 hdr_lun;
236 
237 	if (session->tmf_state == TMF_INITIAL)
238 		return 0;
239 
240 	if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
241 		return 0;
242 
243 	switch (ISCSI_TM_FUNC_VALUE(tmf)) {
244 	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
245 		/*
246 		 * Allow PDUs for unrelated LUNs
247 		 */
248 		hdr_lun = scsilun_to_int(&tmf->lun);
249 		if (hdr_lun != task->sc->device->lun)
250 			return 0;
251 		fallthrough;
252 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
253 		/*
254 		 * Fail all SCSI cmd PDUs
255 		 */
256 		if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
257 			iscsi_session_printk(KERN_INFO, session,
258 					     "task [op %x itt 0x%x/0x%x] rejected.\n",
259 					     opcode, task->itt, task->hdr_itt);
260 			return -EACCES;
261 		}
262 		/*
263 		 * And also all data-out PDUs in response to R2T
264 		 * if fast_abort is set.
265 		 */
266 		if (session->fast_abort) {
267 			iscsi_session_printk(KERN_INFO, session,
268 					     "task [op %x itt 0x%x/0x%x] fast abort.\n",
269 					     opcode, task->itt, task->hdr_itt);
270 			return -EACCES;
271 		}
272 		break;
273 	case ISCSI_TM_FUNC_ABORT_TASK:
274 		/*
275 		 * the caller has already checked if the task
276 		 * they want to abort was in the pending queue so if
277 		 * we are here the cmd pdu has gone out already, and
278 		 * we will only hit this for data-outs
279 		 */
280 		if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
281 		    task->hdr_itt == tmf->rtt) {
282 			ISCSI_DBG_SESSION(session,
283 					  "Preventing task %x/%x from sending "
284 					  "data-out due to abort task in "
285 					  "progress\n", task->itt,
286 					  task->hdr_itt);
287 			return -EACCES;
288 		}
289 		break;
290 	}
291 
292 	return 0;
293 }
294 
295 /**
296  * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
297  * @task: iscsi task
298  *
299  * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
300  * fields like dlength or final based on how much data it sends
301  */
302 static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
303 {
304 	struct iscsi_conn *conn = task->conn;
305 	struct iscsi_session *session = conn->session;
306 	struct scsi_cmnd *sc = task->sc;
307 	struct iscsi_scsi_req *hdr;
308 	unsigned hdrlength, cmd_len, transfer_length;
309 	itt_t itt;
310 	int rc;
311 
312 	rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
313 	if (rc)
314 		return rc;
315 
316 	if (conn->session->tt->alloc_pdu) {
317 		rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
318 		if (rc)
319 			return rc;
320 	}
321 	hdr = (struct iscsi_scsi_req *)task->hdr;
322 	itt = hdr->itt;
323 	memset(hdr, 0, sizeof(*hdr));
324 
325 	if (session->tt->parse_pdu_itt)
326 		hdr->itt = task->hdr_itt = itt;
327 	else
328 		hdr->itt = task->hdr_itt = build_itt(task->itt,
329 						     task->conn->session->age);
330 	task->hdr_len = 0;
331 	rc = iscsi_add_hdr(task, sizeof(*hdr));
332 	if (rc)
333 		return rc;
334 	hdr->opcode = ISCSI_OP_SCSI_CMD;
335 	hdr->flags = ISCSI_ATTR_SIMPLE;
336 	int_to_scsilun(sc->device->lun, &hdr->lun);
337 	task->lun = hdr->lun;
338 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
339 	cmd_len = sc->cmd_len;
340 	if (cmd_len < ISCSI_CDB_SIZE)
341 		memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
342 	else if (cmd_len > ISCSI_CDB_SIZE) {
343 		rc = iscsi_prep_ecdb_ahs(task);
344 		if (rc)
345 			return rc;
346 		cmd_len = ISCSI_CDB_SIZE;
347 	}
348 	memcpy(hdr->cdb, sc->cmnd, cmd_len);
349 
350 	task->imm_count = 0;
351 	if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
352 		task->protected = true;
353 
354 	transfer_length = scsi_transfer_length(sc);
355 	hdr->data_length = cpu_to_be32(transfer_length);
356 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
357 		struct iscsi_r2t_info *r2t = &task->unsol_r2t;
358 
359 		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
360 		/*
361 		 * Write counters:
362 		 *
363 		 *	imm_count	bytes to be sent right after
364 		 *			SCSI PDU Header
365 		 *
366 		 *	unsol_count	bytes (as Data-Out) to be sent
367 		 *			without	R2T ack right after
368 		 *			immediate data
369 		 *
370 		 *	r2t data_length bytes to be sent via R2T ack's
371 		 *
372 		 *      pad_count       bytes to be sent as zero-padding
373 		 */
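		/*
		 * Worked example (illustrative, with assumed negotiated
		 * parameters): ImmediateData=Yes, InitialR2T=No,
		 * FirstBurstLength=65536, max_xmit_dlength=8192 and a
		 * 131072-byte write give imm_count = 8192 (sent with the
		 * command PDU), an unsolicited unsol_r2t covering
		 * data_length = 57344 at data_offset = 8192, and the
		 * remaining 65536 bytes transferred only in response to
		 * R2Ts from the target.
		 */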
374 		memset(r2t, 0, sizeof(*r2t));
375 
376 		if (session->imm_data_en) {
377 			if (transfer_length >= session->first_burst)
378 				task->imm_count = min(session->first_burst,
379 							conn->max_xmit_dlength);
380 			else
381 				task->imm_count = min(transfer_length,
382 						      conn->max_xmit_dlength);
383 			hton24(hdr->dlength, task->imm_count);
384 		} else
385 			zero_data(hdr->dlength);
386 
387 		if (!session->initial_r2t_en) {
388 			r2t->data_length = min(session->first_burst,
389 					       transfer_length) -
390 					       task->imm_count;
391 			r2t->data_offset = task->imm_count;
392 			r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
393 			r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
394 		}
395 
396 		if (!task->unsol_r2t.data_length)
397 			/* No unsolicited Data-Outs */
398 			hdr->flags |= ISCSI_FLAG_CMD_FINAL;
399 	} else {
400 		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
401 		zero_data(hdr->dlength);
402 
403 		if (sc->sc_data_direction == DMA_FROM_DEVICE)
404 			hdr->flags |= ISCSI_FLAG_CMD_READ;
405 	}
406 
407 	/* calculate size of additional header segments (AHSs) */
408 	hdrlength = task->hdr_len - sizeof(*hdr);
409 
410 	WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
411 	hdrlength /= ISCSI_PAD_LEN;
412 
413 	WARN_ON(hdrlength >= 256);
414 	hdr->hlength = hdrlength & 0xFF;
415 	hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
416 
417 	if (session->tt->init_task && session->tt->init_task(task))
418 		return -EIO;
419 
420 	task->state = ISCSI_TASK_RUNNING;
421 	session->cmdsn++;
422 
423 	conn->scsicmd_pdus_cnt++;
424 	ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
425 			  "itt 0x%x len %d cmdsn %d win %d]\n",
426 			  sc->sc_data_direction == DMA_TO_DEVICE ?
427 			  "write" : "read", conn->id, sc, sc->cmnd[0],
428 			  task->itt, transfer_length,
429 			  session->cmdsn,
430 			  session->max_cmdsn - session->exp_cmdsn + 1);
431 	return 0;
432 }
433 
434 /**
435  * iscsi_free_task - free a task
436  * @task: iscsi cmd task
437  *
438  * Must be called with session back_lock.
439  * This function returns the scsi command to scsi-ml or cleans
440  * up mgmt tasks then returns the task to the pool.
441  */
442 static void iscsi_free_task(struct iscsi_task *task)
443 {
444 	struct iscsi_conn *conn = task->conn;
445 	struct iscsi_session *session = conn->session;
446 	struct scsi_cmnd *sc = task->sc;
447 	int oldstate = task->state;
448 
449 	ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
450 			  task->itt, task->state, task->sc);
451 
452 	session->tt->cleanup_task(task);
453 	task->state = ISCSI_TASK_FREE;
454 	task->sc = NULL;
455 	/*
456 	 * login task is preallocated so do not free
457 	 */
458 	if (conn->login_task == task)
459 		return;
460 
461 	kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
462 
463 	if (sc) {
464 		/* SCSI eh reuses commands to verify us */
465 		iscsi_cmd(sc)->task = NULL;
466 		/*
467 		 * queue command may call this to free the task, so
468 		 * it will decide how to return sc to scsi-ml.
469 		 */
470 		if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
471 			scsi_done(sc);
472 	}
473 }
474 
475 void __iscsi_get_task(struct iscsi_task *task)
476 {
477 	refcount_inc(&task->refcount);
478 }
479 EXPORT_SYMBOL_GPL(__iscsi_get_task);
480 
481 void __iscsi_put_task(struct iscsi_task *task)
482 {
483 	if (refcount_dec_and_test(&task->refcount))
484 		iscsi_free_task(task);
485 }
486 EXPORT_SYMBOL_GPL(__iscsi_put_task);
487 
488 void iscsi_put_task(struct iscsi_task *task)
489 {
490 	struct iscsi_session *session = task->conn->session;
491 
492 	/* regular RX path uses back_lock */
493 	spin_lock_bh(&session->back_lock);
494 	__iscsi_put_task(task);
495 	spin_unlock_bh(&session->back_lock);
496 }
497 EXPORT_SYMBOL_GPL(iscsi_put_task);
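
/*
 * Usage sketch (illustrative): code that must touch a task after dropping
 * the locks takes a reference first and releases it with the variant that
 * matches its locking context, e.g.:
 *
 *	__iscsi_get_task(task);		(caller already holds a valid ref)
 *	spin_unlock_bh(&session->back_lock);
 *	...use task...
 *	iscsi_put_task(task);		(takes back_lock internally)
 *
 * __iscsi_put_task() is for callers that already hold the back_lock.
 */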
498 
499 /**
500  * iscsi_complete_task - finish a task
501  * @task: iscsi cmd task
502  * @state: state to complete task with
503  *
504  * Must be called with session back_lock.
505  */
506 static void iscsi_complete_task(struct iscsi_task *task, int state)
507 {
508 	struct iscsi_conn *conn = task->conn;
509 
510 	ISCSI_DBG_SESSION(conn->session,
511 			  "complete task itt 0x%x state %d sc %p\n",
512 			  task->itt, task->state, task->sc);
513 	if (task->state == ISCSI_TASK_COMPLETED ||
514 	    task->state == ISCSI_TASK_ABRT_TMF ||
515 	    task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
516 	    task->state == ISCSI_TASK_REQUEUE_SCSIQ)
517 		return;
518 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
519 	task->state = state;
520 
521 	if (READ_ONCE(conn->ping_task) == task)
522 		WRITE_ONCE(conn->ping_task, NULL);
523 
524 	/* release get from queueing */
525 	__iscsi_put_task(task);
526 }
527 
528 /**
529  * iscsi_complete_scsi_task - finish scsi task normally
530  * @task: iscsi task for scsi cmd
531  * @exp_cmdsn: expected cmd sn in cpu format
532  * @max_cmdsn: max cmd sn in cpu format
533  *
534  * This is used when drivers do not need or cannot perform
535  * lower level pdu processing.
536  *
537  * Called with session back_lock
538  */
539 void iscsi_complete_scsi_task(struct iscsi_task *task,
540 			      uint32_t exp_cmdsn, uint32_t max_cmdsn)
541 {
542 	struct iscsi_conn *conn = task->conn;
543 
544 	ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
545 
546 	conn->last_recv = jiffies;
547 	__iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
548 	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
549 }
550 EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
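
/*
 * Usage sketch (illustrative only; names such as "cqe" are made up): an
 * offload driver whose firmware returns fully processed SCSI responses
 * might complete a command roughly like this, holding the back_lock as
 * required:
 *
 *	spin_lock_bh(&session->back_lock);
 *	task = iscsi_itt_to_ctask(conn, cqe->itt);
 *	if (task) {
 *		task->sc->result = (DID_OK << 16) | cqe->cmd_status;
 *		iscsi_complete_scsi_task(task, be32_to_cpu(cqe->exp_cmdsn),
 *					 be32_to_cpu(cqe->max_cmdsn));
 *	}
 *	spin_unlock_bh(&session->back_lock);
 */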
551 
552 /*
553  * Must be called with back and frwd lock
554  */
555 static bool cleanup_queued_task(struct iscsi_task *task)
556 {
557 	struct iscsi_conn *conn = task->conn;
558 	bool early_complete = false;
559 
560 	/* Bad target might have completed task while it was still running */
561 	if (task->state == ISCSI_TASK_COMPLETED)
562 		early_complete = true;
563 
564 	if (!list_empty(&task->running)) {
565 		list_del_init(&task->running);
566 		/*
567 		 * If it's on a list but still running, this could be from
568 		 * a bad target sending a rsp early, cleanup from a TMF, or
569 		 * session recovery.
570 		 */
571 		if (task->state == ISCSI_TASK_RUNNING ||
572 		    task->state == ISCSI_TASK_COMPLETED)
573 			__iscsi_put_task(task);
574 	}
575 
576 	if (conn->session->running_aborted_task == task) {
577 		conn->session->running_aborted_task = NULL;
578 		__iscsi_put_task(task);
579 	}
580 
581 	if (conn->task == task) {
582 		conn->task = NULL;
583 		__iscsi_put_task(task);
584 	}
585 
586 	return early_complete;
587 }
588 
589 /*
590  * session frwd lock must be held and if not called for a task that is still
591  * pending or from the xmit thread, then xmit thread must be suspended
592  */
593 static void fail_scsi_task(struct iscsi_task *task, int err)
594 {
595 	struct iscsi_conn *conn = task->conn;
596 	struct scsi_cmnd *sc;
597 	int state;
598 
599 	spin_lock_bh(&conn->session->back_lock);
600 	if (cleanup_queued_task(task)) {
601 		spin_unlock_bh(&conn->session->back_lock);
602 		return;
603 	}
604 
605 	if (task->state == ISCSI_TASK_PENDING) {
606 		/*
607 		 * cmd never made it to the xmit thread, so we should not count
608 		 * the cmd in the sequencing
609 		 */
610 		conn->session->queued_cmdsn--;
611 		/* it was never sent so just complete like normal */
612 		state = ISCSI_TASK_COMPLETED;
613 	} else if (err == DID_TRANSPORT_DISRUPTED)
614 		state = ISCSI_TASK_ABRT_SESS_RECOV;
615 	else
616 		state = ISCSI_TASK_ABRT_TMF;
617 
618 	sc = task->sc;
619 	sc->result = err << 16;
620 	scsi_set_resid(sc, scsi_bufflen(sc));
621 	iscsi_complete_task(task, state);
622 	spin_unlock_bh(&conn->session->back_lock);
623 }
624 
625 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
626 				struct iscsi_task *task)
627 {
628 	struct iscsi_session *session = conn->session;
629 	struct iscsi_hdr *hdr = task->hdr;
630 	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
631 	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
632 
633 	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
634 		return -ENOTCONN;
635 
636 	if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
637 		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
638 	/*
639 	 * pre-format CmdSN for outgoing PDU.
640 	 */
641 	nop->cmdsn = cpu_to_be32(session->cmdsn);
642 	if (hdr->itt != RESERVED_ITT) {
643 		/*
644 		 * TODO: We always use immediate for normal session pdus.
645 		 * If we start to send tmfs or nops as non-immediate then
646 		 * we should start checking the cmdsn numbers for mgmt tasks.
647 		 *
648 		 * During discovery sessions iscsid sends TEXT as non-immediate,
649 		 * but we only ever send one PDU at a time.
650 		 */
651 		if (conn->c_stage == ISCSI_CONN_STARTED &&
652 		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
653 			session->queued_cmdsn++;
654 			session->cmdsn++;
655 		}
656 	}
657 
658 	if (session->tt->init_task && session->tt->init_task(task))
659 		return -EIO;
660 
661 	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
662 		session->state = ISCSI_STATE_LOGGING_OUT;
663 
664 	task->state = ISCSI_TASK_RUNNING;
665 	ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
666 			  "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
667 			  hdr->itt, task->data_count);
668 	return 0;
669 }
670 
671 static struct iscsi_task *
672 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
673 		      char *data, uint32_t data_size)
674 {
675 	struct iscsi_session *session = conn->session;
676 	struct iscsi_host *ihost = shost_priv(session->host);
677 	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
678 	struct iscsi_task *task;
679 	itt_t itt;
680 
681 	if (session->state == ISCSI_STATE_TERMINATE)
682 		return NULL;
683 
684 	if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
685 		/*
686 		 * Login and Text are sent serially, in
687 		 * request-followed-by-response sequence.
688 		 * Same task can be used. Same ITT must be used.
689 		 * Note that login_task is preallocated at conn_create().
690 		 */
691 		if (conn->login_task->state != ISCSI_TASK_FREE) {
692 			iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
693 					  "progress. Cannot start new task.\n");
694 			return NULL;
695 		}
696 
697 		if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
698 			iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
699 			return NULL;
700 		}
701 
702 		task = conn->login_task;
703 	} else {
704 		if (session->state != ISCSI_STATE_LOGGED_IN)
705 			return NULL;
706 
707 		if (data_size != 0) {
708 			iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
709 			return NULL;
710 		}
711 
712 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
713 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
714 
715 		if (!kfifo_out(&session->cmdpool.queue,
716 				 (void*)&task, sizeof(void*)))
717 			return NULL;
718 	}
719 	/*
720 	 * released in complete pdu for task we expect a response for, and
721 	 * released by the lld when it has transmitted the task for
722 	 * pdus we do not expect a response for.
723 	 */
724 	refcount_set(&task->refcount, 1);
725 	task->conn = conn;
726 	task->sc = NULL;
727 	INIT_LIST_HEAD(&task->running);
728 	task->state = ISCSI_TASK_PENDING;
729 
730 	if (data_size) {
731 		memcpy(task->data, data, data_size);
732 		task->data_count = data_size;
733 	} else
734 		task->data_count = 0;
735 
736 	if (conn->session->tt->alloc_pdu) {
737 		if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
738 			iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
739 					 "pdu for mgmt task.\n");
740 			goto free_task;
741 		}
742 	}
743 
744 	itt = task->hdr->itt;
745 	task->hdr_len = sizeof(struct iscsi_hdr);
746 	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
747 
748 	if (hdr->itt != RESERVED_ITT) {
749 		if (session->tt->parse_pdu_itt)
750 			task->hdr->itt = itt;
751 		else
752 			task->hdr->itt = build_itt(task->itt,
753 						   task->conn->session->age);
754 	}
755 
756 	if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
757 		WRITE_ONCE(conn->ping_task, task);
758 
759 	if (!ihost->workq) {
760 		if (iscsi_prep_mgmt_task(conn, task))
761 			goto free_task;
762 
763 		if (session->tt->xmit_task(task))
764 			goto free_task;
765 	} else {
766 		list_add_tail(&task->running, &conn->mgmtqueue);
767 		iscsi_conn_queue_work(conn);
768 	}
769 
770 	return task;
771 
772 free_task:
773 	/* regular RX path uses back_lock */
774 	spin_lock(&session->back_lock);
775 	__iscsi_put_task(task);
776 	spin_unlock(&session->back_lock);
777 	return NULL;
778 }
779 
780 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
781 			char *data, uint32_t data_size)
782 {
783 	struct iscsi_conn *conn = cls_conn->dd_data;
784 	struct iscsi_session *session = conn->session;
785 	int err = 0;
786 
787 	spin_lock_bh(&session->frwd_lock);
788 	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
789 		err = -EPERM;
790 	spin_unlock_bh(&session->frwd_lock);
791 	return err;
792 }
793 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
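
/*
 * Usage sketch (illustrative): this is normally the transport class's
 * ->send_pdu entry point, driven by iscsid over the iSCSI netlink
 * interface. A caller with a valid cls_conn could send an immediate
 * Nop-Out through it roughly as follows:
 *
 *	struct iscsi_nopout hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
 *	hdr.flags = ISCSI_FLAG_CMD_FINAL;
 *	hdr.ttt = RESERVED_ITT;
 *	err = iscsi_conn_send_pdu(cls_conn, (struct iscsi_hdr *)&hdr, NULL, 0);
 */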
794 
795 /**
796  * iscsi_scsi_cmd_rsp - SCSI Command Response processing
797  * @conn: iscsi connection
798  * @hdr: iscsi header
799  * @task: scsi command task
800  * @data: cmd data buffer
801  * @datalen: len of buffer
802  *
803  * iscsi_scsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
804  * then completes the command and task. Called under back_lock.
805  **/
806 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
807 			       struct iscsi_task *task, char *data,
808 			       int datalen)
809 {
810 	struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
811 	struct iscsi_session *session = conn->session;
812 	struct scsi_cmnd *sc = task->sc;
813 
814 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
815 	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
816 
817 	sc->result = (DID_OK << 16) | rhdr->cmd_status;
818 
819 	if (task->protected) {
820 		sector_t sector;
821 		u8 ascq;
822 
823 		/*
824 		 * Transports that didn't implement the check_protection
825 		 * callback but still advertised T10-PI support to the SCSI
826 		 * midlayer deserve this BUG_ON.
827 		 */
828 		BUG_ON(!session->tt->check_protection);
829 
830 		ascq = session->tt->check_protection(task, &sector);
831 		if (ascq) {
832 			scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq);
833 			scsi_set_sense_information(sc->sense_buffer,
834 						   SCSI_SENSE_BUFFERSIZE,
835 						   sector);
836 			goto out;
837 		}
838 	}
839 
840 	if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
841 		sc->result = DID_ERROR << 16;
842 		goto out;
843 	}
844 
845 	if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
846 		uint16_t senselen;
847 
848 		if (datalen < 2) {
849 invalid_datalen:
850 			iscsi_conn_printk(KERN_ERR,  conn,
851 					 "Got CHECK_CONDITION but invalid data "
852 					 "buffer size of %d\n", datalen);
853 			sc->result = DID_BAD_TARGET << 16;
854 			goto out;
855 		}
856 
857 		senselen = get_unaligned_be16(data);
858 		if (datalen < senselen)
859 			goto invalid_datalen;
860 
861 		memcpy(sc->sense_buffer, data + 2,
862 		       min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
863 		ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
864 				  min_t(uint16_t, senselen,
865 				  SCSI_SENSE_BUFFERSIZE));
866 	}
867 
868 	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
869 			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
870 		sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
871 	}
872 
873 	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
874 	                   ISCSI_FLAG_CMD_OVERFLOW)) {
875 		int res_count = be32_to_cpu(rhdr->residual_count);
876 
877 		if (res_count > 0 &&
878 		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
879 		     res_count <= scsi_bufflen(sc)))
880 			/* write side for bidi or uni-io set_resid */
881 			scsi_set_resid(sc, res_count);
882 		else
883 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
884 	}
885 out:
886 	ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
887 			  sc, sc->result, task->itt);
888 	conn->scsirsp_pdus_cnt++;
889 	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
890 }
891 
892 /**
893  * iscsi_data_in_rsp - SCSI Data-In Response processing
894  * @conn: iscsi connection
895  * @hdr:  iscsi pdu
896  * @task: scsi command task
897  *
898  * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received
899  * and then completes the command and task. Called under back_lock.
900  **/
901 static void
902 iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
903 		  struct iscsi_task *task)
904 {
905 	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
906 	struct scsi_cmnd *sc = task->sc;
907 
908 	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
909 		return;
910 
911 	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
912 	sc->result = (DID_OK << 16) | rhdr->cmd_status;
913 	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
914 	if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
915 	                   ISCSI_FLAG_DATA_OVERFLOW)) {
916 		int res_count = be32_to_cpu(rhdr->residual_count);
917 
918 		if (res_count > 0 &&
919 		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
920 		     res_count <= sc->sdb.length))
921 			scsi_set_resid(sc, res_count);
922 		else
923 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
924 	}
925 
926 	ISCSI_DBG_SESSION(conn->session, "data in with status done "
927 			  "[sc %p res %d itt 0x%x]\n",
928 			  sc, sc->result, task->itt);
929 	conn->scsirsp_pdus_cnt++;
930 	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
931 }
932 
933 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
934 {
935 	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
936 	struct iscsi_session *session = conn->session;
937 
938 	conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
939 	conn->tmfrsp_pdus_cnt++;
940 
941 	if (session->tmf_state != TMF_QUEUED)
942 		return;
943 
944 	if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
945 		session->tmf_state = TMF_SUCCESS;
946 	else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
947 		session->tmf_state = TMF_NOT_FOUND;
948 	else
949 		session->tmf_state = TMF_FAILED;
950 	wake_up(&session->ehwait);
951 }
952 
953 static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
954 {
955 	struct iscsi_nopout hdr;
956 	struct iscsi_task *task;
957 
958 	if (!rhdr) {
959 		if (READ_ONCE(conn->ping_task))
960 			return -EINVAL;
961 		WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
962 	}
963 
964 	memset(&hdr, 0, sizeof(struct iscsi_nopout));
965 	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
966 	hdr.flags = ISCSI_FLAG_CMD_FINAL;
967 
968 	if (rhdr) {
969 		hdr.lun = rhdr->lun;
970 		hdr.ttt = rhdr->ttt;
971 		hdr.itt = RESERVED_ITT;
972 	} else
973 		hdr.ttt = RESERVED_ITT;
974 
975 	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
976 	if (!task) {
977 		if (!rhdr)
978 			WRITE_ONCE(conn->ping_task, NULL);
979 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
980 		return -EIO;
981 	} else if (!rhdr) {
982 		/* only track our nops */
983 		conn->last_ping = jiffies;
984 	}
985 
986 	return 0;
987 }
988 
989 /**
990  * iscsi_nop_out_rsp - SCSI NOP Response processing
991  * @task: scsi command task
992  * @nop: the nop structure
993  * @data: where to put the data
994  * @datalen: length of data
995  *
996  * iscsi_nop_out_rsp handles nop responses to nops from us or
997  * from user space. Called under back_lock.
998  **/
999 static int iscsi_nop_out_rsp(struct iscsi_task *task,
1000 			     struct iscsi_nopin *nop, char *data, int datalen)
1001 {
1002 	struct iscsi_conn *conn = task->conn;
1003 	int rc = 0;
1004 
1005 	if (READ_ONCE(conn->ping_task) != task) {
1006 		/*
1007 		 * If this is not in response to one of our
1008 		 * nops then it must be from userspace.
1009 		 */
1010 		if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
1011 				   data, datalen))
1012 			rc = ISCSI_ERR_CONN_FAILED;
1013 	} else
1014 		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
1015 	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1016 	return rc;
1017 }
1018 
1019 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1020 			       char *data, int datalen)
1021 {
1022 	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
1023 	struct iscsi_hdr rejected_pdu;
1024 	int opcode, rc = 0;
1025 
1026 	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
1027 
1028 	if (ntoh24(reject->dlength) > datalen ||
1029 	    ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
1030 		iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
1031 				  "pdu. Invalid data length (pdu dlength "
1032 				  "%u, datalen %d)\n", ntoh24(reject->dlength),
1033 				  datalen);
1034 		return ISCSI_ERR_PROTO;
1035 	}
1036 	memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
1037 	opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;
1038 
1039 	switch (reject->reason) {
1040 	case ISCSI_REASON_DATA_DIGEST_ERROR:
1041 		iscsi_conn_printk(KERN_ERR, conn,
1042 				  "pdu (op 0x%x itt 0x%x) rejected "
1043 				  "due to DataDigest error.\n",
1044 				  opcode, rejected_pdu.itt);
1045 		break;
1046 	case ISCSI_REASON_IMM_CMD_REJECT:
1047 		iscsi_conn_printk(KERN_ERR, conn,
1048 				  "pdu (op 0x%x itt 0x%x) rejected. Too many "
1049 				  "immediate commands.\n",
1050 				  opcode, rejected_pdu.itt);
1051 		/*
1052 		 * We only send one TMF at a time so if the target could not
1053 		 * handle it, then it should get fixed (RFC mandates that
1054 		 * a target can handle one immediate TMF per conn).
1055 		 *
1056 		 * For nops-outs, we could have sent more than one if
1057 		 * the target is sending us lots of nop-ins
1058 		 */
1059 		if (opcode != ISCSI_OP_NOOP_OUT)
1060 			return 0;
1061 
1062 		if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1063 			/*
1064 			 * nop-out in response to target's nop-out rejected.
1065 			 * Just resend.
1066 			 */
1067 			/* In RX path we are under back lock */
1068 			spin_unlock(&conn->session->back_lock);
1069 			spin_lock(&conn->session->frwd_lock);
1070 			iscsi_send_nopout(conn,
1071 					  (struct iscsi_nopin*)&rejected_pdu);
1072 			spin_unlock(&conn->session->frwd_lock);
1073 			spin_lock(&conn->session->back_lock);
1074 		} else {
1075 			struct iscsi_task *task;
1076 			/*
1077 			 * Our nop as ping got dropped. We know the target
1078 			 * and transport are ok so just clean up
1079 			 */
1080 			task = iscsi_itt_to_task(conn, rejected_pdu.itt);
1081 			if (!task) {
1082 				iscsi_conn_printk(KERN_ERR, conn,
1083 						 "Invalid pdu reject. Could "
1084 						 "not lookup rejected task.\n");
1085 				rc = ISCSI_ERR_BAD_ITT;
1086 			} else
1087 				rc = iscsi_nop_out_rsp(task,
1088 					(struct iscsi_nopin*)&rejected_pdu,
1089 					NULL, 0);
1090 		}
1091 		break;
1092 	default:
1093 		iscsi_conn_printk(KERN_ERR, conn,
1094 				  "pdu (op 0x%x itt 0x%x) rejected. Reason "
1095 				  "code 0x%x\n", rejected_pdu.opcode,
1096 				  rejected_pdu.itt, reject->reason);
1097 		break;
1098 	}
1099 	return rc;
1100 }
1101 
1102 /**
1103  * iscsi_itt_to_task - look up task by itt
1104  * @conn: iscsi connection
1105  * @itt: itt
1106  *
1107  * This should be used for mgmt tasks like login and nops, or if
1108  * the LLD's itt space does not include the session age.
1109  *
1110  * The session back_lock must be held.
1111  */
1112 struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
1113 {
1114 	struct iscsi_session *session = conn->session;
1115 	int i;
1116 
1117 	if (itt == RESERVED_ITT)
1118 		return NULL;
1119 
1120 	if (session->tt->parse_pdu_itt)
1121 		session->tt->parse_pdu_itt(conn, itt, &i, NULL);
1122 	else
1123 		i = get_itt(itt);
1124 	if (i >= session->cmds_max)
1125 		return NULL;
1126 
1127 	return session->cmds[i];
1128 }
1129 EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
1130 
1131 /**
1132  * __iscsi_complete_pdu - complete pdu
1133  * @conn: iscsi conn
1134  * @hdr: iscsi header
1135  * @data: data buffer
1136  * @datalen: len of data buffer
1137  *
1138  * Completes pdu processing by freeing any resources allocated at
1139  * queuecommand or send generic. The session back_lock must be held and
1140  * iscsi_verify_itt() must have been called.
1141  */
1142 int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1143 			 char *data, int datalen)
1144 {
1145 	struct iscsi_session *session = conn->session;
1146 	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
1147 	struct iscsi_task *task;
1148 	uint32_t itt;
1149 
1150 	conn->last_recv = jiffies;
1151 	rc = iscsi_verify_itt(conn, hdr->itt);
1152 	if (rc)
1153 		return rc;
1154 
1155 	if (hdr->itt != RESERVED_ITT)
1156 		itt = get_itt(hdr->itt);
1157 	else
1158 		itt = ~0U;
1159 
1160 	ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
1161 			  opcode, conn->id, itt, datalen);
1162 
1163 	if (itt == ~0U) {
1164 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1165 
1166 		switch(opcode) {
1167 		case ISCSI_OP_NOOP_IN:
1168 			if (datalen) {
1169 				rc = ISCSI_ERR_PROTO;
1170 				break;
1171 			}
1172 
1173 			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
1174 				break;
1175 
1176 			/* In RX path we are under back lock */
1177 			spin_unlock(&session->back_lock);
1178 			spin_lock(&session->frwd_lock);
1179 			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
1180 			spin_unlock(&session->frwd_lock);
1181 			spin_lock(&session->back_lock);
1182 			break;
1183 		case ISCSI_OP_REJECT:
1184 			rc = iscsi_handle_reject(conn, hdr, data, datalen);
1185 			break;
1186 		case ISCSI_OP_ASYNC_EVENT:
1187 			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1188 			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
1189 				rc = ISCSI_ERR_CONN_FAILED;
1190 			break;
1191 		default:
1192 			rc = ISCSI_ERR_BAD_OPCODE;
1193 			break;
1194 		}
1195 		goto out;
1196 	}
1197 
1198 	switch(opcode) {
1199 	case ISCSI_OP_SCSI_CMD_RSP:
1200 	case ISCSI_OP_SCSI_DATA_IN:
1201 		task = iscsi_itt_to_ctask(conn, hdr->itt);
1202 		if (!task)
1203 			return ISCSI_ERR_BAD_ITT;
1204 		task->last_xfer = jiffies;
1205 		break;
1206 	case ISCSI_OP_R2T:
1207 		/*
1208 		 * LLD handles R2Ts if they need to.
1209 		 */
1210 		return 0;
1211 	case ISCSI_OP_LOGOUT_RSP:
1212 	case ISCSI_OP_LOGIN_RSP:
1213 	case ISCSI_OP_TEXT_RSP:
1214 	case ISCSI_OP_SCSI_TMFUNC_RSP:
1215 	case ISCSI_OP_NOOP_IN:
1216 		task = iscsi_itt_to_task(conn, hdr->itt);
1217 		if (!task)
1218 			return ISCSI_ERR_BAD_ITT;
1219 		break;
1220 	default:
1221 		return ISCSI_ERR_BAD_OPCODE;
1222 	}
1223 
1224 	switch(opcode) {
1225 	case ISCSI_OP_SCSI_CMD_RSP:
1226 		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
1227 		break;
1228 	case ISCSI_OP_SCSI_DATA_IN:
1229 		iscsi_data_in_rsp(conn, hdr, task);
1230 		break;
1231 	case ISCSI_OP_LOGOUT_RSP:
1232 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1233 		if (datalen) {
1234 			rc = ISCSI_ERR_PROTO;
1235 			break;
1236 		}
1237 		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1238 		goto recv_pdu;
1239 	case ISCSI_OP_LOGIN_RSP:
1240 	case ISCSI_OP_TEXT_RSP:
1241 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1242 		/*
1243 		 * login related PDU's exp_statsn is handled in
1244 		 * userspace
1245 		 */
1246 		goto recv_pdu;
1247 	case ISCSI_OP_SCSI_TMFUNC_RSP:
1248 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1249 		if (datalen) {
1250 			rc = ISCSI_ERR_PROTO;
1251 			break;
1252 		}
1253 
1254 		iscsi_tmf_rsp(conn, hdr);
1255 		iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1256 		break;
1257 	case ISCSI_OP_NOOP_IN:
1258 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1259 		if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
1260 			rc = ISCSI_ERR_PROTO;
1261 			break;
1262 		}
1263 		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1264 
1265 		rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
1266 				       data, datalen);
1267 		break;
1268 	default:
1269 		rc = ISCSI_ERR_BAD_OPCODE;
1270 		break;
1271 	}
1272 
1273 out:
1274 	return rc;
1275 recv_pdu:
1276 	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
1277 		rc = ISCSI_ERR_CONN_FAILED;
1278 	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1279 	return rc;
1280 }
1281 EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
1282 
1283 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1284 		       char *data, int datalen)
1285 {
1286 	int rc;
1287 
1288 	spin_lock(&conn->session->back_lock);
1289 	rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
1290 	spin_unlock(&conn->session->back_lock);
1291 	return rc;
1292 }
1293 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
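
/*
 * Usage sketch (illustrative): a transport's receive path that has a
 * complete header plus payload typically hands the PDU off like this and
 * escalates a non-zero ISCSI_ERR_* return code into a connection failure:
 *
 *	rc = iscsi_complete_pdu(conn, hdr, data, datalen);
 *	if (rc)
 *		iscsi_conn_failure(conn, rc);
 */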
1294 
1295 int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
1296 {
1297 	struct iscsi_session *session = conn->session;
1298 	int age = 0, i = 0;
1299 
1300 	if (itt == RESERVED_ITT)
1301 		return 0;
1302 
1303 	if (session->tt->parse_pdu_itt)
1304 		session->tt->parse_pdu_itt(conn, itt, &i, &age);
1305 	else {
1306 		i = get_itt(itt);
1307 		age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
1308 	}
1309 
1310 	if (age != session->age) {
1311 		iscsi_conn_printk(KERN_ERR, conn,
1312 				  "received itt %x expected session age (%x)\n",
1313 				  (__force u32)itt, session->age);
1314 		return ISCSI_ERR_BAD_ITT;
1315 	}
1316 
1317 	if (i >= session->cmds_max) {
1318 		iscsi_conn_printk(KERN_ERR, conn,
1319 				  "received invalid itt index %u (max cmds "
1320 				   "%u).\n", i, session->cmds_max);
1321 		return ISCSI_ERR_BAD_ITT;
1322 	}
1323 	return 0;
1324 }
1325 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
1326 
1327 /**
1328  * iscsi_itt_to_ctask - look up ctask by itt
1329  * @conn: iscsi connection
1330  * @itt: itt
1331  *
1332  * This should be used for cmd tasks.
1333  *
1334  * The session back_lock must be held.
1335  */
1336 struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
1337 {
1338 	struct iscsi_task *task;
1339 
1340 	if (iscsi_verify_itt(conn, itt))
1341 		return NULL;
1342 
1343 	task = iscsi_itt_to_task(conn, itt);
1344 	if (!task || !task->sc)
1345 		return NULL;
1346 
1347 	if (iscsi_cmd(task->sc)->age != conn->session->age) {
1348 		iscsi_session_printk(KERN_ERR, conn->session,
1349 				  "task's session age %d, expected %d\n",
1350 				  iscsi_cmd(task->sc)->age, conn->session->age);
1351 		return NULL;
1352 	}
1353 
1354 	return task;
1355 }
1356 EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
1357 
1358 void iscsi_session_failure(struct iscsi_session *session,
1359 			   enum iscsi_err err)
1360 {
1361 	struct iscsi_conn *conn;
1362 
1363 	spin_lock_bh(&session->frwd_lock);
1364 	conn = session->leadconn;
1365 	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
1366 		spin_unlock_bh(&session->frwd_lock);
1367 		return;
1368 	}
1369 
1370 	iscsi_get_conn(conn->cls_conn);
1371 	spin_unlock_bh(&session->frwd_lock);
1372 	/*
1373 	 * if the host is being removed bypass the connection
1374 	 * recovery initialization because we are going to kill
1375 	 * the session.
1376 	 */
1377 	if (err == ISCSI_ERR_INVALID_HOST)
1378 		iscsi_conn_error_event(conn->cls_conn, err);
1379 	else
1380 		iscsi_conn_failure(conn, err);
1381 	iscsi_put_conn(conn->cls_conn);
1382 }
1383 EXPORT_SYMBOL_GPL(iscsi_session_failure);
1384 
1385 static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
1386 {
1387 	struct iscsi_session *session = conn->session;
1388 
1389 	if (session->state == ISCSI_STATE_FAILED)
1390 		return false;
1391 
1392 	if (conn->stop_stage == 0)
1393 		session->state = ISCSI_STATE_FAILED;
1394 
1395 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1396 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1397 	return true;
1398 }
1399 
1400 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
1401 {
1402 	struct iscsi_session *session = conn->session;
1403 	bool needs_evt;
1404 
1405 	spin_lock_bh(&session->frwd_lock);
1406 	needs_evt = iscsi_set_conn_failed(conn);
1407 	spin_unlock_bh(&session->frwd_lock);
1408 
1409 	if (needs_evt)
1410 		iscsi_conn_error_event(conn->cls_conn, err);
1411 }
1412 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
1413 
1414 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
1415 {
1416 	struct iscsi_session *session = conn->session;
1417 
1418 	/*
1419 	 * Check for iSCSI window and take care of CmdSN wrap-around
1420 	 */
1421 	if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
1422 		ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
1423 				  "%u MaxCmdSN %u CmdSN %u/%u\n",
1424 				  session->exp_cmdsn, session->max_cmdsn,
1425 				  session->cmdsn, session->queued_cmdsn);
1426 		return -ENOSPC;
1427 	}
1428 	return 0;
1429 }
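
/*
 * Worked example (illustrative): with exp_cmdsn 100, max_cmdsn 163 and
 * queued_cmdsn 164, iscsi_sna_lte(164, 163) is false, so the window is
 * treated as closed and iscsi_queuecommand() bounces the command back to
 * the SCSI layer with SCSI_MLQUEUE_TARGET_BUSY until the target widens
 * MaxCmdSN.
 */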
1430 
1431 static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
1432 			   bool was_requeue)
1433 {
1434 	int rc;
1435 
1436 	spin_lock_bh(&conn->session->back_lock);
1437 
1438 	if (!conn->task) {
1439 		/* Take a ref so we can access it after xmit_task() */
1440 		__iscsi_get_task(task);
1441 	} else {
1442 		/* Already have a ref from when we failed to send it last call */
1443 		conn->task = NULL;
1444 	}
1445 
1446 	/*
1447 	 * If this was a requeue for an R2T we have an extra ref on the task in
1448 	 * case a bad target sends a cmd rsp before we have handled the task.
1449 	 */
1450 	if (was_requeue)
1451 		__iscsi_put_task(task);
1452 
1453 	/*
1454 	 * Do this after dropping the extra ref because if this was a requeue
1455 	 * it's removed from that list and cleanup_queued_task would miss it.
1456 	 */
1457 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1458 		/*
1459 		 * Save the task and ref in case we weren't cleaning up this
1460 		 * task and get woken up again.
1461 		 */
1462 		conn->task = task;
1463 		spin_unlock_bh(&conn->session->back_lock);
1464 		return -ENODATA;
1465 	}
1466 	spin_unlock_bh(&conn->session->back_lock);
1467 
1468 	spin_unlock_bh(&conn->session->frwd_lock);
1469 	rc = conn->session->tt->xmit_task(task);
1470 	spin_lock_bh(&conn->session->frwd_lock);
1471 	if (!rc) {
1472 		/* done with this task */
1473 		task->last_xfer = jiffies;
1474 	}
1475 	/* regular RX path uses back_lock */
1476 	spin_lock(&conn->session->back_lock);
1477 	if (rc && task->state == ISCSI_TASK_RUNNING) {
1478 		/*
1479 		 * get an extra ref that is released next time we access it
1480 		 * as conn->task above.
1481 		 */
1482 		__iscsi_get_task(task);
1483 		conn->task = task;
1484 	}
1485 
1486 	__iscsi_put_task(task);
1487 	spin_unlock(&conn->session->back_lock);
1488 	return rc;
1489 }
1490 
1491 /**
1492  * iscsi_requeue_task - requeue task to run from session workqueue
1493  * @task: task to requeue
1494  *
1495  * Callers must have taken a ref to the task that is going to be requeued.
1496  */
1497 void iscsi_requeue_task(struct iscsi_task *task)
1498 {
1499 	struct iscsi_conn *conn = task->conn;
1500 
1501 	/*
1502 	 * this may be on the requeue list already if the xmit_task callout
1503 	 * is handling the r2ts while we are adding new ones
1504 	 */
1505 	spin_lock_bh(&conn->session->frwd_lock);
1506 	if (list_empty(&task->running)) {
1507 		list_add_tail(&task->running, &conn->requeue);
1508 	} else {
1509 		/*
1510 		 * Don't need the extra ref since it's already requeued and
1511 		 * has a ref.
1512 		 */
1513 		iscsi_put_task(task);
1514 	}
1515 	iscsi_conn_queue_work(conn);
1516 	spin_unlock_bh(&conn->session->frwd_lock);
1517 }
1518 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
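
/*
 * Usage sketch (illustrative): a transport that has just queued a new R2T
 * for a task and wants the xmit work to service it would do something like:
 *
 *	__iscsi_get_task(task);		(this ref is consumed by the requeue path)
 *	iscsi_requeue_task(task);
 *
 * If the task is already on the requeue list, iscsi_requeue_task() drops
 * the extra reference itself, as the code above shows.
 */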
1519 
1520 /**
1521  * iscsi_data_xmit - xmit any command into the scheduled connection
1522  * @conn: iscsi connection
1523  *
1524  * Notes:
1525  *	The function can return -EAGAIN, in which case the caller must
1526  *	reschedule it later or recover. A '0' return code means
1527  *	successful xmit.
1528  **/
1529 static int iscsi_data_xmit(struct iscsi_conn *conn)
1530 {
1531 	struct iscsi_task *task;
1532 	int rc = 0;
1533 
1534 	spin_lock_bh(&conn->session->frwd_lock);
1535 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1536 		ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
1537 		spin_unlock_bh(&conn->session->frwd_lock);
1538 		return -ENODATA;
1539 	}
1540 
1541 	if (conn->task) {
1542 		rc = iscsi_xmit_task(conn, conn->task, false);
1543 		if (rc)
1544 			goto done;
1545 	}
1546 
1547 	/*
1548 	 * process mgmt pdus like nops before commands since we should
1549 	 * only have one nop-out as a ping from us and targets should not
1550 	 * overflow us with nop-ins
1551 	 */
1552 check_mgmt:
1553 	while (!list_empty(&conn->mgmtqueue)) {
1554 		task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
1555 				  running);
1556 		list_del_init(&task->running);
1557 		if (iscsi_prep_mgmt_task(conn, task)) {
1558 			/* regular RX path uses back_lock */
1559 			spin_lock_bh(&conn->session->back_lock);
1560 			__iscsi_put_task(task);
1561 			spin_unlock_bh(&conn->session->back_lock);
1562 			continue;
1563 		}
1564 		rc = iscsi_xmit_task(conn, task, false);
1565 		if (rc)
1566 			goto done;
1567 	}
1568 
1569 	/* process pending command queue */
1570 	while (!list_empty(&conn->cmdqueue)) {
1571 		task = list_entry(conn->cmdqueue.next, struct iscsi_task,
1572 				  running);
1573 		list_del_init(&task->running);
1574 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1575 			fail_scsi_task(task, DID_IMM_RETRY);
1576 			continue;
1577 		}
1578 		rc = iscsi_prep_scsi_cmd_pdu(task);
1579 		if (rc) {
1580 			if (rc == -ENOMEM || rc == -EACCES)
1581 				fail_scsi_task(task, DID_IMM_RETRY);
1582 			else
1583 				fail_scsi_task(task, DID_ABORT);
1584 			continue;
1585 		}
1586 		rc = iscsi_xmit_task(conn, task, false);
1587 		if (rc)
1588 			goto done;
1589 		/*
1590 		 * we could continuously get new task requests so
1591 		 * we need to check the mgmt queue for nops that need to
1592 		 * be sent to avoid starvation
1593 		 */
1594 		if (!list_empty(&conn->mgmtqueue))
1595 			goto check_mgmt;
1596 	}
1597 
1598 	while (!list_empty(&conn->requeue)) {
1599 		/*
1600 		 * we always do fastlogout - conn stop code will clean up.
1601 		 */
1602 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1603 			break;
1604 
1605 		task = list_entry(conn->requeue.next, struct iscsi_task,
1606 				  running);
1607 
1608 		if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
1609 			break;
1610 
1611 		list_del_init(&task->running);
1612 		rc = iscsi_xmit_task(conn, task, true);
1613 		if (rc)
1614 			goto done;
1615 		if (!list_empty(&conn->mgmtqueue))
1616 			goto check_mgmt;
1617 	}
1618 	spin_unlock_bh(&conn->session->frwd_lock);
1619 	return -ENODATA;
1620 
1621 done:
1622 	spin_unlock_bh(&conn->session->frwd_lock);
1623 	return rc;
1624 }
1625 
1626 static void iscsi_xmitworker(struct work_struct *work)
1627 {
1628 	struct iscsi_conn *conn =
1629 		container_of(work, struct iscsi_conn, xmitwork);
1630 	int rc;
1631 	/*
1632 	 * serialize Xmit worker on a per-connection basis.
1633 	 */
1634 	do {
1635 		rc = iscsi_data_xmit(conn);
1636 	} while (rc >= 0 || rc == -EAGAIN);
1637 }
1638 
1639 static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1640 						  struct scsi_cmnd *sc)
1641 {
1642 	struct iscsi_task *task;
1643 
1644 	if (!kfifo_out(&conn->session->cmdpool.queue,
1645 			 (void *) &task, sizeof(void *)))
1646 		return NULL;
1647 
1648 	iscsi_cmd(sc)->age = conn->session->age;
1649 	iscsi_cmd(sc)->task = task;
1650 
1651 	refcount_set(&task->refcount, 1);
1652 	task->state = ISCSI_TASK_PENDING;
1653 	task->conn = conn;
1654 	task->sc = sc;
1655 	task->have_checked_conn = false;
1656 	task->last_timeout = jiffies;
1657 	task->last_xfer = jiffies;
1658 	task->protected = false;
1659 	INIT_LIST_HEAD(&task->running);
1660 	return task;
1661 }
1662 
1663 enum {
1664 	FAILURE_BAD_HOST = 1,
1665 	FAILURE_SESSION_FAILED,
1666 	FAILURE_SESSION_FREED,
1667 	FAILURE_WINDOW_CLOSED,
1668 	FAILURE_OOM,
1669 	FAILURE_SESSION_TERMINATE,
1670 	FAILURE_SESSION_IN_RECOVERY,
1671 	FAILURE_SESSION_RECOVERY_TIMEOUT,
1672 	FAILURE_SESSION_LOGGING_OUT,
1673 	FAILURE_SESSION_NOT_READY,
1674 };
1675 
1676 int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1677 {
1678 	struct iscsi_cls_session *cls_session;
1679 	struct iscsi_host *ihost;
1680 	int reason = 0;
1681 	struct iscsi_session *session;
1682 	struct iscsi_conn *conn;
1683 	struct iscsi_task *task = NULL;
1684 
1685 	sc->result = 0;
1686 	iscsi_cmd(sc)->task = NULL;
1687 
1688 	ihost = shost_priv(host);
1689 
1690 	cls_session = starget_to_session(scsi_target(sc->device));
1691 	session = cls_session->dd_data;
1692 	spin_lock_bh(&session->frwd_lock);
1693 
1694 	reason = iscsi_session_chkready(cls_session);
1695 	if (reason) {
1696 		sc->result = reason;
1697 		goto fault;
1698 	}
1699 
1700 	if (session->state != ISCSI_STATE_LOGGED_IN) {
1701 		/*
1702 		 * To handle the race between when we set the recovery state
1703 		 * and when we block the session, we requeue here (commands
1704 		 * could be entering our queuecommand while a block is
1705 		 * starting up, because the block code is not locked)
1706 		 */
1707 		switch (session->state) {
1708 		case ISCSI_STATE_FAILED:
1709 			/*
1710 			 * cmds should fail during shutdown if the session
1711 			 * state is bad, allowing completion to happen
1712 			 */
1713 			if (unlikely(system_state != SYSTEM_RUNNING)) {
1714 				reason = FAILURE_SESSION_FAILED;
1715 				sc->result = DID_NO_CONNECT << 16;
1716 				break;
1717 			}
1718 			fallthrough;
1719 		case ISCSI_STATE_IN_RECOVERY:
1720 			reason = FAILURE_SESSION_IN_RECOVERY;
1721 			sc->result = DID_IMM_RETRY << 16;
1722 			break;
1723 		case ISCSI_STATE_LOGGING_OUT:
1724 			reason = FAILURE_SESSION_LOGGING_OUT;
1725 			sc->result = DID_IMM_RETRY << 16;
1726 			break;
1727 		case ISCSI_STATE_RECOVERY_FAILED:
1728 			reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1729 			sc->result = DID_TRANSPORT_FAILFAST << 16;
1730 			break;
1731 		case ISCSI_STATE_TERMINATE:
1732 			reason = FAILURE_SESSION_TERMINATE;
1733 			sc->result = DID_NO_CONNECT << 16;
1734 			break;
1735 		default:
1736 			reason = FAILURE_SESSION_FREED;
1737 			sc->result = DID_NO_CONNECT << 16;
1738 		}
1739 		goto fault;
1740 	}
1741 
1742 	conn = session->leadconn;
1743 	if (!conn) {
1744 		reason = FAILURE_SESSION_FREED;
1745 		sc->result = DID_NO_CONNECT << 16;
1746 		goto fault;
1747 	}
1748 
1749 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1750 		reason = FAILURE_SESSION_IN_RECOVERY;
1751 		sc->result = DID_REQUEUE << 16;
1752 		goto fault;
1753 	}
1754 
1755 	if (iscsi_check_cmdsn_window_closed(conn)) {
1756 		reason = FAILURE_WINDOW_CLOSED;
1757 		goto reject;
1758 	}
1759 
1760 	task = iscsi_alloc_task(conn, sc);
1761 	if (!task) {
1762 		reason = FAILURE_OOM;
1763 		goto reject;
1764 	}
1765 
1766 	if (!ihost->workq) {
1767 		reason = iscsi_prep_scsi_cmd_pdu(task);
1768 		if (reason) {
1769 			if (reason == -ENOMEM ||  reason == -EACCES) {
1770 				reason = FAILURE_OOM;
1771 				goto prepd_reject;
1772 			} else {
1773 				sc->result = DID_ABORT << 16;
1774 				goto prepd_fault;
1775 			}
1776 		}
1777 		if (session->tt->xmit_task(task)) {
1778 			session->cmdsn--;
1779 			reason = FAILURE_SESSION_NOT_READY;
1780 			goto prepd_reject;
1781 		}
1782 	} else {
1783 		list_add_tail(&task->running, &conn->cmdqueue);
1784 		iscsi_conn_queue_work(conn);
1785 	}
1786 
1787 	session->queued_cmdsn++;
1788 	spin_unlock_bh(&session->frwd_lock);
1789 	return 0;
1790 
1791 prepd_reject:
1792 	spin_lock_bh(&session->back_lock);
1793 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1794 	spin_unlock_bh(&session->back_lock);
1795 reject:
1796 	spin_unlock_bh(&session->frwd_lock);
1797 	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
1798 			  sc->cmnd[0], reason);
1799 	return SCSI_MLQUEUE_TARGET_BUSY;
1800 
1801 prepd_fault:
1802 	spin_lock_bh(&session->back_lock);
1803 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1804 	spin_unlock_bh(&session->back_lock);
1805 fault:
1806 	spin_unlock_bh(&session->frwd_lock);
1807 	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
1808 			  sc->cmnd[0], reason);
1809 	scsi_set_resid(sc, scsi_bufflen(sc));
1810 	scsi_done(sc);
1811 	return 0;
1812 }
1813 EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1814 
1815 int iscsi_target_alloc(struct scsi_target *starget)
1816 {
1817 	struct iscsi_cls_session *cls_session = starget_to_session(starget);
1818 	struct iscsi_session *session = cls_session->dd_data;
1819 
1820 	starget->can_queue = session->scsi_cmds_max;
1821 	return 0;
1822 }
1823 EXPORT_SYMBOL_GPL(iscsi_target_alloc);
1824 
1825 static void iscsi_tmf_timedout(struct timer_list *t)
1826 {
1827 	struct iscsi_session *session = from_timer(session, t, tmf_timer);
1828 
1829 	spin_lock(&session->frwd_lock);
1830 	if (session->tmf_state == TMF_QUEUED) {
1831 		session->tmf_state = TMF_TIMEDOUT;
1832 		ISCSI_DBG_EH(session, "tmf timedout\n");
1833 		/* unblock eh_abort() */
1834 		wake_up(&session->ehwait);
1835 	}
1836 	spin_unlock(&session->frwd_lock);
1837 }
1838 
1839 static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1840 				   struct iscsi_tm *hdr, int age,
1841 				   int timeout)
1842 	__must_hold(&session->frwd_lock)
1843 {
1844 	struct iscsi_session *session = conn->session;
1845 	struct iscsi_task *task;
1846 
1847 	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1848 				      NULL, 0);
1849 	if (!task) {
1850 		spin_unlock_bh(&session->frwd_lock);
1851 		iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
1852 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1853 		spin_lock_bh(&session->frwd_lock);
1854 		return -EPERM;
1855 	}
1856 	conn->tmfcmd_pdus_cnt++;
1857 	session->tmf_timer.expires = timeout * HZ + jiffies;
1858 	add_timer(&session->tmf_timer);
1859 	ISCSI_DBG_EH(session, "tmf set timeout\n");
1860 
1861 	spin_unlock_bh(&session->frwd_lock);
1862 	mutex_unlock(&session->eh_mutex);
1863 
1864 	/*
1865 	 * block eh thread until:
1866 	 *
1867 	 * 1) tmf response
1868 	 * 2) tmf timeout
1869 	 * 3) session is terminated or restarted or userspace has
1870 	 * given up on recovery
1871 	 */
1872 	wait_event_interruptible(session->ehwait, age != session->age ||
1873 				 session->state != ISCSI_STATE_LOGGED_IN ||
1874 				 session->tmf_state != TMF_QUEUED);
1875 	if (signal_pending(current))
1876 		flush_signals(current);
1877 	del_timer_sync(&session->tmf_timer);
1878 
1879 	mutex_lock(&session->eh_mutex);
1880 	spin_lock_bh(&session->frwd_lock);
1881 	/* if the session drops it will clean up the task */
1882 	if (age != session->age ||
1883 	    session->state != ISCSI_STATE_LOGGED_IN)
1884 		return -ENOTCONN;
1885 	return 0;
1886 }
1887 
1888 /*
1889  * Fail commands. Session frwd lock must be held and the xmit thread flushed.
1890  */
1891 static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
1892 {
1893 	struct iscsi_session *session = conn->session;
1894 	struct iscsi_task *task;
1895 	int i;
1896 
1897 	spin_lock_bh(&session->back_lock);
1898 	for (i = 0; i < session->cmds_max; i++) {
1899 		task = session->cmds[i];
1900 		if (!task->sc || task->state == ISCSI_TASK_FREE)
1901 			continue;
1902 
1903 		if (lun != -1 && lun != task->sc->device->lun)
1904 			continue;
1905 
1906 		__iscsi_get_task(task);
1907 		spin_unlock_bh(&session->back_lock);
1908 
1909 		ISCSI_DBG_SESSION(session,
1910 				  "failing sc %p itt 0x%x state %d\n",
1911 				  task->sc, task->itt, task->state);
1912 		fail_scsi_task(task, error);
1913 
1914 		spin_unlock_bh(&session->frwd_lock);
1915 		iscsi_put_task(task);
1916 		spin_lock_bh(&session->frwd_lock);
1917 
1918 		spin_lock_bh(&session->back_lock);
1919 	}
1920 
1921 	spin_unlock_bh(&session->back_lock);
1922 }
1923 
1924 /**
1925  * iscsi_suspend_queue - suspend iscsi_queuecommand
1926  * @conn: iscsi conn to stop queueing IO on
1927  *
1928  * This grabs the session frwd_lock to make sure no one is in
1929  * xmit_task/queuecommand, and then sets suspend to prevent
1930  * new commands from being queued. This only needs to be called
1931  * by offload drivers that need to sync a path like ep disconnect
1932  * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
1933  * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
1934  */
1935 void iscsi_suspend_queue(struct iscsi_conn *conn)
1936 {
1937 	spin_lock_bh(&conn->session->frwd_lock);
1938 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1939 	spin_unlock_bh(&conn->session->frwd_lock);
1940 }
1941 EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
1942 
1943 /**
1944  * iscsi_suspend_tx - suspend iscsi_data_xmit
1945  * @conn: iscsi conn to stop processing IO on.
1946  *
1947  * This function sets the suspend bit to prevent iscsi_data_xmit
1948  * from sending new IO, and if work is queued on the xmit thread
1949  * it will wait for it to be completed.
1950  */
1951 void iscsi_suspend_tx(struct iscsi_conn *conn)
1952 {
1953 	struct Scsi_Host *shost = conn->session->host;
1954 	struct iscsi_host *ihost = shost_priv(shost);
1955 
1956 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1957 	if (ihost->workq)
1958 		flush_workqueue(ihost->workq);
1959 }
1960 EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1961 
1962 static void iscsi_start_tx(struct iscsi_conn *conn)
1963 {
1964 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1965 	iscsi_conn_queue_work(conn);
1966 }
1967 
1968 /*
1969  * We want to make sure a ping is in flight and that it has timed out,
1970  * and that we are not busy processing a pdu that is making progress
1971  * but got started before the ping and is taking a while to complete,
1972  * so the ping is just stuck behind it in a queue.
1973  */
1974 static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1975 {
1976 	if (READ_ONCE(conn->ping_task) &&
1977 	    time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1978 			   (conn->ping_timeout * HZ), jiffies))
1979 		return 1;
1980 	else
1981 		return 0;
1982 }
1983 
1984 enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1985 {
1986 	enum blk_eh_timer_return rc = BLK_EH_DONE;
1987 	struct iscsi_task *task = NULL, *running_task;
1988 	struct iscsi_cls_session *cls_session;
1989 	struct iscsi_session *session;
1990 	struct iscsi_conn *conn;
1991 	int i;
1992 
1993 	cls_session = starget_to_session(scsi_target(sc->device));
1994 	session = cls_session->dd_data;
1995 
1996 	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
1997 
1998 	spin_lock_bh(&session->frwd_lock);
1999 	spin_lock(&session->back_lock);
2000 	task = iscsi_cmd(sc)->task;
2001 	if (!task) {
2002 		/*
2003 		 * Raced with completion. Blk layer has taken ownership
2004 		 * so let timeout code complete it now.
2005 		 */
2006 		rc = BLK_EH_DONE;
2007 		spin_unlock(&session->back_lock);
2008 		goto done;
2009 	}
2010 	__iscsi_get_task(task);
2011 	spin_unlock(&session->back_lock);
2012 
2013 	if (session->state != ISCSI_STATE_LOGGED_IN) {
2014 		/*
2015 		 * During shutdown, if session is prematurely disconnected,
2016 		 * recovery won't happen and there will be hung cmds. Not
2017 		 * handling cmds would trigger EH, also bad in this case.
2018 		 * Instead, handle cmd, allow completion to happen and let
2019 		 * Instead, handle the cmd, allow completion to happen and let
2020 		 * the upper layer deal with the result.
2021 		if (unlikely(system_state != SYSTEM_RUNNING)) {
2022 			sc->result = DID_NO_CONNECT << 16;
2023 			ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
2024 			rc = BLK_EH_DONE;
2025 			goto done;
2026 		}
2027 		/*
2028 		 * We are probably in the middle of iscsi recovery so let
2029 		 * that complete and handle the error.
2030 		 */
2031 		rc = BLK_EH_RESET_TIMER;
2032 		goto done;
2033 	}
2034 
2035 	conn = session->leadconn;
2036 	if (!conn) {
2037 		/* In the middle of shutting down */
2038 		rc = BLK_EH_RESET_TIMER;
2039 		goto done;
2040 	}
2041 
2042 	/*
2043 	 * If we have sent (at least queued to the network layer) a pdu or
2044 	 * recvd one for the task since the last timeout, ask for
2045 	 * more time. If on the next timeout we have not made progress
2046 	 * we can check if it is the task or connection when we send the
2047 	 * nop as a ping.
2048 	 */
2049 	if (time_after(task->last_xfer, task->last_timeout)) {
2050 		ISCSI_DBG_EH(session, "Command making progress. Asking "
2051 			     "scsi-ml for more time to complete. "
2052 			     "Last data xfer at %lu. Last timeout was at "
2053 			     "%lu.\n", task->last_xfer, task->last_timeout);
2054 		task->have_checked_conn = false;
2055 		rc = BLK_EH_RESET_TIMER;
2056 		goto done;
2057 	}
2058 
2059 	if (!conn->recv_timeout && !conn->ping_timeout)
2060 		goto done;
2061 	/*
2062 	 * if the ping timedout then we are in the middle of cleaning up
2063 	 * and can let the iscsi eh handle it
2064 	 */
2065 	if (iscsi_has_ping_timed_out(conn)) {
2066 		rc = BLK_EH_RESET_TIMER;
2067 		goto done;
2068 	}
2069 
2070 	spin_lock(&session->back_lock);
2071 	for (i = 0; i < conn->session->cmds_max; i++) {
2072 		running_task = conn->session->cmds[i];
2073 		if (!running_task->sc || running_task == task ||
2074 		     running_task->state != ISCSI_TASK_RUNNING)
2075 			continue;
2076 
2077 		/*
2078 		 * Only check whether cmds started before this one have made
2079 		 * progress; otherwise this check could never fail.
2080 		 */
2081 		if (time_after(running_task->sc->jiffies_at_alloc,
2082 			       task->sc->jiffies_at_alloc))
2083 			continue;
2084 
2085 		if (time_after(running_task->last_xfer, task->last_timeout)) {
2086 			/*
2087 			 * This task has not made progress, but a task
2088 			 * started before us has transferred data since
2089 			 * we started/last-checked. We could be queueing
2090 			 * too many tasks or the LU is bad.
2091 			 *
2092 			 * If the device is bad the cmds ahead of us on
2093 			 * other devs will complete, and this loop will
2094 			 * eventually fail starting the scsi eh.
2095 			 */
2096 			ISCSI_DBG_EH(session, "Command has not made progress "
2097 				     "but commands ahead of it have. "
2098 				     "Asking scsi-ml for more time to "
2099 				     "complete. Our last xfer vs running task "
2100 				     "last xfer %lu/%lu. Last check %lu.\n",
2101 				     task->last_xfer, running_task->last_xfer,
2102 				     task->last_timeout);
2103 			spin_unlock(&session->back_lock);
2104 			rc = BLK_EH_RESET_TIMER;
2105 			goto done;
2106 		}
2107 	}
2108 	spin_unlock(&session->back_lock);
2109 
2110 	/* Assumes nop timeout is shorter than scsi cmd timeout */
2111 	if (task->have_checked_conn)
2112 		goto done;
2113 
2114 	/*
2115 	 * We are already checking the transport, or a nop from a prior cmd
2116 	 * timeout is still outstanding.
2117 	 */
2118 	if (READ_ONCE(conn->ping_task)) {
2119 		task->have_checked_conn = true;
2120 		rc = BLK_EH_RESET_TIMER;
2121 		goto done;
2122 	}
2123 
2124 	/* Make sure there is a transport check done */
2125 	iscsi_send_nopout(conn, NULL);
2126 	task->have_checked_conn = true;
2127 	rc = BLK_EH_RESET_TIMER;
2128 
2129 done:
2130 	spin_unlock_bh(&session->frwd_lock);
2131 
2132 	if (task) {
2133 		task->last_timeout = jiffies;
2134 		iscsi_put_task(task);
2135 	}
2136 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
2137 		     "timer reset" : "shutdown or eh");
2138 	return rc;
2139 }
2140 EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
2141 
2142 static void iscsi_check_transport_timeouts(struct timer_list *t)
2143 {
2144 	struct iscsi_conn *conn = from_timer(conn, t, transport_timer);
2145 	struct iscsi_session *session = conn->session;
2146 	unsigned long recv_timeout, next_timeout = 0, last_recv;
2147 
2148 	spin_lock(&session->frwd_lock);
2149 	if (session->state != ISCSI_STATE_LOGGED_IN)
2150 		goto done;
2151 
2152 	recv_timeout = conn->recv_timeout;
2153 	if (!recv_timeout)
2154 		goto done;
2155 
2156 	recv_timeout *= HZ;
2157 	last_recv = conn->last_recv;
2158 
2159 	if (iscsi_has_ping_timed_out(conn)) {
2160 		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
2161 				  "expired, recv timeout %d, last rx %lu, "
2162 				  "last ping %lu, now %lu\n",
2163 				  conn->ping_timeout, conn->recv_timeout,
2164 				  last_recv, conn->last_ping, jiffies);
2165 		spin_unlock(&session->frwd_lock);
2166 		iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT);
2167 		return;
2168 	}
2169 
2170 	if (time_before_eq(last_recv + recv_timeout, jiffies)) {
2171 		/* send a ping to try to provoke some traffic */
2172 		ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
2173 		if (iscsi_send_nopout(conn, NULL))
2174 			next_timeout = jiffies + (1 * HZ);
2175 		else
2176 			next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
2177 	} else
2178 		next_timeout = last_recv + recv_timeout;
2179 
2180 	ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
2181 	mod_timer(&conn->transport_timer, next_timeout);
2182 done:
2183 	spin_unlock(&session->frwd_lock);
2184 }
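
/*
 * Editorial note (not part of libiscsi.c): a worked example of the timer
 * interplay above, assuming recv_timeout = 5 and ping_timeout = 5 seconds.
 * If no PDU arrives for 5 seconds, iscsi_check_transport_timeouts() sends a
 * nopout as a ping and re-arms the timer for conn->last_ping + 5 * HZ.  If
 * nothing is received before last_recv + (5 + 5) * HZ either, the next timer
 * run sees iscsi_has_ping_timed_out() return true and fails the connection
 * with ISCSI_ERR_NOP_TIMEDOUT, which kicks off recovery.
 */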
2185 
2186 /**
2187  * iscsi_conn_unbind - prevent queueing to conn.
2188  * @cls_conn: iscsi conn ep is bound to.
2189  * @is_active: is the conn in use for boot or is this for EH/termination
2190  *
2191  * This must be called by drivers implementing the ep_disconnect callout.
2192  * It disables queueing to the connection from libiscsi in preparation for
2193  * an ep_disconnect call.
2194  */
2195 void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
2196 {
2197 	struct iscsi_session *session;
2198 	struct iscsi_conn *conn;
2199 
2200 	if (!cls_conn)
2201 		return;
2202 
2203 	conn = cls_conn->dd_data;
2204 	session = conn->session;
2205 	/*
2206 	 * Wait for iscsi_eh calls to exit. We don't wait for the tmf to
2207 	 * complete or timeout. The caller just wants to know that whatever
2208 	 * is running now is everything that needs to be cleaned up, and
2209 	 * that no new cmds will be queued.
2210 	 */
2211 	mutex_lock(&session->eh_mutex);
2212 
2213 	iscsi_suspend_queue(conn);
2214 	iscsi_suspend_tx(conn);
2215 
2216 	spin_lock_bh(&session->frwd_lock);
2217 	if (!is_active) {
2218 		/*
2219 		 * if logout timed out before userspace could even send a PDU
2220 		 * the state might still be in ISCSI_STATE_LOGGED_IN and
2221 		 * allowing new cmds and TMFs.
2222 		 */
2223 		if (session->state == ISCSI_STATE_LOGGED_IN)
2224 			iscsi_set_conn_failed(conn);
2225 	}
2226 	spin_unlock_bh(&session->frwd_lock);
2227 	mutex_unlock(&session->eh_mutex);
2228 }
2229 EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
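
/*
 * Editorial sketch (not part of libiscsi.c): roughly how an offload driver's
 * ep_disconnect callout might use iscsi_conn_unbind() above before tearing
 * down its endpoint.  The "exdrv_" names and struct exdrv_endpoint are
 * hypothetical; only iscsi_conn_unbind() and iscsi_destroy_endpoint() are
 * real libiscsi/transport APIs.
 */
struct exdrv_endpoint {
	struct iscsi_cls_conn	*cls_conn;	/* conn bound to this ep */
	struct sockaddr_storage	remote_addr;	/* saved at connect time */
};

static void exdrv_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct exdrv_endpoint *exdrv_ep = ep->dd_data;

	/*
	 * Stop libiscsi from queueing new cmds/TMFs to the bound conn.
	 * is_active = false: this is EH/termination, not a boot conn, so the
	 * conn is marked failed if it is still logged in.
	 */
	iscsi_conn_unbind(exdrv_ep->cls_conn, false);

	/*
	 * Driver-specific teardown of the offloaded connection would go
	 * here, now that no new IO can race with it.
	 */
	iscsi_destroy_endpoint(ep);
}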
2230 
2231 static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
2232 				      struct iscsi_tm *hdr)
2233 {
2234 	memset(hdr, 0, sizeof(*hdr));
2235 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2236 	hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
2237 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2238 	hdr->lun = task->lun;
2239 	hdr->rtt = task->hdr_itt;
2240 	hdr->refcmdsn = task->cmdsn;
2241 }
2242 
2243 int iscsi_eh_abort(struct scsi_cmnd *sc)
2244 {
2245 	struct iscsi_cls_session *cls_session;
2246 	struct iscsi_session *session;
2247 	struct iscsi_conn *conn;
2248 	struct iscsi_task *task;
2249 	struct iscsi_tm *hdr;
2250 	int age;
2251 
2252 	cls_session = starget_to_session(scsi_target(sc->device));
2253 	session = cls_session->dd_data;
2254 
2255 	ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
2256 
2257 	mutex_lock(&session->eh_mutex);
2258 	spin_lock_bh(&session->frwd_lock);
2259 	/*
2260 	 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
2261 	 * got the command.
2262 	 */
2263 	if (!iscsi_cmd(sc)->task) {
2264 		ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
2265 				      "it completed.\n");
2266 		spin_unlock_bh(&session->frwd_lock);
2267 		mutex_unlock(&session->eh_mutex);
2268 		return SUCCESS;
2269 	}
2270 
2271 	/*
2272 	 * If we are not logged in or we have started a new session
2273 	 * then let the host reset code handle this
2274 	 */
2275 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
2276 	    iscsi_cmd(sc)->age != session->age) {
2277 		spin_unlock_bh(&session->frwd_lock);
2278 		mutex_unlock(&session->eh_mutex);
2279 		ISCSI_DBG_EH(session, "failing abort due to dropped "
2280 				  "session.\n");
2281 		return FAILED;
2282 	}
2283 
2284 	spin_lock(&session->back_lock);
2285 	task = iscsi_cmd(sc)->task;
2286 	if (!task || !task->sc) {
2287 		/* task completed before time out */
2288 		ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
2289 
2290 		spin_unlock(&session->back_lock);
2291 		spin_unlock_bh(&session->frwd_lock);
2292 		mutex_unlock(&session->eh_mutex);
2293 		return SUCCESS;
2294 	}
2295 
2296 	conn = session->leadconn;
2297 	iscsi_get_conn(conn->cls_conn);
2298 	conn->eh_abort_cnt++;
2299 	age = session->age;
2300 
2301 	ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
2302 	__iscsi_get_task(task);
2303 	spin_unlock(&session->back_lock);
2304 
2305 	if (task->state == ISCSI_TASK_PENDING) {
2306 		fail_scsi_task(task, DID_ABORT);
2307 		goto success;
2308 	}
2309 
2310 	/* only have one tmf outstanding at a time */
2311 	if (session->tmf_state != TMF_INITIAL)
2312 		goto failed;
2313 	session->tmf_state = TMF_QUEUED;
2314 
2315 	hdr = &session->tmhdr;
2316 	iscsi_prep_abort_task_pdu(task, hdr);
2317 
2318 	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
2319 		goto failed;
2320 
2321 	switch (session->tmf_state) {
2322 	case TMF_SUCCESS:
2323 		spin_unlock_bh(&session->frwd_lock);
2324 		/*
2325 		 * stop the tx side in case the target had sent an abort rsp
2326 		 * but the initiator was still writing out data.
2327 		 */
2328 		iscsi_suspend_tx(conn);
2329 		/*
2330 		 * we do not stop the recv side because targets have been
2331 		 * well behaved and have never sent us a successful tmf
2332 		 * response and then sent more data for the cmd.
2333 		 */
2334 		spin_lock_bh(&session->frwd_lock);
2335 		fail_scsi_task(task, DID_ABORT);
2336 		session->tmf_state = TMF_INITIAL;
2337 		memset(hdr, 0, sizeof(*hdr));
2338 		spin_unlock_bh(&session->frwd_lock);
2339 		iscsi_start_tx(conn);
2340 		goto success_unlocked;
2341 	case TMF_TIMEDOUT:
2342 		session->running_aborted_task = task;
2343 		spin_unlock_bh(&session->frwd_lock);
2344 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2345 		goto failed_unlocked;
2346 	case TMF_NOT_FOUND:
2347 		if (iscsi_task_is_completed(task)) {
2348 			session->tmf_state = TMF_INITIAL;
2349 			memset(hdr, 0, sizeof(*hdr));
2350 			/* task completed before tmf abort response */
2351 			ISCSI_DBG_EH(session, "sc completed while abort in "
2352 					      "progress\n");
2353 			goto success;
2354 		}
2355 		fallthrough;
2356 	default:
2357 		session->tmf_state = TMF_INITIAL;
2358 		goto failed;
2359 	}
2360 
2361 success:
2362 	spin_unlock_bh(&session->frwd_lock);
2363 success_unlocked:
2364 	ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
2365 		     sc, task->itt);
2366 	iscsi_put_task(task);
2367 	iscsi_put_conn(conn->cls_conn);
2368 	mutex_unlock(&session->eh_mutex);
2369 	return SUCCESS;
2370 
2371 failed:
2372 	spin_unlock_bh(&session->frwd_lock);
2373 failed_unlocked:
2374 	ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
2375 		     task ? task->itt : 0);
2376 	/*
2377 	 * The driver might be accessing the task so hold the ref. The conn
2378 	 * stop cleanup will drop the ref after ep_disconnect so we know the
2379 	 * driver's no longer touching the task.
2380 	 */
2381 	if (!session->running_aborted_task)
2382 		iscsi_put_task(task);
2383 
2384 	iscsi_put_conn(conn->cls_conn);
2385 	mutex_unlock(&session->eh_mutex);
2386 	return FAILED;
2387 }
2388 EXPORT_SYMBOL_GPL(iscsi_eh_abort);
2389 
2390 static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2391 {
2392 	memset(hdr, 0, sizeof(*hdr));
2393 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2394 	hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2395 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2396 	int_to_scsilun(sc->device->lun, &hdr->lun);
2397 	hdr->rtt = RESERVED_ITT;
2398 }
2399 
2400 int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2401 {
2402 	struct iscsi_cls_session *cls_session;
2403 	struct iscsi_session *session;
2404 	struct iscsi_conn *conn;
2405 	struct iscsi_tm *hdr;
2406 	int rc = FAILED;
2407 
2408 	cls_session = starget_to_session(scsi_target(sc->device));
2409 	session = cls_session->dd_data;
2410 
2411 	ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc,
2412 		     sc->device->lun);
2413 
2414 	mutex_lock(&session->eh_mutex);
2415 	spin_lock_bh(&session->frwd_lock);
2416 	/*
2417 	 * Just check if we are not logged in. We cannot check for
2418 	 * the phase because the reset could come from an ioctl.
2419 	 */
2420 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2421 		goto unlock;
2422 	conn = session->leadconn;
2423 
2424 	/* only have one tmf outstanding at a time */
2425 	if (session->tmf_state != TMF_INITIAL)
2426 		goto unlock;
2427 	session->tmf_state = TMF_QUEUED;
2428 
2429 	hdr = &session->tmhdr;
2430 	iscsi_prep_lun_reset_pdu(sc, hdr);
2431 
2432 	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2433 				    session->lu_reset_timeout)) {
2434 		rc = FAILED;
2435 		goto unlock;
2436 	}
2437 
2438 	switch (session->tmf_state) {
2439 	case TMF_SUCCESS:
2440 		break;
2441 	case TMF_TIMEDOUT:
2442 		spin_unlock_bh(&session->frwd_lock);
2443 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2444 		goto done;
2445 	default:
2446 		session->tmf_state = TMF_INITIAL;
2447 		goto unlock;
2448 	}
2449 
2450 	rc = SUCCESS;
2451 	spin_unlock_bh(&session->frwd_lock);
2452 
2453 	iscsi_suspend_tx(conn);
2454 
2455 	spin_lock_bh(&session->frwd_lock);
2456 	memset(hdr, 0, sizeof(*hdr));
2457 	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
2458 	session->tmf_state = TMF_INITIAL;
2459 	spin_unlock_bh(&session->frwd_lock);
2460 
2461 	iscsi_start_tx(conn);
2462 	goto done;
2463 
2464 unlock:
2465 	spin_unlock_bh(&session->frwd_lock);
2466 done:
2467 	ISCSI_DBG_EH(session, "dev reset result = %s\n",
2468 		     rc == SUCCESS ? "SUCCESS" : "FAILED");
2469 	mutex_unlock(&session->eh_mutex);
2470 	return rc;
2471 }
2472 EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
2473 
2474 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
2475 {
2476 	struct iscsi_session *session = cls_session->dd_data;
2477 
2478 	spin_lock_bh(&session->frwd_lock);
2479 	if (session->state != ISCSI_STATE_LOGGED_IN) {
2480 		session->state = ISCSI_STATE_RECOVERY_FAILED;
2481 		wake_up(&session->ehwait);
2482 	}
2483 	spin_unlock_bh(&session->frwd_lock);
2484 }
2485 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
2486 
2487 /**
2488  * iscsi_eh_session_reset - drop session and attempt relogin
2489  * @sc: scsi command
2490  *
2491  * This function will wait for a relogin, session termination from
2492  * userspace, or a recovery/replacement timeout.
2493  */
2494 int iscsi_eh_session_reset(struct scsi_cmnd *sc)
2495 {
2496 	struct iscsi_cls_session *cls_session;
2497 	struct iscsi_session *session;
2498 	struct iscsi_conn *conn;
2499 
2500 	cls_session = starget_to_session(scsi_target(sc->device));
2501 	session = cls_session->dd_data;
2502 
2503 	mutex_lock(&session->eh_mutex);
2504 	spin_lock_bh(&session->frwd_lock);
2505 	if (session->state == ISCSI_STATE_TERMINATE) {
2506 failed:
2507 		ISCSI_DBG_EH(session,
2508 			     "failing session reset: Could not log back into "
2509 			     "%s [age %d]\n", session->targetname,
2510 			     session->age);
2511 		spin_unlock_bh(&session->frwd_lock);
2512 		mutex_unlock(&session->eh_mutex);
2513 		return FAILED;
2514 	}
2515 
2516 	conn = session->leadconn;
2517 	iscsi_get_conn(conn->cls_conn);
2518 
2519 	spin_unlock_bh(&session->frwd_lock);
2520 	mutex_unlock(&session->eh_mutex);
2521 
2522 	iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2523 	iscsi_put_conn(conn->cls_conn);
2524 
2525 	ISCSI_DBG_EH(session, "wait for relogin\n");
2526 	wait_event_interruptible(session->ehwait,
2527 				 session->state == ISCSI_STATE_TERMINATE ||
2528 				 session->state == ISCSI_STATE_LOGGED_IN ||
2529 				 session->state == ISCSI_STATE_RECOVERY_FAILED);
2530 	if (signal_pending(current))
2531 		flush_signals(current);
2532 
2533 	mutex_lock(&session->eh_mutex);
2534 	spin_lock_bh(&session->frwd_lock);
2535 	if (session->state == ISCSI_STATE_LOGGED_IN) {
2536 		ISCSI_DBG_EH(session,
2537 			     "session reset succeeded for %s,%s\n",
2538 			     session->targetname, conn->persistent_address);
2539 	} else
2540 		goto failed;
2541 	spin_unlock_bh(&session->frwd_lock);
2542 	mutex_unlock(&session->eh_mutex);
2543 	return SUCCESS;
2544 }
2545 EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
2546 
2547 static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2548 {
2549 	memset(hdr, 0, sizeof(*hdr));
2550 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2551 	hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2552 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2553 	hdr->rtt = RESERVED_ITT;
2554 }
2555 
2556 /**
2557  * iscsi_eh_target_reset - reset target
2558  * @sc: scsi command
2559  *
2560  * This will attempt to send a warm target reset.
2561  */
2562 static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2563 {
2564 	struct iscsi_cls_session *cls_session;
2565 	struct iscsi_session *session;
2566 	struct iscsi_conn *conn;
2567 	struct iscsi_tm *hdr;
2568 	int rc = FAILED;
2569 
2570 	cls_session = starget_to_session(scsi_target(sc->device));
2571 	session = cls_session->dd_data;
2572 
2573 	ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc,
2574 		     session->targetname);
2575 
2576 	mutex_lock(&session->eh_mutex);
2577 	spin_lock_bh(&session->frwd_lock);
2578 	/*
2579 	 * Just check if we are not logged in. We cannot check for
2580 	 * the phase because the reset could come from an ioctl.
2581 	 */
2582 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2583 		goto unlock;
2584 	conn = session->leadconn;
2585 
2586 	/* only have one tmf outstanding at a time */
2587 	if (session->tmf_state != TMF_INITIAL)
2588 		goto unlock;
2589 	session->tmf_state = TMF_QUEUED;
2590 
2591 	hdr = &session->tmhdr;
2592 	iscsi_prep_tgt_reset_pdu(sc, hdr);
2593 
2594 	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2595 				    session->tgt_reset_timeout)) {
2596 		rc = FAILED;
2597 		goto unlock;
2598 	}
2599 
2600 	switch (session->tmf_state) {
2601 	case TMF_SUCCESS:
2602 		break;
2603 	case TMF_TIMEDOUT:
2604 		spin_unlock_bh(&session->frwd_lock);
2605 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2606 		goto done;
2607 	default:
2608 		session->tmf_state = TMF_INITIAL;
2609 		goto unlock;
2610 	}
2611 
2612 	rc = SUCCESS;
2613 	spin_unlock_bh(&session->frwd_lock);
2614 
2615 	iscsi_suspend_tx(conn);
2616 
2617 	spin_lock_bh(&session->frwd_lock);
2618 	memset(hdr, 0, sizeof(*hdr));
2619 	fail_scsi_tasks(conn, -1, DID_ERROR);
2620 	session->tmf_state = TMF_INITIAL;
2621 	spin_unlock_bh(&session->frwd_lock);
2622 
2623 	iscsi_start_tx(conn);
2624 	goto done;
2625 
2626 unlock:
2627 	spin_unlock_bh(&session->frwd_lock);
2628 done:
2629 	ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
2630 		     rc == SUCCESS ? "SUCCESS" : "FAILED");
2631 	mutex_unlock(&session->eh_mutex);
2632 	return rc;
2633 }
2634 
2635 /**
2636  * iscsi_eh_recover_target - reset target and possibly the session
2637  * @sc: scsi command
2638  *
2639  * This will attempt to send a warm target reset. If that fails,
2640  * we will escalate to ERL0 session recovery.
2641  */
2642 int iscsi_eh_recover_target(struct scsi_cmnd *sc)
2643 {
2644 	int rc;
2645 
2646 	rc = iscsi_eh_target_reset(sc);
2647 	if (rc == FAILED)
2648 		rc = iscsi_eh_session_reset(sc);
2649 	return rc;
2650 }
2651 EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
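
/*
 * Editorial sketch (not part of libiscsi.c): how an LLD typically wires the
 * libiscsi error handlers above into its scsi_host_template.  Everything
 * except the libiscsi callbacks (queuecommand, target_alloc and the eh_*
 * entries) is an illustrative placeholder.
 */
static struct scsi_host_template exdrv_sht_example = {
	.module			 = THIS_MODULE,
	.name			 = "example-iscsi",
	.proc_name		 = "example-iscsi",
	.queuecommand		 = iscsi_queuecommand,
	.target_alloc		 = iscsi_target_alloc,
	.eh_timed_out		 = iscsi_eh_cmd_timed_out,
	.eh_abort_handler	 = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.this_id		 = -1,
};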
2652 
2653 /*
2654  * Pre-allocate a pool of @max items of @item_size. By default, the pool
2655  * should be accessed via kfifo_{get,put} on q->queue.
2656  * Optionally, the caller can obtain the array of object pointers
2657  * by passing in a non-NULL @items pointer
2658  */
2659 int
2660 iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2661 {
2662 	int i, num_arrays = 1;
2663 
2664 	memset(q, 0, sizeof(*q));
2665 
2666 	q->max = max;
2667 
2668 	/* If the user passed an items pointer, he wants a copy of
2669 	 * the array. */
2670 	if (items)
2671 		num_arrays++;
2672 	q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL);
2673 	if (q->pool == NULL)
2674 		return -ENOMEM;
2675 
2676 	kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
2677 
2678 	for (i = 0; i < max; i++) {
2679 		q->pool[i] = kzalloc(item_size, GFP_KERNEL);
2680 		if (q->pool[i] == NULL) {
2681 			q->max = i;
2682 			goto enomem;
2683 		}
2684 		kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
2685 	}
2686 
2687 	if (items) {
2688 		*items = q->pool + max;
2689 		memcpy(*items, q->pool, max * sizeof(void *));
2690 	}
2691 
2692 	return 0;
2693 
2694 enomem:
2695 	iscsi_pool_free(q);
2696 	return -ENOMEM;
2697 }
2698 EXPORT_SYMBOL_GPL(iscsi_pool_init);
2699 
2700 void iscsi_pool_free(struct iscsi_pool *q)
2701 {
2702 	int i;
2703 
2704 	for (i = 0; i < q->max; i++)
2705 		kfree(q->pool[i]);
2706 	kvfree(q->pool);
2707 }
2708 EXPORT_SYMBOL_GPL(iscsi_pool_free);
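
/*
 * Editorial sketch (not part of libiscsi.c): a minimal use of the pool
 * helpers above for a hypothetical per-task descriptor, following the same
 * kfifo get/put pattern libiscsi itself uses for its cmdpool.  struct
 * exdrv_r2t and the pool size are assumptions; iscsi_pool_free() releases
 * everything again on teardown.
 */
struct exdrv_r2t {
	u32 ttt;
	u32 data_length;
};

static int exdrv_r2t_pool_alloc(struct iscsi_pool *pool)
{
	/* 16 preallocated descriptors; no copy of the pointer array needed */
	return iscsi_pool_init(pool, 16, NULL, sizeof(struct exdrv_r2t));
}

static struct exdrv_r2t *exdrv_r2t_get(struct iscsi_pool *pool)
{
	struct exdrv_r2t *r2t;

	/* objects are handed out through the kfifo of pointers */
	if (kfifo_out(&pool->queue, (void *)&r2t, sizeof(void *)) !=
	    sizeof(void *))
		return NULL;
	return r2t;
}

static void exdrv_r2t_put(struct iscsi_pool *pool, struct exdrv_r2t *r2t)
{
	kfifo_in(&pool->queue, (void *)&r2t, sizeof(void *));
}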
2709 
2710 int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
2711 				 uint16_t requested_cmds_max)
2712 {
2713 	int scsi_cmds, total_cmds = requested_cmds_max;
2714 
2715 check:
2716 	if (!total_cmds)
2717 		total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
2718 	/*
2719 	 * The iscsi layer needs some tasks for nop handling and tmfs,
2720 	 * so cmds_max must be at least ISCSI_MGMT_CMDS_MAX plus 1 command
2721 	 * reserved for scsi IO.
2722 	 */
2723 	if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
2724 		printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n",
2725 		       total_cmds, ISCSI_TOTAL_CMDS_MIN);
2726 		return -EINVAL;
2727 	}
2728 
2729 	if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
2730 		printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n",
2731 		       requested_cmds_max, ISCSI_TOTAL_CMDS_MAX,
2732 		       ISCSI_TOTAL_CMDS_MAX);
2733 		total_cmds = ISCSI_TOTAL_CMDS_MAX;
2734 	}
2735 
2736 	if (!is_power_of_2(total_cmds)) {
2737 		total_cmds = rounddown_pow_of_two(total_cmds);
2738 		if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
2739 			printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN);
2740 			return -EINVAL;
2741 		}
2742 
2743 		printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n",
2744 		       requested_cmds_max, total_cmds);
2745 	}
2746 
2747 	scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
2748 	if (shost->can_queue && scsi_cmds > shost->can_queue) {
2749 		total_cmds = shost->can_queue;
2750 
2751 		printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n",
2752 		       requested_cmds_max, shost->can_queue);
2753 		goto check;
2754 	}
2755 
2756 	return scsi_cmds;
2757 }
2758 EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds);
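
/*
 * Editorial note (not part of libiscsi.c): a worked example of the sizing
 * above.  If a driver requests cmds_max = 300 and shost->can_queue does not
 * limit it further, 300 is not a power of two and gets rounded down to 256,
 * so the function returns 256 - ISCSI_MGMT_CMDS_MAX usable SCSI commands
 * while the ISCSI_MGMT_CMDS_MAX slots stay reserved for nops, TMFs and
 * login/logout.
 */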
2759 
2760 /**
2761  * iscsi_host_add - add host to system
2762  * @shost: scsi host
2763  * @pdev: parent device
2764  *
2765  * This should be called by partial offload and software iscsi drivers
2766  * to add a host to the system.
2767  */
2768 int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
2769 {
2770 	if (!shost->can_queue)
2771 		shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
2772 
2773 	if (!shost->cmd_per_lun)
2774 		shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
2775 
2776 	return scsi_add_host(shost, pdev);
2777 }
2778 EXPORT_SYMBOL_GPL(iscsi_host_add);
2779 
2780 /**
2781  * iscsi_host_alloc - allocate a host and driver data
2782  * @sht: scsi host template
2783  * @dd_data_size: driver host data size
2784  * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue
2785  *
2786  * This should be called by partial offload and software iscsi drivers.
2787  * To access the driver specific memory use the iscsi_host_priv() macro.
2788  */
2789 struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
2790 				   int dd_data_size, bool xmit_can_sleep)
2791 {
2792 	struct Scsi_Host *shost;
2793 	struct iscsi_host *ihost;
2794 
2795 	shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
2796 	if (!shost)
2797 		return NULL;
2798 	ihost = shost_priv(shost);
2799 
2800 	if (xmit_can_sleep) {
2801 		ihost->workq = alloc_workqueue("iscsi_q_%d",
2802 			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
2803 			1, shost->host_no);
2804 		if (!ihost->workq)
2805 			goto free_host;
2806 	}
2807 
2808 	spin_lock_init(&ihost->lock);
2809 	ihost->state = ISCSI_HOST_SETUP;
2810 	ihost->num_sessions = 0;
2811 	init_waitqueue_head(&ihost->session_removal_wq);
2812 	return shost;
2813 
2814 free_host:
2815 	scsi_host_put(shost);
2816 	return NULL;
2817 }
2818 EXPORT_SYMBOL_GPL(iscsi_host_alloc);
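
/*
 * Editorial sketch (not part of libiscsi.c): typical host bring-up and
 * teardown for a software iscsi driver built on the helpers above, reusing
 * the hypothetical exdrv_sht_example template from the earlier sketch.
 * struct exdrv_host stands in for whatever per-host state the driver keeps.
 */
struct exdrv_host {
	void __iomem *regs;		/* hypothetical adapter state */
};

static struct Scsi_Host *exdrv_create_host(struct device *parent)
{
	struct Scsi_Host *shost;

	/* xmit_can_sleep = true: transmit from the per-host iscsi_q workqueue */
	shost = iscsi_host_alloc(&exdrv_sht_example,
				 sizeof(struct exdrv_host), true);
	if (!shost)
		return NULL;

	if (iscsi_host_add(shost, parent)) {
		iscsi_host_free(shost);
		return NULL;
	}
	return shost;
}

static void exdrv_destroy_host(struct Scsi_Host *shost)
{
	iscsi_host_remove(shost);	/* waits for all sessions to go away */
	iscsi_host_free(shost);
}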
2819 
2820 static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
2821 {
2822 	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST);
2823 }
2824 
2825 /**
2826  * iscsi_host_remove - remove host and sessions
2827  * @shost: scsi host
2828  *
2829  * If there are any sessions left, this will initiate the removal and wait
2830  * for the completion.
2831  */
2832 void iscsi_host_remove(struct Scsi_Host *shost)
2833 {
2834 	struct iscsi_host *ihost = shost_priv(shost);
2835 	unsigned long flags;
2836 
2837 	spin_lock_irqsave(&ihost->lock, flags);
2838 	ihost->state = ISCSI_HOST_REMOVED;
2839 	spin_unlock_irqrestore(&ihost->lock, flags);
2840 
2841 	iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
2842 	wait_event_interruptible(ihost->session_removal_wq,
2843 				 ihost->num_sessions == 0);
2844 	if (signal_pending(current))
2845 		flush_signals(current);
2846 
2847 	scsi_remove_host(shost);
2848 }
2849 EXPORT_SYMBOL_GPL(iscsi_host_remove);
2850 
2851 void iscsi_host_free(struct Scsi_Host *shost)
2852 {
2853 	struct iscsi_host *ihost = shost_priv(shost);
2854 
2855 	if (ihost->workq)
2856 		destroy_workqueue(ihost->workq);
2857 
2858 	kfree(ihost->netdev);
2859 	kfree(ihost->hwaddress);
2860 	kfree(ihost->initiatorname);
2861 	scsi_host_put(shost);
2862 }
2863 EXPORT_SYMBOL_GPL(iscsi_host_free);
2864 
2865 static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
2866 {
2867 	struct iscsi_host *ihost = shost_priv(shost);
2868 	unsigned long flags;
2869 
2870 	shost = scsi_host_get(shost);
2871 	if (!shost) {
2872 		printk(KERN_ERR "Invalid state. Cannot notify host removal "
2873 		      "of session teardown event because host already "
2874 		      "removed.\n");
2875 		return;
2876 	}
2877 
2878 	spin_lock_irqsave(&ihost->lock, flags);
2879 	ihost->num_sessions--;
2880 	if (ihost->num_sessions == 0)
2881 		wake_up(&ihost->session_removal_wq);
2882 	spin_unlock_irqrestore(&ihost->lock, flags);
2883 	scsi_host_put(shost);
2884 }
2885 
2886 /**
2887  * iscsi_session_setup - create iscsi cls session and libiscsi session
2888  * @iscsit: iscsi transport template
2889  * @shost: scsi host
2890  * @cmds_max: max number of total cmds (mgmt + scsi) the session can queue
2891  * @dd_size: private driver data size, added to session allocation size
2892  * @cmd_task_size: LLD task private data size
2893  * @initial_cmdsn: initial CmdSN
2894  * @id: target ID to add to this session
2895  *
2896  * This can be used by software iscsi_transports that allocate
2897  * a session per scsi host.
2898  *
2899  * Callers should set cmds_max to the largest total number (mgmt + scsi) of
2900  * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
2901  * for nop handling and login/logout requests.
2902  */
2903 struct iscsi_cls_session *
2904 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2905 		    uint16_t cmds_max, int dd_size, int cmd_task_size,
2906 		    uint32_t initial_cmdsn, unsigned int id)
2907 {
2908 	struct iscsi_host *ihost = shost_priv(shost);
2909 	struct iscsi_session *session;
2910 	struct iscsi_cls_session *cls_session;
2911 	int cmd_i, scsi_cmds;
2912 	unsigned long flags;
2913 
2914 	spin_lock_irqsave(&ihost->lock, flags);
2915 	if (ihost->state == ISCSI_HOST_REMOVED) {
2916 		spin_unlock_irqrestore(&ihost->lock, flags);
2917 		return NULL;
2918 	}
2919 	ihost->num_sessions++;
2920 	spin_unlock_irqrestore(&ihost->lock, flags);
2921 
2922 	scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
2923 	if (scsi_cmds < 0)
2924 		goto dec_session_count;
2925 
2926 	cls_session = iscsi_alloc_session(shost, iscsit,
2927 					  sizeof(struct iscsi_session) +
2928 					  dd_size);
2929 	if (!cls_session)
2930 		goto dec_session_count;
2931 	session = cls_session->dd_data;
2932 	session->cls_session = cls_session;
2933 	session->host = shost;
2934 	session->state = ISCSI_STATE_FREE;
2935 	session->fast_abort = 1;
2936 	session->tgt_reset_timeout = 30;
2937 	session->lu_reset_timeout = 15;
2938 	session->abort_timeout = 10;
2939 	session->scsi_cmds_max = scsi_cmds;
2940 	session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX;
2941 	session->queued_cmdsn = session->cmdsn = initial_cmdsn;
2942 	session->exp_cmdsn = initial_cmdsn + 1;
2943 	session->max_cmdsn = initial_cmdsn + 1;
2944 	session->max_r2t = 1;
2945 	session->tt = iscsit;
2946 	session->dd_data = cls_session->dd_data + sizeof(*session);
2947 
2948 	session->tmf_state = TMF_INITIAL;
2949 	timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
2950 	mutex_init(&session->eh_mutex);
2951 	init_waitqueue_head(&session->ehwait);
2952 
2953 	spin_lock_init(&session->frwd_lock);
2954 	spin_lock_init(&session->back_lock);
2955 
2956 	/* initialize SCSI PDU commands pool */
2957 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
2958 			    (void***)&session->cmds,
2959 			    cmd_task_size + sizeof(struct iscsi_task)))
2960 		goto cmdpool_alloc_fail;
2961 
2962 	/* pre-format cmds pool with ITT */
2963 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
2964 		struct iscsi_task *task = session->cmds[cmd_i];
2965 
2966 		if (cmd_task_size)
2967 			task->dd_data = &task[1];
2968 		task->itt = cmd_i;
2969 		task->state = ISCSI_TASK_FREE;
2970 		INIT_LIST_HEAD(&task->running);
2971 	}
2972 
2973 	if (!try_module_get(iscsit->owner))
2974 		goto module_get_fail;
2975 
2976 	if (iscsi_add_session(cls_session, id))
2977 		goto cls_session_fail;
2978 
2979 	return cls_session;
2980 
2981 cls_session_fail:
2982 	module_put(iscsit->owner);
2983 module_get_fail:
2984 	iscsi_pool_free(&session->cmdpool);
2985 cmdpool_alloc_fail:
2986 	iscsi_free_session(cls_session);
2987 dec_session_count:
2988 	iscsi_host_dec_session_cnt(shost);
2989 	return NULL;
2990 }
2991 EXPORT_SYMBOL_GPL(iscsi_session_setup);
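
/*
 * Editorial sketch (not part of libiscsi.c): a session_create transport
 * callback layered on iscsi_session_setup()/iscsi_session_teardown().
 * exdrv_transport, struct exdrv_task and the target id of 0 are assumptions;
 * cmds_max and initial_cmdsn normally come from userspace via open-iscsi.
 */
struct exdrv_task {
	void *hw_cmd;			/* hypothetical per-task driver state */
};

static struct iscsi_transport exdrv_transport;	/* filled in by a real driver */

static struct iscsi_cls_session *
exdrv_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
		     uint32_t initial_cmdsn)
{
	/* dd_size = 0: no extra per-session driver data in this sketch */
	return iscsi_session_setup(&exdrv_transport, shost, cmds_max, 0,
				   sizeof(struct exdrv_task), initial_cmdsn, 0);
}

static void exdrv_session_destroy(struct iscsi_cls_session *cls_session)
{
	iscsi_session_teardown(cls_session);
}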
2992 
2993 /**
2994  * iscsi_session_teardown - destroy session, host, and cls_session
2995  * @cls_session: iscsi session
2996  */
2997 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2998 {
2999 	struct iscsi_session *session = cls_session->dd_data;
3000 	struct module *owner = cls_session->transport->owner;
3001 	struct Scsi_Host *shost = session->host;
3002 
3003 	iscsi_remove_session(cls_session);
3004 
3005 	iscsi_pool_free(&session->cmdpool);
3006 	kfree(session->password);
3007 	kfree(session->password_in);
3008 	kfree(session->username);
3009 	kfree(session->username_in);
3010 	kfree(session->targetname);
3011 	kfree(session->targetalias);
3012 	kfree(session->initiatorname);
3013 	kfree(session->boot_root);
3014 	kfree(session->boot_nic);
3015 	kfree(session->boot_target);
3016 	kfree(session->ifacename);
3017 	kfree(session->portal_type);
3018 	kfree(session->discovery_parent_type);
3019 
3020 	iscsi_free_session(cls_session);
3021 
3022 	iscsi_host_dec_session_cnt(shost);
3023 	module_put(owner);
3024 }
3025 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
3026 
3027 /**
3028  * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
3029  * @cls_session: iscsi_cls_session
3030  * @dd_size: private driver data size
3031  * @conn_idx: cid
3032  */
3033 struct iscsi_cls_conn *
3034 iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
3035 		 uint32_t conn_idx)
3036 {
3037 	struct iscsi_session *session = cls_session->dd_data;
3038 	struct iscsi_conn *conn;
3039 	struct iscsi_cls_conn *cls_conn;
3040 	char *data;
3041 	int err;
3042 
3043 	cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size,
3044 				     conn_idx);
3045 	if (!cls_conn)
3046 		return NULL;
3047 	conn = cls_conn->dd_data;
3048 	memset(conn, 0, sizeof(*conn) + dd_size);
3049 
3050 	conn->dd_data = cls_conn->dd_data + sizeof(*conn);
3051 	conn->session = session;
3052 	conn->cls_conn = cls_conn;
3053 	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
3054 	conn->id = conn_idx;
3055 	conn->exp_statsn = 0;
3056 
3057 	timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
3058 
3059 	INIT_LIST_HEAD(&conn->mgmtqueue);
3060 	INIT_LIST_HEAD(&conn->cmdqueue);
3061 	INIT_LIST_HEAD(&conn->requeue);
3062 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
3063 
3064 	/* allocate login_task used for the login/text sequences */
3065 	spin_lock_bh(&session->frwd_lock);
3066 	if (!kfifo_out(&session->cmdpool.queue,
3067                          (void*)&conn->login_task,
3068 			 sizeof(void*))) {
3069 		spin_unlock_bh(&session->frwd_lock);
3070 		goto login_task_alloc_fail;
3071 	}
3072 	spin_unlock_bh(&session->frwd_lock);
3073 
3074 	data = (char *) __get_free_pages(GFP_KERNEL,
3075 					 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
3076 	if (!data)
3077 		goto login_task_data_alloc_fail;
3078 	conn->login_task->data = conn->data = data;
3079 
3080 	err = iscsi_add_conn(cls_conn);
3081 	if (err)
3082 		goto login_task_add_dev_fail;
3083 
3084 	return cls_conn;
3085 
3086 login_task_add_dev_fail:
3087 	free_pages((unsigned long) conn->data,
3088 		   get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
3089 
3090 login_task_data_alloc_fail:
3091 	kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
3092 		    sizeof(void*));
3093 login_task_alloc_fail:
3094 	iscsi_put_conn(cls_conn);
3095 	return NULL;
3096 }
3097 EXPORT_SYMBOL_GPL(iscsi_conn_setup);
3098 
3099 /**
3100  * iscsi_conn_teardown - teardown iscsi connection
3101  * @cls_conn: iscsi class connection
3102  *
3103  * TODO: we may need to make this into a two-step process
3104  * like scsi-ml's remove + put host
3105  */
3106 void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
3107 {
3108 	struct iscsi_conn *conn = cls_conn->dd_data;
3109 	struct iscsi_session *session = conn->session;
3110 
3111 	iscsi_remove_conn(cls_conn);
3112 
3113 	del_timer_sync(&conn->transport_timer);
3114 
3115 	mutex_lock(&session->eh_mutex);
3116 	spin_lock_bh(&session->frwd_lock);
3117 	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
3118 	if (session->leadconn == conn) {
3119 		/*
3120 		 * leading connection? then give up on recovery.
3121 		 */
3122 		session->state = ISCSI_STATE_TERMINATE;
3123 		wake_up(&session->ehwait);
3124 	}
3125 	spin_unlock_bh(&session->frwd_lock);
3126 
3127 	/* flush queued up work because we free the connection below */
3128 	iscsi_suspend_tx(conn);
3129 
3130 	spin_lock_bh(&session->frwd_lock);
3131 	free_pages((unsigned long) conn->data,
3132 		   get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
3133 	kfree(conn->persistent_address);
3134 	kfree(conn->local_ipaddr);
3135 	/* regular RX path uses back_lock */
3136 	spin_lock_bh(&session->back_lock);
3137 	kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
3138 		    sizeof(void*));
3139 	spin_unlock_bh(&session->back_lock);
3140 	if (session->leadconn == conn)
3141 		session->leadconn = NULL;
3142 	spin_unlock_bh(&session->frwd_lock);
3143 	mutex_unlock(&session->eh_mutex);
3144 
3145 	iscsi_put_conn(cls_conn);
3146 }
3147 EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
3148 
3149 int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
3150 {
3151 	struct iscsi_conn *conn = cls_conn->dd_data;
3152 	struct iscsi_session *session = conn->session;
3153 
3154 	if (!session) {
3155 		iscsi_conn_printk(KERN_ERR, conn,
3156 				  "can't start unbound connection\n");
3157 		return -EPERM;
3158 	}
3159 
3160 	if ((session->imm_data_en || !session->initial_r2t_en) &&
3161 	     session->first_burst > session->max_burst) {
3162 		iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
3163 				  "first_burst %d max_burst %d\n",
3164 				  session->first_burst, session->max_burst);
3165 		return -EINVAL;
3166 	}
3167 
3168 	if (conn->ping_timeout && !conn->recv_timeout) {
3169 		iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
3170 				  "zero. Using 5 seconds.\n");
3171 		conn->recv_timeout = 5;
3172 	}
3173 
3174 	if (conn->recv_timeout && !conn->ping_timeout) {
3175 		iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
3176 				  "zero. Using 5 seconds.\n");
3177 		conn->ping_timeout = 5;
3178 	}
3179 
3180 	spin_lock_bh(&session->frwd_lock);
3181 	conn->c_stage = ISCSI_CONN_STARTED;
3182 	session->state = ISCSI_STATE_LOGGED_IN;
3183 	session->queued_cmdsn = session->cmdsn;
3184 
3185 	conn->last_recv = jiffies;
3186 	conn->last_ping = jiffies;
3187 	if (conn->recv_timeout && conn->ping_timeout)
3188 		mod_timer(&conn->transport_timer,
3189 			  jiffies + (conn->recv_timeout * HZ));
3190 
3191 	switch(conn->stop_stage) {
3192 	case STOP_CONN_RECOVER:
3193 		/*
3194 		 * unblock eh_abort() if it is blocked. re-try all
3195 		 * commands after successful recovery
3196 		 */
3197 		conn->stop_stage = 0;
3198 		session->tmf_state = TMF_INITIAL;
3199 		session->age++;
3200 		if (session->age == 16)
3201 			session->age = 0;
3202 		break;
3203 	case STOP_CONN_TERM:
3204 		conn->stop_stage = 0;
3205 		break;
3206 	default:
3207 		break;
3208 	}
3209 	spin_unlock_bh(&session->frwd_lock);
3210 
3211 	iscsi_unblock_session(session->cls_session);
3212 	wake_up(&session->ehwait);
3213 	return 0;
3214 }
3215 EXPORT_SYMBOL_GPL(iscsi_conn_start);
3216 
3217 static void
3218 fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
3219 {
3220 	struct iscsi_task *task;
3221 	int i, state;
3222 
3223 	for (i = 0; i < conn->session->cmds_max; i++) {
3224 		task = conn->session->cmds[i];
3225 		if (task->sc)
3226 			continue;
3227 
3228 		if (task->state == ISCSI_TASK_FREE)
3229 			continue;
3230 
3231 		ISCSI_DBG_SESSION(conn->session,
3232 				  "failing mgmt itt 0x%x state %d\n",
3233 				  task->itt, task->state);
3234 
3235 		spin_lock_bh(&session->back_lock);
3236 		if (cleanup_queued_task(task)) {
3237 			spin_unlock_bh(&session->back_lock);
3238 			continue;
3239 		}
3240 
3241 		state = ISCSI_TASK_ABRT_SESS_RECOV;
3242 		if (task->state == ISCSI_TASK_PENDING)
3243 			state = ISCSI_TASK_COMPLETED;
3244 		iscsi_complete_task(task, state);
3245 		spin_unlock_bh(&session->back_lock);
3246 	}
3247 }
3248 
3249 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
3250 {
3251 	struct iscsi_conn *conn = cls_conn->dd_data;
3252 	struct iscsi_session *session = conn->session;
3253 	int old_stop_stage;
3254 
3255 	mutex_lock(&session->eh_mutex);
3256 	spin_lock_bh(&session->frwd_lock);
3257 	if (conn->stop_stage == STOP_CONN_TERM) {
3258 		spin_unlock_bh(&session->frwd_lock);
3259 		mutex_unlock(&session->eh_mutex);
3260 		return;
3261 	}
3262 
3263 	/*
3264 	 * When this is called for the in_login state, we only want to clean
3265 	 * up the login task and connection. We do not need to block and set
3266 	 * the recovery state again
3267 	 */
3268 	if (flag == STOP_CONN_TERM)
3269 		session->state = ISCSI_STATE_TERMINATE;
3270 	else if (conn->stop_stage != STOP_CONN_RECOVER)
3271 		session->state = ISCSI_STATE_IN_RECOVERY;
3272 
3273 	old_stop_stage = conn->stop_stage;
3274 	conn->stop_stage = flag;
3275 	spin_unlock_bh(&session->frwd_lock);
3276 
3277 	del_timer_sync(&conn->transport_timer);
3278 	iscsi_suspend_tx(conn);
3279 
3280 	spin_lock_bh(&session->frwd_lock);
3281 	conn->c_stage = ISCSI_CONN_STOPPED;
3282 	spin_unlock_bh(&session->frwd_lock);
3283 
3284 	/*
3285 	 * for connection level recovery we should not calculate
3286 	 * header digest. conn->hdr_size is used for optimization
3287 	 * in hdr_extract() and will be re-negotiated at
3288 	 * set_param() time.
3289 	 */
3290 	if (flag == STOP_CONN_RECOVER) {
3291 		conn->hdrdgst_en = 0;
3292 		conn->datadgst_en = 0;
3293 		if (session->state == ISCSI_STATE_IN_RECOVERY &&
3294 		    old_stop_stage != STOP_CONN_RECOVER) {
3295 			ISCSI_DBG_SESSION(session, "blocking session\n");
3296 			iscsi_block_session(session->cls_session);
3297 		}
3298 	}
3299 
3300 	/*
3301 	 * flush queues.
3302 	 */
3303 	spin_lock_bh(&session->frwd_lock);
3304 	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
3305 	fail_mgmt_tasks(session, conn);
3306 	memset(&session->tmhdr, 0, sizeof(session->tmhdr));
3307 	spin_unlock_bh(&session->frwd_lock);
3308 	mutex_unlock(&session->eh_mutex);
3309 }
3310 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
3311 
3312 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
3313 		    struct iscsi_cls_conn *cls_conn, int is_leading)
3314 {
3315 	struct iscsi_session *session = cls_session->dd_data;
3316 	struct iscsi_conn *conn = cls_conn->dd_data;
3317 
3318 	spin_lock_bh(&session->frwd_lock);
3319 	if (is_leading)
3320 		session->leadconn = conn;
3321 	spin_unlock_bh(&session->frwd_lock);
3322 
3323 	/*
3324 	 * The target could have reduced its window size between logins, so
3325 	 * we have to reset max/exp cmdsn so we can see the new values.
3326 	 */
3327 	spin_lock_bh(&session->back_lock);
3328 	session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1;
3329 	spin_unlock_bh(&session->back_lock);
3330 	/*
3331 	 * Unblock xmitworker() so the Login Phase will pass through.
3332 	 */
3333 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
3334 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
3335 	return 0;
3336 }
3337 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
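
/*
 * Editorial sketch (not part of libiscsi.c): conn_create and bind_conn
 * transport callbacks that delegate to iscsi_conn_setup() and
 * iscsi_conn_bind() above.  struct exdrv_conn and the endpoint attach step
 * are hypothetical; transport_eph is the endpoint handle userspace passes in.
 */
struct exdrv_conn {
	struct exdrv_endpoint *ep;	/* hypothetical bound endpoint */
};

static struct iscsi_cls_conn *
exdrv_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	/* dd_size bytes of driver data end up at iscsi_conn->dd_data */
	return iscsi_conn_setup(cls_session, sizeof(struct exdrv_conn), cid);
}

static int exdrv_conn_bind(struct iscsi_cls_session *cls_session,
			   struct iscsi_cls_conn *cls_conn,
			   uint64_t transport_eph, int is_leading)
{
	int err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return err;

	/*
	 * A real driver would now look up its endpoint from transport_eph
	 * and attach it to the private conn data at cls_conn->dd_data.
	 */
	return 0;
}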
3338 
3339 int iscsi_switch_str_param(char **param, char *new_val_buf)
3340 {
3341 	char *new_val;
3342 
3343 	if (*param) {
3344 		if (!strcmp(*param, new_val_buf))
3345 			return 0;
3346 	}
3347 
3348 	new_val = kstrdup(new_val_buf, GFP_NOIO);
3349 	if (!new_val)
3350 		return -ENOMEM;
3351 
3352 	kfree(*param);
3353 	*param = new_val;
3354 	return 0;
3355 }
3356 EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
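
/*
 * Editorial sketch (not part of libiscsi.c): iscsi_switch_str_param() above
 * is how string parameters get replaced, both in iscsi_set_param() below and
 * in drivers that keep their own kstrdup()'d strings.  The exdrv_params
 * struct and field are hypothetical.
 */
struct exdrv_params {
	char *nic_name;
};

static int exdrv_set_nic_name(struct exdrv_params *params, char *buf)
{
	/* frees any old value and duplicates buf with GFP_NOIO */
	return iscsi_switch_str_param(&params->nic_name, buf);
}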
3357 
3358 int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3359 		    enum iscsi_param param, char *buf, int buflen)
3360 {
3361 	struct iscsi_conn *conn = cls_conn->dd_data;
3362 	struct iscsi_session *session = conn->session;
3363 	int val;
3364 
3365 	switch(param) {
3366 	case ISCSI_PARAM_FAST_ABORT:
3367 		sscanf(buf, "%d", &session->fast_abort);
3368 		break;
3369 	case ISCSI_PARAM_ABORT_TMO:
3370 		sscanf(buf, "%d", &session->abort_timeout);
3371 		break;
3372 	case ISCSI_PARAM_LU_RESET_TMO:
3373 		sscanf(buf, "%d", &session->lu_reset_timeout);
3374 		break;
3375 	case ISCSI_PARAM_TGT_RESET_TMO:
3376 		sscanf(buf, "%d", &session->tgt_reset_timeout);
3377 		break;
3378 	case ISCSI_PARAM_PING_TMO:
3379 		sscanf(buf, "%d", &conn->ping_timeout);
3380 		break;
3381 	case ISCSI_PARAM_RECV_TMO:
3382 		sscanf(buf, "%d", &conn->recv_timeout);
3383 		break;
3384 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
3385 		sscanf(buf, "%d", &conn->max_recv_dlength);
3386 		break;
3387 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3388 		sscanf(buf, "%d", &conn->max_xmit_dlength);
3389 		break;
3390 	case ISCSI_PARAM_HDRDGST_EN:
3391 		sscanf(buf, "%d", &conn->hdrdgst_en);
3392 		break;
3393 	case ISCSI_PARAM_DATADGST_EN:
3394 		sscanf(buf, "%d", &conn->datadgst_en);
3395 		break;
3396 	case ISCSI_PARAM_INITIAL_R2T_EN:
3397 		sscanf(buf, "%d", &session->initial_r2t_en);
3398 		break;
3399 	case ISCSI_PARAM_MAX_R2T:
3400 		sscanf(buf, "%hu", &session->max_r2t);
3401 		break;
3402 	case ISCSI_PARAM_IMM_DATA_EN:
3403 		sscanf(buf, "%d", &session->imm_data_en);
3404 		break;
3405 	case ISCSI_PARAM_FIRST_BURST:
3406 		sscanf(buf, "%d", &session->first_burst);
3407 		break;
3408 	case ISCSI_PARAM_MAX_BURST:
3409 		sscanf(buf, "%d", &session->max_burst);
3410 		break;
3411 	case ISCSI_PARAM_PDU_INORDER_EN:
3412 		sscanf(buf, "%d", &session->pdu_inorder_en);
3413 		break;
3414 	case ISCSI_PARAM_DATASEQ_INORDER_EN:
3415 		sscanf(buf, "%d", &session->dataseq_inorder_en);
3416 		break;
3417 	case ISCSI_PARAM_ERL:
3418 		sscanf(buf, "%d", &session->erl);
3419 		break;
3420 	case ISCSI_PARAM_EXP_STATSN:
3421 		sscanf(buf, "%u", &conn->exp_statsn);
3422 		break;
3423 	case ISCSI_PARAM_USERNAME:
3424 		return iscsi_switch_str_param(&session->username, buf);
3425 	case ISCSI_PARAM_USERNAME_IN:
3426 		return iscsi_switch_str_param(&session->username_in, buf);
3427 	case ISCSI_PARAM_PASSWORD:
3428 		return iscsi_switch_str_param(&session->password, buf);
3429 	case ISCSI_PARAM_PASSWORD_IN:
3430 		return iscsi_switch_str_param(&session->password_in, buf);
3431 	case ISCSI_PARAM_TARGET_NAME:
3432 		return iscsi_switch_str_param(&session->targetname, buf);
3433 	case ISCSI_PARAM_TARGET_ALIAS:
3434 		return iscsi_switch_str_param(&session->targetalias, buf);
3435 	case ISCSI_PARAM_TPGT:
3436 		sscanf(buf, "%d", &session->tpgt);
3437 		break;
3438 	case ISCSI_PARAM_PERSISTENT_PORT:
3439 		sscanf(buf, "%d", &conn->persistent_port);
3440 		break;
3441 	case ISCSI_PARAM_PERSISTENT_ADDRESS:
3442 		return iscsi_switch_str_param(&conn->persistent_address, buf);
3443 	case ISCSI_PARAM_IFACE_NAME:
3444 		return iscsi_switch_str_param(&session->ifacename, buf);
3445 	case ISCSI_PARAM_INITIATOR_NAME:
3446 		return iscsi_switch_str_param(&session->initiatorname, buf);
3447 	case ISCSI_PARAM_BOOT_ROOT:
3448 		return iscsi_switch_str_param(&session->boot_root, buf);
3449 	case ISCSI_PARAM_BOOT_NIC:
3450 		return iscsi_switch_str_param(&session->boot_nic, buf);
3451 	case ISCSI_PARAM_BOOT_TARGET:
3452 		return iscsi_switch_str_param(&session->boot_target, buf);
3453 	case ISCSI_PARAM_PORTAL_TYPE:
3454 		return iscsi_switch_str_param(&session->portal_type, buf);
3455 	case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
3456 		return iscsi_switch_str_param(&session->discovery_parent_type,
3457 					      buf);
3458 	case ISCSI_PARAM_DISCOVERY_SESS:
3459 		sscanf(buf, "%d", &val);
3460 		session->discovery_sess = !!val;
3461 		break;
3462 	case ISCSI_PARAM_LOCAL_IPADDR:
3463 		return iscsi_switch_str_param(&conn->local_ipaddr, buf);
3464 	default:
3465 		return -ENOSYS;
3466 	}
3467 
3468 	return 0;
3469 }
3470 EXPORT_SYMBOL_GPL(iscsi_set_param);
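
/*
 * Editorial sketch (not part of libiscsi.c): a conn set_param callback that
 * special-cases one parameter and falls back to iscsi_set_param() above for
 * the rest.  EXDRV_MAX_XMIT_SEG and the clamping policy are assumptions; the
 * pattern itself (handle what the hardware cares about, delegate the rest)
 * is what offload drivers typically do.
 */
#define EXDRV_MAX_XMIT_SEG	8192	/* hypothetical hardware limit */

static int exdrv_conn_set_param(struct iscsi_cls_conn *cls_conn,
				enum iscsi_param param, char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	int err;

	switch (param) {
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (err)
			return err;
		/* clamp to what the hypothetical hardware can segment */
		if (conn->max_xmit_dlength > EXDRV_MAX_XMIT_SEG)
			conn->max_xmit_dlength = EXDRV_MAX_XMIT_SEG;
		return 0;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}
}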
3471 
3472 int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3473 			    enum iscsi_param param, char *buf)
3474 {
3475 	struct iscsi_session *session = cls_session->dd_data;
3476 	int len;
3477 
3478 	switch(param) {
3479 	case ISCSI_PARAM_FAST_ABORT:
3480 		len = sysfs_emit(buf, "%d\n", session->fast_abort);
3481 		break;
3482 	case ISCSI_PARAM_ABORT_TMO:
3483 		len = sysfs_emit(buf, "%d\n", session->abort_timeout);
3484 		break;
3485 	case ISCSI_PARAM_LU_RESET_TMO:
3486 		len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
3487 		break;
3488 	case ISCSI_PARAM_TGT_RESET_TMO:
3489 		len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
3490 		break;
3491 	case ISCSI_PARAM_INITIAL_R2T_EN:
3492 		len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
3493 		break;
3494 	case ISCSI_PARAM_MAX_R2T:
3495 		len = sysfs_emit(buf, "%hu\n", session->max_r2t);
3496 		break;
3497 	case ISCSI_PARAM_IMM_DATA_EN:
3498 		len = sysfs_emit(buf, "%d\n", session->imm_data_en);
3499 		break;
3500 	case ISCSI_PARAM_FIRST_BURST:
3501 		len = sysfs_emit(buf, "%u\n", session->first_burst);
3502 		break;
3503 	case ISCSI_PARAM_MAX_BURST:
3504 		len = sysfs_emit(buf, "%u\n", session->max_burst);
3505 		break;
3506 	case ISCSI_PARAM_PDU_INORDER_EN:
3507 		len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
3508 		break;
3509 	case ISCSI_PARAM_DATASEQ_INORDER_EN:
3510 		len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
3511 		break;
3512 	case ISCSI_PARAM_DEF_TASKMGMT_TMO:
3513 		len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
3514 		break;
3515 	case ISCSI_PARAM_ERL:
3516 		len = sysfs_emit(buf, "%d\n", session->erl);
3517 		break;
3518 	case ISCSI_PARAM_TARGET_NAME:
3519 		len = sysfs_emit(buf, "%s\n", session->targetname);
3520 		break;
3521 	case ISCSI_PARAM_TARGET_ALIAS:
3522 		len = sysfs_emit(buf, "%s\n", session->targetalias);
3523 		break;
3524 	case ISCSI_PARAM_TPGT:
3525 		len = sysfs_emit(buf, "%d\n", session->tpgt);
3526 		break;
3527 	case ISCSI_PARAM_USERNAME:
3528 		len = sysfs_emit(buf, "%s\n", session->username);
3529 		break;
3530 	case ISCSI_PARAM_USERNAME_IN:
3531 		len = sysfs_emit(buf, "%s\n", session->username_in);
3532 		break;
3533 	case ISCSI_PARAM_PASSWORD:
3534 		len = sysfs_emit(buf, "%s\n", session->password);
3535 		break;
3536 	case ISCSI_PARAM_PASSWORD_IN:
3537 		len = sysfs_emit(buf, "%s\n", session->password_in);
3538 		break;
3539 	case ISCSI_PARAM_IFACE_NAME:
3540 		len = sysfs_emit(buf, "%s\n", session->ifacename);
3541 		break;
3542 	case ISCSI_PARAM_INITIATOR_NAME:
3543 		len = sysfs_emit(buf, "%s\n", session->initiatorname);
3544 		break;
3545 	case ISCSI_PARAM_BOOT_ROOT:
3546 		len = sysfs_emit(buf, "%s\n", session->boot_root);
3547 		break;
3548 	case ISCSI_PARAM_BOOT_NIC:
3549 		len = sysfs_emit(buf, "%s\n", session->boot_nic);
3550 		break;
3551 	case ISCSI_PARAM_BOOT_TARGET:
3552 		len = sysfs_emit(buf, "%s\n", session->boot_target);
3553 		break;
3554 	case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
3555 		len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
3556 		break;
3557 	case ISCSI_PARAM_DISCOVERY_SESS:
3558 		len = sysfs_emit(buf, "%u\n", session->discovery_sess);
3559 		break;
3560 	case ISCSI_PARAM_PORTAL_TYPE:
3561 		len = sysfs_emit(buf, "%s\n", session->portal_type);
3562 		break;
3563 	case ISCSI_PARAM_CHAP_AUTH_EN:
3564 		len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
3565 		break;
3566 	case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
3567 		len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
3568 		break;
3569 	case ISCSI_PARAM_BIDI_CHAP_EN:
3570 		len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
3571 		break;
3572 	case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
3573 		len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
3574 		break;
3575 	case ISCSI_PARAM_DEF_TIME2WAIT:
3576 		len = sysfs_emit(buf, "%d\n", session->time2wait);
3577 		break;
3578 	case ISCSI_PARAM_DEF_TIME2RETAIN:
3579 		len = sysfs_emit(buf, "%d\n", session->time2retain);
3580 		break;
3581 	case ISCSI_PARAM_TSID:
3582 		len = sysfs_emit(buf, "%u\n", session->tsid);
3583 		break;
3584 	case ISCSI_PARAM_ISID:
3585 		len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
3586 				 session->isid[0], session->isid[1],
3587 				 session->isid[2], session->isid[3],
3588 				 session->isid[4], session->isid[5]);
3589 		break;
3590 	case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
3591 		len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
3592 		break;
3593 	case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
3594 		if (session->discovery_parent_type)
3595 			len = sysfs_emit(buf, "%s\n",
3596 					 session->discovery_parent_type);
3597 		else
3598 			len = sysfs_emit(buf, "\n");
3599 		break;
3600 	default:
3601 		return -ENOSYS;
3602 	}
3603 
3604 	return len;
3605 }
3606 EXPORT_SYMBOL_GPL(iscsi_session_get_param);
3607 
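/**
 * iscsi_conn_get_addr_param - format an IPv4/IPv6 address or port for sysfs
 * @addr: sockaddr storage holding the endpoint address
 * @param: address or port parameter to read
 * @buf: PAGE_SIZE buffer filled with sysfs_emit()
 *
 * Returns the number of bytes written to @buf, or -EINVAL if the address
 * family or @param is not supported.
 */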
3608 int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
3609 			      enum iscsi_param param, char *buf)
3610 {
3611 	struct sockaddr_in6 *sin6 = NULL;
3612 	struct sockaddr_in *sin = NULL;
3613 	int len;
3614 
3615 	switch (addr->ss_family) {
3616 	case AF_INET:
3617 		sin = (struct sockaddr_in *)addr;
3618 		break;
3619 	case AF_INET6:
3620 		sin6 = (struct sockaddr_in6 *)addr;
3621 		break;
3622 	default:
3623 		return -EINVAL;
3624 	}
3625 
3626 	switch (param) {
3627 	case ISCSI_PARAM_CONN_ADDRESS:
3628 	case ISCSI_HOST_PARAM_IPADDRESS:
3629 		if (sin)
3630 			len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr);
3631 		else
3632 			len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr);
3633 		break;
3634 	case ISCSI_PARAM_CONN_PORT:
3635 	case ISCSI_PARAM_LOCAL_PORT:
3636 		if (sin)
3637 			len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port));
3638 		else
3639 			len = sysfs_emit(buf, "%hu\n",
3640 					 be16_to_cpu(sin6->sin6_port));
3641 		break;
3642 	default:
3643 		return -EINVAL;
3644 	}
3645 
3646 	return len;
3647 }
3648 EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
3649 
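/**
 * iscsi_conn_get_param - copy a per-connection parameter into a sysfs buffer
 * @cls_conn: iSCSI class connection
 * @param: parameter to read
 * @buf: PAGE_SIZE buffer filled with sysfs_emit()
 *
 * Returns the number of bytes written to @buf, or -ENOSYS if @param is not
 * a connection-level parameter handled by libiscsi.
 */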
3650 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
3651 			 enum iscsi_param param, char *buf)
3652 {
3653 	struct iscsi_conn *conn = cls_conn->dd_data;
3654 	int len;
3655 
3656 	switch (param) {
3657 	case ISCSI_PARAM_PING_TMO:
3658 		len = sysfs_emit(buf, "%u\n", conn->ping_timeout);
3659 		break;
3660 	case ISCSI_PARAM_RECV_TMO:
3661 		len = sysfs_emit(buf, "%u\n", conn->recv_timeout);
3662 		break;
3663 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
3664 		len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength);
3665 		break;
3666 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3667 		len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength);
3668 		break;
3669 	case ISCSI_PARAM_HDRDGST_EN:
3670 		len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en);
3671 		break;
3672 	case ISCSI_PARAM_DATADGST_EN:
3673 		len = sysfs_emit(buf, "%d\n", conn->datadgst_en);
3674 		break;
3675 	case ISCSI_PARAM_IFMARKER_EN:
3676 		len = sysfs_emit(buf, "%d\n", conn->ifmarker_en);
3677 		break;
3678 	case ISCSI_PARAM_OFMARKER_EN:
3679 		len = sysfs_emit(buf, "%d\n", conn->ofmarker_en);
3680 		break;
3681 	case ISCSI_PARAM_EXP_STATSN:
3682 		len = sysfs_emit(buf, "%u\n", conn->exp_statsn);
3683 		break;
3684 	case ISCSI_PARAM_PERSISTENT_PORT:
3685 		len = sysfs_emit(buf, "%d\n", conn->persistent_port);
3686 		break;
3687 	case ISCSI_PARAM_PERSISTENT_ADDRESS:
3688 		len = sysfs_emit(buf, "%s\n", conn->persistent_address);
3689 		break;
3690 	case ISCSI_PARAM_STATSN:
3691 		len = sysfs_emit(buf, "%u\n", conn->statsn);
3692 		break;
3693 	case ISCSI_PARAM_MAX_SEGMENT_SIZE:
3694 		len = sysfs_emit(buf, "%u\n", conn->max_segment_size);
3695 		break;
3696 	case ISCSI_PARAM_KEEPALIVE_TMO:
3697 		len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo);
3698 		break;
3699 	case ISCSI_PARAM_LOCAL_PORT:
3700 		len = sysfs_emit(buf, "%u\n", conn->local_port);
3701 		break;
3702 	case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
3703 		len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat);
3704 		break;
3705 	case ISCSI_PARAM_TCP_NAGLE_DISABLE:
3706 		len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable);
3707 		break;
3708 	case ISCSI_PARAM_TCP_WSF_DISABLE:
3709 		len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable);
3710 		break;
3711 	case ISCSI_PARAM_TCP_TIMER_SCALE:
3712 		len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale);
3713 		break;
3714 	case ISCSI_PARAM_TCP_TIMESTAMP_EN:
3715 		len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en);
3716 		break;
3717 	case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
3718 		len = sysfs_emit(buf, "%u\n", conn->fragment_disable);
3719 		break;
3720 	case ISCSI_PARAM_IPV4_TOS:
3721 		len = sysfs_emit(buf, "%u\n", conn->ipv4_tos);
3722 		break;
3723 	case ISCSI_PARAM_IPV6_TC:
3724 		len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class);
3725 		break;
3726 	case ISCSI_PARAM_IPV6_FLOW_LABEL:
3727 		len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label);
3728 		break;
3729 	case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
3730 		len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6);
3731 		break;
3732 	case ISCSI_PARAM_TCP_XMIT_WSF:
3733 		len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf);
3734 		break;
3735 	case ISCSI_PARAM_TCP_RECV_WSF:
3736 		len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf);
3737 		break;
3738 	case ISCSI_PARAM_LOCAL_IPADDR:
3739 		len = sysfs_emit(buf, "%s\n", conn->local_ipaddr);
3740 		break;
3741 	default:
3742 		return -ENOSYS;
3743 	}
3744 
3745 	return len;
3746 }
3747 EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
3748 
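/**
 * iscsi_host_get_param - copy a host-level parameter into a sysfs buffer
 * @shost: SCSI host backing the iSCSI host
 * @param: host parameter to read
 * @buf: PAGE_SIZE buffer filled with sysfs_emit()
 *
 * Returns the number of bytes written to @buf, or -ENOSYS if @param is not
 * handled by libiscsi.
 */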
3749 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
3750 			 char *buf)
3751 {
3752 	struct iscsi_host *ihost = shost_priv(shost);
3753 	int len;
3754 
3755 	switch (param) {
3756 	case ISCSI_HOST_PARAM_NETDEV_NAME:
3757 		len = sysfs_emit(buf, "%s\n", ihost->netdev);
3758 		break;
3759 	case ISCSI_HOST_PARAM_HWADDRESS:
3760 		len = sysfs_emit(buf, "%s\n", ihost->hwaddress);
3761 		break;
3762 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
3763 		len = sysfs_emit(buf, "%s\n", ihost->initiatorname);
3764 		break;
3765 	default:
3766 		return -ENOSYS;
3767 	}
3768 
3769 	return len;
3770 }
3771 EXPORT_SYMBOL_GPL(iscsi_host_get_param);
3772 
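/**
 * iscsi_host_set_param - update a host-level string parameter
 * @shost: SCSI host backing the iSCSI host
 * @param: host parameter to set
 * @buf: nul-terminated value from userspace
 * @buflen: length of @buf (not used by this helper)
 *
 * Returns 0 on success, a negative errno from iscsi_switch_str_param() if
 * duplicating the string fails, or -ENOSYS if @param is not handled by
 * libiscsi.
 */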
3773 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
3774 			 char *buf, int buflen)
3775 {
3776 	struct iscsi_host *ihost = shost_priv(shost);
3777 
3778 	switch (param) {
3779 	case ISCSI_HOST_PARAM_NETDEV_NAME:
3780 		return iscsi_switch_str_param(&ihost->netdev, buf);
3781 	case ISCSI_HOST_PARAM_HWADDRESS:
3782 		return iscsi_switch_str_param(&ihost->hwaddress, buf);
3783 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
3784 		return iscsi_switch_str_param(&ihost->initiatorname, buf);
3785 	default:
3786 		return -ENOSYS;
3787 	}
3788 
3789 	return 0;
3790 }
3791 EXPORT_SYMBOL_GPL(iscsi_host_set_param);
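
/*
 * Usage sketch (illustrative only, not taken from a specific driver): an
 * iSCSI transport that relies on libiscsi typically wires these helpers
 * into its struct iscsi_transport so that sysfs reads and writes of iSCSI
 * attributes are serviced by this library, e.g.:
 *
 *	static struct iscsi_transport my_transport = {
 *		...
 *		.set_param		= iscsi_set_param,
 *		.get_conn_param		= iscsi_conn_get_param,
 *		.get_session_param	= iscsi_session_get_param,
 *		.get_host_param		= iscsi_host_get_param,
 *		.set_host_param		= iscsi_host_set_param,
 *	};
 */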
3792 
3793 MODULE_AUTHOR("Mike Christie");
3794 MODULE_DESCRIPTION("iSCSI library functions");
3795 MODULE_LICENSE("GPL");
3796