1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3  * This file contains main functions related to the iSCSI Target Core Driver.
4  *
5  * (c) Copyright 2007-2013 Datera, Inc.
6  *
7  * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
8  *
9  ******************************************************************************/
10 
11 #include <crypto/hash.h>
12 #include <linux/string.h>
13 #include <linux/kthread.h>
14 #include <linux/completion.h>
15 #include <linux/module.h>
16 #include <linux/vmalloc.h>
17 #include <linux/idr.h>
18 #include <linux/delay.h>
19 #include <linux/sched/signal.h>
20 #include <asm/unaligned.h>
21 #include <linux/inet.h>
22 #include <net/ipv6.h>
23 #include <scsi/scsi_proto.h>
24 #include <scsi/iscsi_proto.h>
25 #include <scsi/scsi_tcq.h>
26 #include <target/target_core_base.h>
27 #include <target/target_core_fabric.h>
28 
29 #include <target/target_core_backend.h>
30 #include <target/iscsi/iscsi_target_core.h>
31 #include "iscsi_target_parameters.h"
32 #include "iscsi_target_seq_pdu_list.h"
33 #include "iscsi_target_datain_values.h"
34 #include "iscsi_target_erl0.h"
35 #include "iscsi_target_erl1.h"
36 #include "iscsi_target_erl2.h"
37 #include "iscsi_target_login.h"
38 #include "iscsi_target_tmr.h"
39 #include "iscsi_target_tpg.h"
40 #include "iscsi_target_util.h"
41 #include "iscsi_target.h"
42 #include "iscsi_target_device.h"
43 #include <target/iscsi/iscsi_target_stat.h>
44 
45 #include <target/iscsi/iscsi_transport.h>
46 
47 static LIST_HEAD(g_tiqn_list);
48 static LIST_HEAD(g_np_list);
49 static DEFINE_SPINLOCK(tiqn_lock);
50 static DEFINE_MUTEX(np_lock);
51 
52 static struct idr tiqn_idr;
53 DEFINE_IDA(sess_ida);
54 struct mutex auth_id_lock;
55 
56 struct iscsit_global *iscsit_global;
57 
58 struct kmem_cache *lio_qr_cache;
59 struct kmem_cache *lio_dr_cache;
60 struct kmem_cache *lio_ooo_cache;
61 struct kmem_cache *lio_r2t_cache;
62 
63 static int iscsit_handle_immediate_data(struct iscsit_cmd *,
64 			struct iscsi_scsi_req *, u32);
65 
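/*
 * Look up an active struct iscsi_tiqn matching the IQN string in @buf.
 * On success the tiqn_access_count is incremented under tiqn_state_lock;
 * the caller must drop the reference with iscsit_put_tiqn_for_login().
 */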
66 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
67 {
68 	struct iscsi_tiqn *tiqn = NULL;
69 
70 	spin_lock(&tiqn_lock);
71 	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
72 		if (!strcmp(tiqn->tiqn, buf)) {
73 
74 			spin_lock(&tiqn->tiqn_state_lock);
75 			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
76 				tiqn->tiqn_access_count++;
77 				spin_unlock(&tiqn->tiqn_state_lock);
78 				spin_unlock(&tiqn_lock);
79 				return tiqn;
80 			}
81 			spin_unlock(&tiqn->tiqn_state_lock);
82 		}
83 	}
84 	spin_unlock(&tiqn_lock);
85 
86 	return NULL;
87 }
88 
89 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
90 {
91 	spin_lock(&tiqn->tiqn_state_lock);
92 	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
93 		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
94 		spin_unlock(&tiqn->tiqn_state_lock);
95 		return 0;
96 	}
97 	spin_unlock(&tiqn->tiqn_state_lock);
98 
99 	return -1;
100 }
101 
102 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
103 {
104 	spin_lock(&tiqn->tiqn_state_lock);
105 	tiqn->tiqn_access_count--;
106 	spin_unlock(&tiqn->tiqn_state_lock);
107 }
108 
109 /*
110  * Note that IQN formatting is expected to be done in userspace, and
111  * no explicit IQN format checks are done here.
112  */
113 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
114 {
115 	struct iscsi_tiqn *tiqn = NULL;
116 	int ret;
117 
118 	if (strlen(buf) >= ISCSI_IQN_LEN) {
119 		pr_err("Target IQN exceeds %d bytes\n",
120 				ISCSI_IQN_LEN);
121 		return ERR_PTR(-EINVAL);
122 	}
123 
124 	tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
125 	if (!tiqn)
126 		return ERR_PTR(-ENOMEM);
127 
128 	sprintf(tiqn->tiqn, "%s", buf);
129 	INIT_LIST_HEAD(&tiqn->tiqn_list);
130 	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
131 	spin_lock_init(&tiqn->tiqn_state_lock);
132 	spin_lock_init(&tiqn->tiqn_tpg_lock);
133 	spin_lock_init(&tiqn->sess_err_stats.lock);
134 	spin_lock_init(&tiqn->login_stats.lock);
135 	spin_lock_init(&tiqn->logout_stats.lock);
136 
137 	tiqn->tiqn_state = TIQN_STATE_ACTIVE;
138 
139 	idr_preload(GFP_KERNEL);
140 	spin_lock(&tiqn_lock);
141 
142 	ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
143 	if (ret < 0) {
144 		pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
145 		spin_unlock(&tiqn_lock);
146 		idr_preload_end();
147 		kfree(tiqn);
148 		return ERR_PTR(ret);
149 	}
150 	tiqn->tiqn_index = ret;
151 	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
152 
153 	spin_unlock(&tiqn_lock);
154 	idr_preload_end();
155 
156 	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
157 
158 	return tiqn;
159 
160 }
161 
162 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
163 {
164 	/*
165 	 * Wait for accesses to said struct iscsi_tiqn to end.
166 	 */
167 	spin_lock(&tiqn->tiqn_state_lock);
168 	while (tiqn->tiqn_access_count != 0) {
169 		spin_unlock(&tiqn->tiqn_state_lock);
170 		msleep(10);
171 		spin_lock(&tiqn->tiqn_state_lock);
172 	}
173 	spin_unlock(&tiqn->tiqn_state_lock);
174 }
175 
176 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
177 {
178 	/*
179 	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
180 	 * while holding tiqn->tiqn_state_lock.  This means that all subsequent
181 	 * attempts to access this struct iscsi_tiqn will fail from both transport
182 	 * fabric and control code paths.
183 	 */
184 	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
185 		pr_err("iscsit_set_tiqn_shutdown() failed\n");
186 		return;
187 	}
188 
189 	iscsit_wait_for_tiqn(tiqn);
190 
191 	spin_lock(&tiqn_lock);
192 	list_del(&tiqn->tiqn_list);
193 	idr_remove(&tiqn_idr, tiqn->tiqn_index);
194 	spin_unlock(&tiqn_lock);
195 
196 	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
197 			tiqn->tiqn);
198 	kfree(tiqn);
199 }
200 
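/*
 * Gate a login attempt on both the network portal and the portal group
 * being ACTIVE, then take tpg->np_login_sem to serialize logins across
 * the TIQN+TPG tuple.  Released again via iscsit_deaccess_np().
 */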
201 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
202 {
203 	int ret;
204 	/*
205 	 * Determine if the network portal is accepting storage traffic.
206 	 */
207 	spin_lock_bh(&np->np_thread_lock);
208 	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
209 		spin_unlock_bh(&np->np_thread_lock);
210 		return -1;
211 	}
212 	spin_unlock_bh(&np->np_thread_lock);
213 	/*
214 	 * Determine if the portal group is accepting storage traffic.
215 	 */
216 	spin_lock_bh(&tpg->tpg_state_lock);
217 	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
218 		spin_unlock_bh(&tpg->tpg_state_lock);
219 		return -1;
220 	}
221 	spin_unlock_bh(&tpg->tpg_state_lock);
222 
223 	/*
224 	 * Here we serialize access across the TIQN+TPG Tuple.
225 	 */
226 	ret = down_interruptible(&tpg->np_login_sem);
227 	if (ret != 0)
228 		return -1;
229 
230 	spin_lock_bh(&tpg->tpg_state_lock);
231 	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
232 		spin_unlock_bh(&tpg->tpg_state_lock);
233 		up(&tpg->np_login_sem);
234 		return -1;
235 	}
236 	spin_unlock_bh(&tpg->tpg_state_lock);
237 
238 	return 0;
239 }
240 
241 void iscsit_login_kref_put(struct kref *kref)
242 {
243 	struct iscsi_tpg_np *tpg_np = container_of(kref,
244 				struct iscsi_tpg_np, tpg_np_kref);
245 
246 	complete(&tpg_np->tpg_np_comp);
247 }
248 
249 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
250 		       struct iscsi_tpg_np *tpg_np)
251 {
252 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
253 
254 	up(&tpg->np_login_sem);
255 
256 	if (tpg_np)
257 		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
258 
259 	if (tiqn)
260 		iscsit_put_tiqn_for_login(tiqn);
261 
262 	return 0;
263 }
264 
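/*
 * Return true when @sockaddr refers to the same IPv4/IPv6 address, port
 * and network transport as the existing struct iscsi_np.
 */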
265 bool iscsit_check_np_match(
266 	struct sockaddr_storage *sockaddr,
267 	struct iscsi_np *np,
268 	int network_transport)
269 {
270 	struct sockaddr_in *sock_in, *sock_in_e;
271 	struct sockaddr_in6 *sock_in6, *sock_in6_e;
272 	bool ip_match = false;
273 	u16 port, port_e;
274 
275 	if (sockaddr->ss_family == AF_INET6) {
276 		sock_in6 = (struct sockaddr_in6 *)sockaddr;
277 		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
278 
279 		if (!memcmp(&sock_in6->sin6_addr.in6_u,
280 			    &sock_in6_e->sin6_addr.in6_u,
281 			    sizeof(struct in6_addr)))
282 			ip_match = true;
283 
284 		port = ntohs(sock_in6->sin6_port);
285 		port_e = ntohs(sock_in6_e->sin6_port);
286 	} else {
287 		sock_in = (struct sockaddr_in *)sockaddr;
288 		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
289 
290 		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
291 			ip_match = true;
292 
293 		port = ntohs(sock_in->sin_port);
294 		port_e = ntohs(sock_in_e->sin_port);
295 	}
296 
297 	if (ip_match && (port_e == port) &&
298 	    (np->np_network_transport == network_transport))
299 		return true;
300 
301 	return false;
302 }
303 
304 static struct iscsi_np *iscsit_get_np(
305 	struct sockaddr_storage *sockaddr,
306 	int network_transport)
307 {
308 	struct iscsi_np *np;
309 	bool match;
310 
311 	lockdep_assert_held(&np_lock);
312 
313 	list_for_each_entry(np, &g_np_list, np_list) {
314 		spin_lock_bh(&np->np_thread_lock);
315 		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
316 			spin_unlock_bh(&np->np_thread_lock);
317 			continue;
318 		}
319 
320 		match = iscsit_check_np_match(sockaddr, np, network_transport);
321 		if (match) {
322 			/*
323 			 * Increment the np_exports reference count now to
324 			 * prevent iscsit_del_np() below from being called
325 			 * while iscsi_tpg_add_network_portal() is called.
326 			 */
327 			np->np_exports++;
328 			spin_unlock_bh(&np->np_thread_lock);
329 			return np;
330 		}
331 		spin_unlock_bh(&np->np_thread_lock);
332 	}
333 
334 	return NULL;
335 }
336 
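/*
 * Return an existing matching network portal (with np_exports bumped by
 * iscsit_get_np() above), or allocate a new one: set up the login socket,
 * start the "iscsi_np" login kthread and add it to g_np_list.
 */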
337 struct iscsi_np *iscsit_add_np(
338 	struct sockaddr_storage *sockaddr,
339 	int network_transport)
340 {
341 	struct iscsi_np *np;
342 	int ret;
343 
344 	mutex_lock(&np_lock);
345 
346 	/*
347 	 * Locate an existing struct iscsi_np if one is already active.
348 	 */
349 	np = iscsit_get_np(sockaddr, network_transport);
350 	if (np) {
351 		mutex_unlock(&np_lock);
352 		return np;
353 	}
354 
355 	np = kzalloc(sizeof(*np), GFP_KERNEL);
356 	if (!np) {
357 		mutex_unlock(&np_lock);
358 		return ERR_PTR(-ENOMEM);
359 	}
360 
361 	np->np_flags |= NPF_IP_NETWORK;
362 	np->np_network_transport = network_transport;
363 	spin_lock_init(&np->np_thread_lock);
364 	init_completion(&np->np_restart_comp);
365 	INIT_LIST_HEAD(&np->np_list);
366 
367 	timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0);
368 
369 	ret = iscsi_target_setup_login_socket(np, sockaddr);
370 	if (ret != 0) {
371 		kfree(np);
372 		mutex_unlock(&np_lock);
373 		return ERR_PTR(ret);
374 	}
375 
376 	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
377 	if (IS_ERR(np->np_thread)) {
378 		pr_err("Unable to create kthread: iscsi_np\n");
379 		ret = PTR_ERR(np->np_thread);
380 		kfree(np);
381 		mutex_unlock(&np_lock);
382 		return ERR_PTR(ret);
383 	}
384 	/*
385 	 * Increment the np_exports reference count now to prevent
386 	 * iscsit_del_np() below from being run while a new call to
387 	 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
388 	 * active.  We don't need to hold np->np_thread_lock at this
389 	 * point because iscsi_np has not been added to g_np_list yet.
390 	 */
391 	np->np_exports = 1;
392 	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
393 
394 	list_add_tail(&np->np_list, &g_np_list);
395 	mutex_unlock(&np_lock);
396 
397 	pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
398 		&np->np_sockaddr, np->np_transport->name);
399 
400 	return np;
401 }
402 
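/*
 * Kick the np login thread out of its accept loop with SIGINT and wait
 * for it to signal np_restart_comp.  When @shutdown is set, the tpg_np
 * login reference is also dropped and waited on.
 */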
403 int iscsit_reset_np_thread(
404 	struct iscsi_np *np,
405 	struct iscsi_tpg_np *tpg_np,
406 	struct iscsi_portal_group *tpg,
407 	bool shutdown)
408 {
409 	spin_lock_bh(&np->np_thread_lock);
410 	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
411 		spin_unlock_bh(&np->np_thread_lock);
412 		return 0;
413 	}
414 	np->np_thread_state = ISCSI_NP_THREAD_RESET;
415 	atomic_inc(&np->np_reset_count);
416 
417 	if (np->np_thread) {
418 		spin_unlock_bh(&np->np_thread_lock);
419 		send_sig(SIGINT, np->np_thread, 1);
420 		wait_for_completion(&np->np_restart_comp);
421 		spin_lock_bh(&np->np_thread_lock);
422 	}
423 	spin_unlock_bh(&np->np_thread_lock);
424 
425 	if (tpg_np && shutdown) {
426 		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
427 
428 		wait_for_completion(&tpg_np->tpg_np_comp);
429 	}
430 
431 	return 0;
432 }
433 
434 static void iscsit_free_np(struct iscsi_np *np)
435 {
436 	if (np->np_socket)
437 		sock_release(np->np_socket);
438 }
439 
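/*
 * Drop one np_exports reference.  Only when the last export goes away is
 * the login thread stopped, the transport's iscsit_free_np() called, and
 * the struct iscsi_np removed from g_np_list and freed.
 */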
440 int iscsit_del_np(struct iscsi_np *np)
441 {
442 	spin_lock_bh(&np->np_thread_lock);
443 	np->np_exports--;
444 	if (np->np_exports) {
445 		np->enabled = true;
446 		spin_unlock_bh(&np->np_thread_lock);
447 		return 0;
448 	}
449 	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
450 	spin_unlock_bh(&np->np_thread_lock);
451 
452 	if (np->np_thread) {
453 		/*
454 		 * We need to send the signal to wake up Linux/Net,
455 		 * which may be sleeping in sock_accept().
456 		 */
457 		send_sig(SIGINT, np->np_thread, 1);
458 		kthread_stop(np->np_thread);
459 		np->np_thread = NULL;
460 	}
461 
462 	np->np_transport->iscsit_free_np(np);
463 
464 	mutex_lock(&np_lock);
465 	list_del(&np->np_list);
466 	mutex_unlock(&np_lock);
467 
468 	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
469 		&np->np_sockaddr, np->np_transport->name);
470 
471 	iscsit_put_transport(np->np_transport);
472 	kfree(np);
473 	return 0;
474 }
475 
476 static void iscsit_get_rx_pdu(struct iscsit_conn *);
477 
478 int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
479 {
480 	return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
481 }
482 EXPORT_SYMBOL(iscsit_queue_rsp);
483 
484 void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
485 {
486 	spin_lock_bh(&conn->cmd_lock);
487 	if (!list_empty(&cmd->i_conn_node))
488 		list_del_init(&cmd->i_conn_node);
489 	spin_unlock_bh(&conn->cmd_lock);
490 
491 	__iscsit_free_cmd(cmd, true);
492 }
493 EXPORT_SYMBOL(iscsit_aborted_task);
494 
495 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
496 				      u32, u32, const void *, void *);
497 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
498 
499 static int
500 iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
501 			  const void *data_buf, u32 data_buf_len)
502 {
503 	struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
504 	struct kvec *iov;
505 	u32 niov = 0, tx_size = ISCSI_HDR_LEN;
506 	int ret;
507 
508 	iov = &cmd->iov_misc[0];
509 	iov[niov].iov_base	= cmd->pdu;
510 	iov[niov++].iov_len	= ISCSI_HDR_LEN;
511 
512 	if (conn->conn_ops->HeaderDigest) {
513 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
514 
515 		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
516 					  ISCSI_HDR_LEN, 0, NULL,
517 					  header_digest);
518 
519 		iov[0].iov_len += ISCSI_CRC_LEN;
520 		tx_size += ISCSI_CRC_LEN;
521 		pr_debug("Attaching CRC32C HeaderDigest"
522 			 " to opcode 0x%x 0x%08x\n",
523 			 hdr->opcode, *header_digest);
524 	}
525 
526 	if (data_buf_len) {
527 		u32 padding = ((-data_buf_len) & 3);
528 
529 		iov[niov].iov_base	= (void *)data_buf;
530 		iov[niov++].iov_len	= data_buf_len;
531 		tx_size += data_buf_len;
532 
533 		if (padding != 0) {
534 			iov[niov].iov_base = &cmd->pad_bytes;
535 			iov[niov++].iov_len = padding;
536 			tx_size += padding;
537 			pr_debug("Attaching %u additional"
538 				 " padding bytes.\n", padding);
539 		}
540 
541 		if (conn->conn_ops->DataDigest) {
542 			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
543 						  data_buf, data_buf_len,
544 						  padding, &cmd->pad_bytes,
545 						  &cmd->data_crc);
546 
547 			iov[niov].iov_base = &cmd->data_crc;
548 			iov[niov++].iov_len = ISCSI_CRC_LEN;
549 			tx_size += ISCSI_CRC_LEN;
550 			pr_debug("Attached DataDigest for %u"
551 				 " bytes opcode 0x%x, CRC 0x%08x\n",
552 				 data_buf_len, hdr->opcode, cmd->data_crc);
553 		}
554 	}
555 
556 	cmd->iov_misc_count = niov;
557 	cmd->tx_size = tx_size;
558 
559 	ret = iscsit_send_tx_data(cmd, conn, 1);
560 	if (ret < 0) {
561 		iscsit_tx_thread_wait_for_tcp(conn);
562 		return ret;
563 	}
564 
565 	return 0;
566 }
567 
568 static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
569 			    u32 data_offset, u32 data_length);
570 static void iscsit_unmap_iovec(struct iscsit_cmd *);
571 static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
572 				    u32, u32, u32, u8 *);
573 static int
574 iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
575 		       const struct iscsi_datain *datain)
576 {
577 	struct kvec *iov;
578 	u32 iov_count = 0, tx_size = 0;
579 	int ret, iov_ret;
580 
581 	iov = &cmd->iov_data[0];
582 	iov[iov_count].iov_base	= cmd->pdu;
583 	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
584 	tx_size += ISCSI_HDR_LEN;
585 
586 	if (conn->conn_ops->HeaderDigest) {
587 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
588 
589 		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
590 					  ISCSI_HDR_LEN, 0, NULL,
591 					  header_digest);
592 
593 		iov[0].iov_len += ISCSI_CRC_LEN;
594 		tx_size += ISCSI_CRC_LEN;
595 
596 		pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
597 			 *header_digest);
598 	}
599 
600 	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
601 				   cmd->orig_iov_data_count - (iov_count + 2),
602 				   datain->offset, datain->length);
603 	if (iov_ret < 0)
604 		return -1;
605 
606 	iov_count += iov_ret;
607 	tx_size += datain->length;
608 
609 	cmd->padding = ((-datain->length) & 3);
610 	if (cmd->padding) {
611 		iov[iov_count].iov_base		= cmd->pad_bytes;
612 		iov[iov_count++].iov_len	= cmd->padding;
613 		tx_size += cmd->padding;
614 
615 		pr_debug("Attaching %u padding bytes\n", cmd->padding);
616 	}
617 
618 	if (conn->conn_ops->DataDigest) {
619 		cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
620 							 cmd, datain->offset,
621 							 datain->length,
622 							 cmd->padding,
623 							 cmd->pad_bytes);
624 
625 		iov[iov_count].iov_base	= &cmd->data_crc;
626 		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
627 		tx_size += ISCSI_CRC_LEN;
628 
629 		pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
630 			 datain->length + cmd->padding, cmd->data_crc);
631 	}
632 
633 	cmd->iov_data_count = iov_count;
634 	cmd->tx_size = tx_size;
635 
636 	ret = iscsit_fe_sendpage_sg(cmd, conn);
637 
638 	iscsit_unmap_iovec(cmd);
639 
640 	if (ret < 0) {
641 		iscsit_tx_thread_wait_for_tcp(conn);
642 		return ret;
643 	}
644 
645 	return 0;
646 }
647 
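/*
 * Transmit callback for the traditional iSCSI/TCP transport: DataIN PDUs
 * (dr != NULL) go through the sendpage-based path, all other PDUs through
 * the kvec-based path above.
 */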
648 static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
649 			   struct iscsi_datain_req *dr, const void *buf,
650 			   u32 buf_len)
651 {
652 	if (dr)
653 		return iscsit_xmit_datain_pdu(conn, cmd, buf);
654 	else
655 		return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
656 }
657 
658 static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
659 {
660 	return TARGET_PROT_NORMAL;
661 }
662 
663 static struct iscsit_transport iscsi_target_transport = {
664 	.name			= "iSCSI/TCP",
665 	.transport_type		= ISCSI_TCP,
666 	.rdma_shutdown		= false,
667 	.owner			= NULL,
668 	.iscsit_setup_np	= iscsit_setup_np,
669 	.iscsit_accept_np	= iscsit_accept_np,
670 	.iscsit_free_np		= iscsit_free_np,
671 	.iscsit_get_login_rx	= iscsit_get_login_rx,
672 	.iscsit_put_login_tx	= iscsit_put_login_tx,
673 	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
674 	.iscsit_immediate_queue	= iscsit_immediate_queue,
675 	.iscsit_response_queue	= iscsit_response_queue,
676 	.iscsit_queue_data_in	= iscsit_queue_rsp,
677 	.iscsit_queue_status	= iscsit_queue_rsp,
678 	.iscsit_aborted_task	= iscsit_aborted_task,
679 	.iscsit_xmit_pdu	= iscsit_xmit_pdu,
680 	.iscsit_get_rx_pdu	= iscsit_get_rx_pdu,
681 	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
682 };
683 
684 static int __init iscsi_target_init_module(void)
685 {
686 	int ret = 0, size;
687 
688 	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
689 	iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
690 	if (!iscsit_global)
691 		return -1;
692 
693 	spin_lock_init(&iscsit_global->ts_bitmap_lock);
694 	mutex_init(&auth_id_lock);
695 	idr_init(&tiqn_idr);
696 
697 	ret = target_register_template(&iscsi_ops);
698 	if (ret)
699 		goto out;
700 
701 	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
702 	iscsit_global->ts_bitmap = vzalloc(size);
703 	if (!iscsit_global->ts_bitmap)
704 		goto configfs_out;
705 
706 	if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
707 		pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
708 		goto bitmap_out;
709 	}
710 	cpumask_setall(iscsit_global->allowed_cpumask);
711 
712 	lio_qr_cache = kmem_cache_create("lio_qr_cache",
713 			sizeof(struct iscsi_queue_req),
714 			__alignof__(struct iscsi_queue_req), 0, NULL);
715 	if (!lio_qr_cache) {
716 		pr_err("Unable to kmem_cache_create() for"
717 				" lio_qr_cache\n");
718 		goto cpumask_out;
719 	}
720 
721 	lio_dr_cache = kmem_cache_create("lio_dr_cache",
722 			sizeof(struct iscsi_datain_req),
723 			__alignof__(struct iscsi_datain_req), 0, NULL);
724 	if (!lio_dr_cache) {
725 		pr_err("Unable to kmem_cache_create() for"
726 				" lio_dr_cache\n");
727 		goto qr_out;
728 	}
729 
730 	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
731 			sizeof(struct iscsi_ooo_cmdsn),
732 			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
733 	if (!lio_ooo_cache) {
734 		pr_err("Unable to kmem_cache_create() for"
735 				" lio_ooo_cache\n");
736 		goto dr_out;
737 	}
738 
739 	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
740 			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
741 			0, NULL);
742 	if (!lio_r2t_cache) {
743 		pr_err("Unable to kmem_cache_create() for"
744 				" lio_r2t_cache\n");
745 		goto ooo_out;
746 	}
747 
748 	iscsit_register_transport(&iscsi_target_transport);
749 
750 	if (iscsit_load_discovery_tpg() < 0)
751 		goto r2t_out;
752 
753 	return ret;
754 r2t_out:
755 	iscsit_unregister_transport(&iscsi_target_transport);
756 	kmem_cache_destroy(lio_r2t_cache);
757 ooo_out:
758 	kmem_cache_destroy(lio_ooo_cache);
759 dr_out:
760 	kmem_cache_destroy(lio_dr_cache);
761 qr_out:
762 	kmem_cache_destroy(lio_qr_cache);
763 cpumask_out:
764 	free_cpumask_var(iscsit_global->allowed_cpumask);
765 bitmap_out:
766 	vfree(iscsit_global->ts_bitmap);
767 configfs_out:
768 	/* XXX: this probably wants to be its own unwind step. */
769 	if (iscsit_global->discovery_tpg)
770 		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
771 	target_unregister_template(&iscsi_ops);
772 out:
773 	kfree(iscsit_global);
774 	return -ENOMEM;
775 }
776 
777 static void __exit iscsi_target_cleanup_module(void)
778 {
779 	iscsit_release_discovery_tpg();
780 	iscsit_unregister_transport(&iscsi_target_transport);
781 	kmem_cache_destroy(lio_qr_cache);
782 	kmem_cache_destroy(lio_dr_cache);
783 	kmem_cache_destroy(lio_ooo_cache);
784 	kmem_cache_destroy(lio_r2t_cache);
785 
786 	/*
787 	 * Shutdown discovery sessions and disable discovery TPG
788 	 */
789 	if (iscsit_global->discovery_tpg)
790 		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
791 
792 	target_unregister_template(&iscsi_ops);
793 
794 	free_cpumask_var(iscsit_global->allowed_cpumask);
795 	vfree(iscsit_global->ts_bitmap);
796 	kfree(iscsit_global);
797 }
798 
799 int iscsit_add_reject(
800 	struct iscsit_conn *conn,
801 	u8 reason,
802 	unsigned char *buf)
803 {
804 	struct iscsit_cmd *cmd;
805 
806 	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
807 	if (!cmd)
808 		return -1;
809 
810 	cmd->iscsi_opcode = ISCSI_OP_REJECT;
811 	cmd->reject_reason = reason;
812 
813 	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
814 	if (!cmd->buf_ptr) {
815 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
816 		iscsit_free_cmd(cmd, false);
817 		return -1;
818 	}
819 
820 	spin_lock_bh(&conn->cmd_lock);
821 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
822 	spin_unlock_bh(&conn->cmd_lock);
823 
824 	cmd->i_state = ISTATE_SEND_REJECT;
825 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
826 
827 	return -1;
828 }
829 EXPORT_SYMBOL(iscsit_add_reject);
830 
831 static int iscsit_add_reject_from_cmd(
832 	struct iscsit_cmd *cmd,
833 	u8 reason,
834 	bool add_to_conn,
835 	unsigned char *buf)
836 {
837 	struct iscsit_conn *conn;
838 	const bool do_put = cmd->se_cmd.se_tfo != NULL;
839 
840 	if (!cmd->conn) {
841 		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
842 				cmd->init_task_tag);
843 		return -1;
844 	}
845 	conn = cmd->conn;
846 
847 	cmd->iscsi_opcode = ISCSI_OP_REJECT;
848 	cmd->reject_reason = reason;
849 
850 	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
851 	if (!cmd->buf_ptr) {
852 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
853 		iscsit_free_cmd(cmd, false);
854 		return -1;
855 	}
856 
857 	if (add_to_conn) {
858 		spin_lock_bh(&conn->cmd_lock);
859 		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
860 		spin_unlock_bh(&conn->cmd_lock);
861 	}
862 
863 	cmd->i_state = ISTATE_SEND_REJECT;
864 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
865 	/*
866 	 * Perform the kref_put now if se_cmd has already been set up by
867 	 * iscsit_setup_scsi_cmd().
868 	 */
869 	if (do_put) {
870 		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
871 		target_put_sess_cmd(&cmd->se_cmd);
872 	}
873 	return -1;
874 }
875 
876 static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
877 				 unsigned char *buf)
878 {
879 	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
880 }
881 
882 int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
883 {
884 	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
885 }
886 EXPORT_SYMBOL(iscsit_reject_cmd);
887 
888 /*
889  * Map some portion of the allocated scatterlist to an iovec, suitable for
890  * kernel sockets to copy data in/out.
891  */
892 static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
893 			    u32 data_offset, u32 data_length)
894 {
895 	u32 i = 0, orig_data_length = data_length;
896 	struct scatterlist *sg;
897 	unsigned int page_off;
898 
899 	/*
900 	 * We know each entry in t_data_sg contains a page.
901 	 */
902 	u32 ent = data_offset / PAGE_SIZE;
903 
904 	if (!data_length)
905 		return 0;
906 
907 	if (ent >= cmd->se_cmd.t_data_nents) {
908 		pr_err("Initial page entry out-of-bounds\n");
909 		goto overflow;
910 	}
911 
912 	sg = &cmd->se_cmd.t_data_sg[ent];
913 	page_off = (data_offset % PAGE_SIZE);
914 
915 	cmd->first_data_sg = sg;
916 	cmd->first_data_sg_off = page_off;
917 
918 	while (data_length) {
919 		u32 cur_len;
920 
921 		if (WARN_ON_ONCE(!sg || i >= nvec))
922 			goto overflow;
923 
924 		cur_len = min_t(u32, data_length, sg->length - page_off);
925 
926 		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
927 		iov[i].iov_len = cur_len;
928 
929 		data_length -= cur_len;
930 		page_off = 0;
931 		sg = sg_next(sg);
932 		i++;
933 	}
934 
935 	cmd->kmapped_nents = i;
936 
937 	return i;
938 
939 overflow:
940 	pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
941 	       data_offset, orig_data_length, i, nvec);
942 	for_each_sg(cmd->se_cmd.t_data_sg, sg,
943 		    cmd->se_cmd.t_data_nents, i) {
944 		pr_err("[%d] off %d len %d\n",
945 		       i, sg->offset, sg->length);
946 	}
947 	return -1;
948 }
949 
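/* Undo the kmap()s performed by iscsit_map_iovec() above. */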
950 static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
951 {
952 	u32 i;
953 	struct scatterlist *sg;
954 
955 	sg = cmd->first_data_sg;
956 
957 	for (i = 0; i < cmd->kmapped_nents; i++)
958 		kunmap(sg_page(&sg[i]));
959 }
960 
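/*
 * The initiator advanced ExpStatSN, acknowledging our status PDUs up to
 * (but not including) that value: move matching ISTATE_SENT_STATUS
 * commands off conn_cmd_list under cmd_lock and free them afterwards.
 */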
961 static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
962 {
963 	LIST_HEAD(ack_list);
964 	struct iscsit_cmd *cmd, *cmd_p;
965 
966 	conn->exp_statsn = exp_statsn;
967 
968 	if (conn->sess->sess_ops->RDMAExtensions)
969 		return;
970 
971 	spin_lock_bh(&conn->cmd_lock);
972 	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
973 		spin_lock(&cmd->istate_lock);
974 		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
975 		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
976 			cmd->i_state = ISTATE_REMOVE;
977 			spin_unlock(&cmd->istate_lock);
978 			list_move_tail(&cmd->i_conn_node, &ack_list);
979 			continue;
980 		}
981 		spin_unlock(&cmd->istate_lock);
982 	}
983 	spin_unlock_bh(&conn->cmd_lock);
984 
985 	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
986 		list_del_init(&cmd->i_conn_node);
987 		iscsit_free_cmd(cmd, false);
988 	}
989 }
990 
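/*
 * Size the per-command kvec array for the expected data transfer: one
 * entry per page (at least one), plus ISCSI_IOV_DATA_BUFFER slots of
 * slack for header, padding and digest entries.
 */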
991 static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
992 {
993 	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
994 
995 	iov_count += ISCSI_IOV_DATA_BUFFER;
996 	cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
997 	if (!cmd->iov_data)
998 		return -ENOMEM;
999 
1000 	cmd->orig_iov_data_count = iov_count;
1001 	return 0;
1002 }
1003 
1004 int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1005 			  unsigned char *buf)
1006 {
1007 	int data_direction, payload_length;
1008 	struct iscsi_ecdb_ahdr *ecdb_ahdr;
1009 	struct iscsi_scsi_req *hdr;
1010 	int iscsi_task_attr;
1011 	unsigned char *cdb;
1012 	int sam_task_attr;
1013 
1014 	atomic_long_inc(&conn->sess->cmd_pdus);
1015 
1016 	hdr			= (struct iscsi_scsi_req *) buf;
1017 	payload_length		= ntoh24(hdr->dlength);
1018 
1019 	/* FIXME: Add checks for AdditionalHeaderSegment */
1020 
1021 	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
1022 	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1023 		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
1024 				" not set. Bad iSCSI Initiator.\n");
1025 		return iscsit_add_reject_cmd(cmd,
1026 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1027 	}
1028 
1029 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
1030 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
1031 		/*
1032 		 * From RFC-3720 Section 10.3.1:
1033 		 *
1034 		 * "Either or both of R and W MAY be 1 when either the
1035 		 *  Expected Data Transfer Length and/or Bidirectional Read
1036 		 *  Expected Data Transfer Length are 0"
1037 		 *
1038 		 * For this case, go ahead and clear the unnecessary bits
1039 		 * to avoid any confusion with ->data_direction.
1040 		 */
1041 		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
1042 		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1043 
1044 		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1045 			" set when Expected Data Transfer Length is 0 for"
1046 			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
1047 	}
1048 
1049 	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
1050 	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
1051 		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
1052 			" MUST be set if Expected Data Transfer Length is not 0."
1053 			" Bad iSCSI Initiator\n");
1054 		return iscsit_add_reject_cmd(cmd,
1055 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1056 	}
1057 
1058 	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
1059 	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
1060 		pr_err("Bidirectional operations not supported!\n");
1061 		return iscsit_add_reject_cmd(cmd,
1062 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1063 	}
1064 
1065 	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1066 		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
1067 				" Scsi Command PDU.\n");
1068 		return iscsit_add_reject_cmd(cmd,
1069 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1070 	}
1071 
1072 	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
1073 		pr_err("ImmediateData=No but DataSegmentLength=%u,"
1074 			" protocol error.\n", payload_length);
1075 		return iscsit_add_reject_cmd(cmd,
1076 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1077 	}
1078 
1079 	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
1080 	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
1081 		pr_err("Expected Data Transfer Length and Length of"
1082 			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
1083 			" bit is not set protocol error\n");
1084 		return iscsit_add_reject_cmd(cmd,
1085 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1086 	}
1087 
1088 	if (payload_length > be32_to_cpu(hdr->data_length)) {
1089 		pr_err("DataSegmentLength: %u is greater than"
1090 			" EDTL: %u, protocol error.\n", payload_length,
1091 				hdr->data_length);
1092 		return iscsit_add_reject_cmd(cmd,
1093 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1094 	}
1095 
1096 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1097 		pr_err("DataSegmentLength: %u is greater than"
1098 			" MaxXmitDataSegmentLength: %u, protocol error.\n",
1099 			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1100 		return iscsit_add_reject_cmd(cmd,
1101 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1102 	}
1103 
1104 	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
1105 		pr_err("DataSegmentLength: %u is greater than"
1106 			" FirstBurstLength: %u, protocol error.\n",
1107 			payload_length, conn->sess->sess_ops->FirstBurstLength);
1108 		return iscsit_add_reject_cmd(cmd,
1109 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1110 	}
1111 
1112 	cdb = hdr->cdb;
1113 
1114 	if (hdr->hlength) {
1115 		ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
1116 		if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
1117 			pr_err("Additional Header Segment type %d not supported!\n",
1118 			       ecdb_ahdr->ahstype);
1119 			return iscsit_add_reject_cmd(cmd,
1120 				ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
1121 		}
1122 
1123 		cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
1124 			      GFP_KERNEL);
1125 		if (cdb == NULL)
1126 			return iscsit_add_reject_cmd(cmd,
1127 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1128 		memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
1129 		memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
1130 		       be16_to_cpu(ecdb_ahdr->ahslength) - 1);
1131 	}
1132 
1133 	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
1134 			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
1135 			  DMA_NONE;
1136 
1137 	cmd->data_direction = data_direction;
1138 	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
1139 	/*
1140 	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
1141 	 */
1142 	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
1143 	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
1144 		sam_task_attr = TCM_SIMPLE_TAG;
1145 	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
1146 		sam_task_attr = TCM_ORDERED_TAG;
1147 	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
1148 		sam_task_attr = TCM_HEAD_TAG;
1149 	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
1150 		sam_task_attr = TCM_ACA_TAG;
1151 	else {
1152 		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
1153 			" TCM_SIMPLE_TAG\n", iscsi_task_attr);
1154 		sam_task_attr = TCM_SIMPLE_TAG;
1155 	}
1156 
1157 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_CMD;
1158 	cmd->i_state		= ISTATE_NEW_CMD;
1159 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1160 	cmd->immediate_data	= (payload_length) ? 1 : 0;
1161 	cmd->unsolicited_data	= ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
1162 				     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
1163 	if (cmd->unsolicited_data)
1164 		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
1165 
1166 	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1167 	if (hdr->flags & ISCSI_FLAG_CMD_READ)
1168 		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
1169 	else
1170 		cmd->targ_xfer_tag = 0xFFFFFFFF;
1171 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
1172 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
1173 	cmd->first_burst_len	= payload_length;
1174 
1175 	if (!conn->sess->sess_ops->RDMAExtensions &&
1176 	     cmd->data_direction == DMA_FROM_DEVICE) {
1177 		struct iscsi_datain_req *dr;
1178 
1179 		dr = iscsit_allocate_datain_req();
1180 		if (!dr) {
1181 			if (cdb != hdr->cdb)
1182 				kfree(cdb);
1183 			return iscsit_add_reject_cmd(cmd,
1184 					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1185 		}
1186 
1187 		iscsit_attach_datain_req(cmd, dr);
1188 	}
1189 
1190 	/*
1191 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
1192 	 */
1193 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
1194 			  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
1195 			  cmd->data_direction, sam_task_attr,
1196 			  cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
1197 			  conn->cmd_cnt);
1198 
1199 	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
1200 		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
1201 		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
1202 		conn->cid);
1203 
1204 	target_get_sess_cmd(&cmd->se_cmd, true);
1205 
1206 	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
1207 	cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
1208 						GFP_KERNEL);
1209 
1210 	if (cdb != hdr->cdb)
1211 		kfree(cdb);
1212 
1213 	if (cmd->sense_reason) {
1214 		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
1215 			return iscsit_add_reject_cmd(cmd,
1216 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1217 		}
1218 
1219 		goto attach_cmd;
1220 	}
1221 
1222 	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
1223 	if (cmd->sense_reason)
1224 		goto attach_cmd;
1225 
1226 	cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
1227 	if (cmd->sense_reason)
1228 		goto attach_cmd;
1229 
1230 	if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
1231 		return iscsit_add_reject_cmd(cmd,
1232 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1233 	}
1234 
1235 attach_cmd:
1236 	spin_lock_bh(&conn->cmd_lock);
1237 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1238 	spin_unlock_bh(&conn->cmd_lock);
1239 	/*
1240 	 * Check if we need to delay processing because of ALUA
1241 	 * Active/NonOptimized primary access state..
1242 	 */
1243 	core_alua_check_nonop_delay(&cmd->se_cmd);
1244 
1245 	return 0;
1246 }
1247 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1248 
1249 void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
1250 {
1251 	iscsit_set_dataout_sequence_values(cmd);
1252 
1253 	spin_lock_bh(&cmd->dataout_timeout_lock);
1254 	iscsit_start_dataout_timer(cmd, cmd->conn);
1255 	spin_unlock_bh(&cmd->dataout_timeout_lock);
1256 }
1257 EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
1258 
1259 int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1260 			    struct iscsi_scsi_req *hdr)
1261 {
1262 	int cmdsn_ret = 0;
1263 	/*
1264 	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1265 	 * the Immediate Bit is not set, and no Immediate
1266 	 * Data is attached.
1267 	 *
1268 	 * A PDU/CmdSN carrying Immediate Data can only
1269 	 * be processed after the DataCRC has passed.
1270 	 * If the DataCRC fails, the CmdSN MUST NOT
1271 	 * be acknowledged. (See below)
1272 	 */
1273 	if (!cmd->immediate_data) {
1274 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1275 					(unsigned char *)hdr, hdr->cmdsn);
1276 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1277 			return -1;
1278 		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1279 			target_put_sess_cmd(&cmd->se_cmd);
1280 			return 0;
1281 		}
1282 	}
1283 
1284 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1285 
1286 	/*
1287 	 * If no Immediate Data is attached, it's OK to return now.
1288 	 */
1289 	if (!cmd->immediate_data) {
1290 		if (!cmd->sense_reason && cmd->unsolicited_data)
1291 			iscsit_set_unsolicited_dataout(cmd);
1292 		if (!cmd->sense_reason)
1293 			return 0;
1294 
1295 		target_put_sess_cmd(&cmd->se_cmd);
1296 		return 0;
1297 	}
1298 
1299 	/*
1300 	 * Early CHECK_CONDITIONs with ImmediateData never make it to command
1301 	 * execution.  These exceptions are processed in CmdSN order using
1302 	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
1303 	 */
1304 	if (cmd->sense_reason)
1305 		return 1;
1306 	/*
1307 	 * Call directly into transport_generic_new_cmd() to perform
1308 	 * the backend memory allocation.
1309 	 */
1310 	cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
1311 	if (cmd->sense_reason)
1312 		return 1;
1313 
1314 	return 0;
1315 }
1316 EXPORT_SYMBOL(iscsit_process_scsi_cmd);
1317 
1318 static int
1319 iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
1320 			  bool dump_payload)
1321 {
1322 	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1323 	int rc;
1324 
1325 	/*
1326 	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
1327 	 */
1328 	if (dump_payload) {
1329 		u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
1330 				 cmd->first_burst_len);
1331 
1332 		pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
1333 			 cmd->se_cmd.data_length, cmd->write_data_done,
1334 			 cmd->first_burst_len, length);
1335 		rc = iscsit_dump_data_payload(cmd->conn, length, 1);
1336 		pr_debug("Finished dumping immediate data\n");
1337 		if (rc < 0)
1338 			immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
1339 	} else {
1340 		immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1341 							 cmd->first_burst_len);
1342 	}
1343 
1344 	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1345 		/*
1346 		 * A PDU/CmdSN carrying Immediate Data passed
1347 		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1348 		 * Immediate Bit is not set.
1349 		 */
1350 		cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
1351 					(unsigned char *)hdr, hdr->cmdsn);
1352 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1353 			return -1;
1354 
1355 		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1356 			target_put_sess_cmd(&cmd->se_cmd);
1357 
1358 			return 0;
1359 		} else if (cmd->unsolicited_data)
1360 			iscsit_set_unsolicited_dataout(cmd);
1361 
1362 	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1363 		/*
1364 		 * Immediate Data failed DataCRC and ERL>=1,
1365 		 * silently drop this PDU and let the initiator
1366 		 * plug the CmdSN gap.
1367 		 *
1368 		 * FIXME: Send Unsolicited NOPIN with reserved
1369 		 * TTT here to help the initiator figure out
1370 		 * the missing CmdSN, although they should be
1371 		 * intelligent enough to determine the missing
1372 		 * CmdSN and issue a retry to plug the sequence.
1373 		 */
1374 		cmd->i_state = ISTATE_REMOVE;
1375 		iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
1376 	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1377 		return -1;
1378 
1379 	return 0;
1380 }
1381 
1382 static int
1383 iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1384 			   unsigned char *buf)
1385 {
1386 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1387 	int rc, immed_data;
1388 	bool dump_payload = false;
1389 
1390 	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1391 	if (rc < 0)
1392 		return 0;
1393 	/*
1394 	 * Allocate the iovecs needed for struct socket operations for
1395 	 * traditional iSCSI block I/O.
1396 	 */
1397 	if (iscsit_allocate_iovecs(cmd) < 0) {
1398 		return iscsit_reject_cmd(cmd,
1399 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1400 	}
1401 	immed_data = cmd->immediate_data;
1402 
1403 	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1404 	if (rc < 0)
1405 		return rc;
1406 	else if (rc > 0)
1407 		dump_payload = true;
1408 
1409 	if (!immed_data)
1410 		return 0;
1411 
1412 	return iscsit_get_immediate_data(cmd, hdr, dump_payload);
1413 }
1414 
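/*
 * Compute the CRC32C DataDigest over @data_length bytes of the command's
 * scatterlist, starting at the sg entry/offset cached by iscsit_map_iovec(),
 * folding in any pad bytes at the end.
 */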
1415 static u32 iscsit_do_crypto_hash_sg(
1416 	struct ahash_request *hash,
1417 	struct iscsit_cmd *cmd,
1418 	u32 data_offset,
1419 	u32 data_length,
1420 	u32 padding,
1421 	u8 *pad_bytes)
1422 {
1423 	u32 data_crc;
1424 	struct scatterlist *sg;
1425 	unsigned int page_off;
1426 
1427 	crypto_ahash_init(hash);
1428 
1429 	sg = cmd->first_data_sg;
1430 	page_off = cmd->first_data_sg_off;
1431 
1432 	if (data_length && page_off) {
1433 		struct scatterlist first_sg;
1434 		u32 len = min_t(u32, data_length, sg->length - page_off);
1435 
1436 		sg_init_table(&first_sg, 1);
1437 		sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
1438 
1439 		ahash_request_set_crypt(hash, &first_sg, NULL, len);
1440 		crypto_ahash_update(hash);
1441 
1442 		data_length -= len;
1443 		sg = sg_next(sg);
1444 	}
1445 
1446 	while (data_length) {
1447 		u32 cur_len = min_t(u32, data_length, sg->length);
1448 
1449 		ahash_request_set_crypt(hash, sg, NULL, cur_len);
1450 		crypto_ahash_update(hash);
1451 
1452 		data_length -= cur_len;
1453 		/* iscsit_map_iovec has already checked for invalid sg pointers */
1454 		sg = sg_next(sg);
1455 	}
1456 
1457 	if (padding) {
1458 		struct scatterlist pad_sg;
1459 
1460 		sg_init_one(&pad_sg, pad_bytes, padding);
1461 		ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
1462 					padding);
1463 		crypto_ahash_finup(hash);
1464 	} else {
1465 		ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
1466 		crypto_ahash_final(hash);
1467 	}
1468 
1469 	return data_crc;
1470 }
1471 
1472 static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
1473 	const void *buf, u32 payload_length, u32 padding,
1474 	const void *pad_bytes, void *data_crc)
1475 {
1476 	struct scatterlist sg[2];
1477 
1478 	sg_init_table(sg, ARRAY_SIZE(sg));
1479 	sg_set_buf(sg, buf, payload_length);
1480 	if (padding)
1481 		sg_set_buf(sg + 1, pad_bytes, padding);
1482 
1483 	ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
1484 
1485 	crypto_ahash_digest(hash);
1486 }
1487 
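/*
 * Header-level checks for an incoming DataOUT PDU: dump the payload for
 * non-WRITE commands or data arriving after the last DataOUT, enforce
 * Offset + Length within the command's EDTL, handle the unsolicited-data
 * and aborted-command special cases, and run the pre-DataOUT sequence
 * checks.  *success is set only when the payload should actually be
 * received into the command.
 */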
1488 int
1489 __iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1490 			   struct iscsit_cmd *cmd, u32 payload_length,
1491 			   bool *success)
1492 {
1493 	struct iscsi_data *hdr = buf;
1494 	struct se_cmd *se_cmd;
1495 	int rc;
1496 
1497 	/* iSCSI write */
1498 	atomic_long_add(payload_length, &conn->sess->rx_data_octets);
1499 
1500 	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1501 		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1502 		hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
1503 		payload_length, conn->cid);
1504 
1505 	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1506 		pr_err("Command ITT: 0x%08x received DataOUT after"
1507 			" last DataOUT received, dumping payload\n",
1508 			cmd->init_task_tag);
1509 		return iscsit_dump_data_payload(conn, payload_length, 1);
1510 	}
1511 
1512 	if (cmd->data_direction != DMA_TO_DEVICE) {
1513 		pr_err("Command ITT: 0x%08x received DataOUT for a"
1514 			" NON-WRITE command.\n", cmd->init_task_tag);
1515 		return iscsit_dump_data_payload(conn, payload_length, 1);
1516 	}
1517 	se_cmd = &cmd->se_cmd;
1518 	iscsit_mod_dataout_timer(cmd);
1519 
1520 	if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1521 		pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
1522 		       be32_to_cpu(hdr->offset), payload_length,
1523 		       cmd->se_cmd.data_length);
1524 		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
1525 	}
1526 
1527 	if (cmd->unsolicited_data) {
1528 		int dump_unsolicited_data = 0;
1529 
1530 		if (conn->sess->sess_ops->InitialR2T) {
1531 			pr_err("Received unexpected unsolicited data"
1532 				" while InitialR2T=Yes, protocol error.\n");
1533 			transport_send_check_condition_and_sense(&cmd->se_cmd,
1534 					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
1535 			return -1;
1536 		}
1537 		/*
1538 		 * Special case for dealing with Unsolicited DataOUT
1539 		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
1540 		 * failures.
1541 		 */
1542 
1543 		/* Something's amiss if we're not in WRITE_PENDING state... */
1544 		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1545 		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
1546 			dump_unsolicited_data = 1;
1547 
1548 		if (dump_unsolicited_data) {
1549 			/*
1550 			 * Check if a delayed TASK_ABORTED status needs to
1551 			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1552 			 * received with the unsolicited data out.
1553 			 */
1554 			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1555 				iscsit_stop_dataout_timer(cmd);
1556 
1557 			return iscsit_dump_data_payload(conn, payload_length, 1);
1558 		}
1559 	} else {
1560 		/*
1561 		 * For the normal solicited data path:
1562 		 *
1563 		 * Check for a delayed TASK_ABORTED status and dump any
1564 		 * incoming data out payload if one exists.  Also, when the
1565 		 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1566 		 * data out sequence, we decrement outstanding_r2ts.  Once
1567 		 * outstanding_r2ts reaches zero, go ahead and send the delayed
1568 		 * TASK_ABORTED status.
1569 		 */
1570 		if (se_cmd->transport_state & CMD_T_ABORTED) {
1571 			if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
1572 			    --cmd->outstanding_r2ts < 1)
1573 				iscsit_stop_dataout_timer(cmd);
1574 
1575 			return iscsit_dump_data_payload(conn, payload_length, 1);
1576 		}
1577 	}
1578 	/*
1579 	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1580 	 * within-command recovery checks before receiving the payload.
1581 	 */
1582 	rc = iscsit_check_pre_dataout(cmd, buf);
1583 	if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
1584 		return 0;
1585 	else if (rc == DATAOUT_CANNOT_RECOVER)
1586 		return -1;
1587 	*success = true;
1588 	return 0;
1589 }
1590 EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
1591 
1592 int
1593 iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1594 			 struct iscsit_cmd **out_cmd)
1595 {
1596 	struct iscsi_data *hdr = buf;
1597 	struct iscsit_cmd *cmd;
1598 	u32 payload_length = ntoh24(hdr->dlength);
1599 	int rc;
1600 	bool success = false;
1601 
1602 	if (!payload_length) {
1603 		pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
1604 		return 0;
1605 	}
1606 
1607 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1608 		pr_err_ratelimited("DataSegmentLength: %u is greater than"
1609 			" MaxXmitDataSegmentLength: %u\n", payload_length,
1610 			conn->conn_ops->MaxXmitDataSegmentLength);
1611 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
1612 	}
1613 
1614 	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
1615 	if (!cmd)
1616 		return 0;
1617 
1618 	rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
1619 
1620 	if (success)
1621 		*out_cmd = cmd;
1622 
1623 	return rc;
1624 }
1625 EXPORT_SYMBOL(iscsit_check_dataout_hdr);
1626 
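/*
 * Receive the DataOUT payload into the command's scatterlist through a
 * temporary iovec mapping, together with pad bytes and, when DataDigest
 * is enabled, the CRC32C checksum.  Returns 1 on a digest mismatch,
 * -1 on a receive error, 0 otherwise.
 */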
1627 static int
1628 iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1629 		   struct iscsi_data *hdr)
1630 {
1631 	struct kvec *iov;
1632 	u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
1633 	u32 payload_length;
1634 	int iov_ret, data_crc_failed = 0;
1635 
1636 	payload_length = min_t(u32, cmd->se_cmd.data_length,
1637 			       ntoh24(hdr->dlength));
1638 	rx_size += payload_length;
1639 	iov = &cmd->iov_data[0];
1640 
1641 	iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
1642 				   be32_to_cpu(hdr->offset), payload_length);
1643 	if (iov_ret < 0)
1644 		return -1;
1645 
1646 	iov_count += iov_ret;
1647 
1648 	padding = ((-payload_length) & 3);
1649 	if (padding != 0) {
1650 		iov[iov_count].iov_base	= cmd->pad_bytes;
1651 		iov[iov_count++].iov_len = padding;
1652 		rx_size += padding;
1653 		pr_debug("Receiving %u padding bytes.\n", padding);
1654 	}
1655 
1656 	if (conn->conn_ops->DataDigest) {
1657 		iov[iov_count].iov_base = &checksum;
1658 		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1659 		rx_size += ISCSI_CRC_LEN;
1660 	}
1661 
1662 	WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
1663 	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1664 
1665 	iscsit_unmap_iovec(cmd);
1666 
1667 	if (rx_got != rx_size)
1668 		return -1;
1669 
1670 	if (conn->conn_ops->DataDigest) {
1671 		u32 data_crc;
1672 
1673 		data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
1674 						    be32_to_cpu(hdr->offset),
1675 						    payload_length, padding,
1676 						    cmd->pad_bytes);
1677 
1678 		if (checksum != data_crc) {
1679 			pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1680 				" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1681 				" does not match computed 0x%08x\n",
1682 				hdr->itt, hdr->offset, payload_length,
1683 				hdr->datasn, checksum, data_crc);
1684 			data_crc_failed = 1;
1685 		} else {
1686 			pr_debug("Got CRC32C DataDigest 0x%08x for"
1687 				" %u bytes of Data Out\n", checksum,
1688 				payload_length);
1689 		}
1690 	}
1691 
1692 	return data_crc_failed;
1693 }
1694 
1695 int
1696 iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
1697 			     bool data_crc_failed)
1698 {
1699 	struct iscsit_conn *conn = cmd->conn;
1700 	int rc, ooo_cmdsn;
1701 	/*
1702 	 * Increment post receive data and CRC values or perform
1703 	 * within-command recovery.
1704 	 */
1705 	rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
1706 	if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
1707 		return 0;
1708 	else if (rc == DATAOUT_SEND_R2T) {
1709 		iscsit_set_dataout_sequence_values(cmd);
1710 		conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
1711 	} else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
1712 		/*
1713 		 * Handle extra special case for out of order
1714 		 * Unsolicited Data Out.
1715 		 */
1716 		spin_lock_bh(&cmd->istate_lock);
1717 		ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1718 		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1719 		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1720 		spin_unlock_bh(&cmd->istate_lock);
1721 
1722 		iscsit_stop_dataout_timer(cmd);
1723 		if (ooo_cmdsn)
1724 			return 0;
1725 		target_execute_cmd(&cmd->se_cmd);
1726 		return 0;
1727 	} else /* DATAOUT_CANNOT_RECOVER */
1728 		return -1;
1729 
1730 	return 0;
1731 }
1732 EXPORT_SYMBOL(iscsit_check_dataout_payload);
1733 
1734 static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
1735 {
1736 	struct iscsit_cmd *cmd = NULL;
1737 	struct iscsi_data *hdr = (struct iscsi_data *)buf;
1738 	int rc;
1739 	bool data_crc_failed = false;
1740 
1741 	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1742 	if (rc < 0)
1743 		return 0;
1744 	else if (!cmd)
1745 		return 0;
1746 
1747 	rc = iscsit_get_dataout(conn, cmd, hdr);
1748 	if (rc < 0)
1749 		return rc;
1750 	else if (rc > 0)
1751 		data_crc_failed = true;
1752 
1753 	return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
1754 }
1755 
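/*
 * Validate a NOPOUT header: the F bit must be set, a reserved ITT requires
 * the immediate bit, and any ping data must fit MaxXmitDataSegmentLength.
 * For a NOPOUT carrying the reserved TTT the struct iscsit_cmd fields are
 * initialized here as well.
 */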
1756 int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1757 			 struct iscsi_nopout *hdr)
1758 {
1759 	u32 payload_length = ntoh24(hdr->dlength);
1760 
1761 	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1762 		pr_err("NopOUT Final (F) bit not set, protocol error.\n");
1763 		if (!cmd)
1764 			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1765 						 (unsigned char *)hdr);
1766 
1767 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1768 					 (unsigned char *)hdr);
1769 	}
1770 
1771 	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1772 		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1773 			" not set, protocol error.\n");
1774 		if (!cmd)
1775 			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1776 						 (unsigned char *)hdr);
1777 
1778 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1779 					 (unsigned char *)hdr);
1780 	}
1781 
1782 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1783 		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1784 			" greater than MaxXmitDataSegmentLength: %u, protocol"
1785 			" error.\n", payload_length,
1786 			conn->conn_ops->MaxXmitDataSegmentLength);
1787 		if (!cmd)
1788 			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1789 						 (unsigned char *)hdr);
1790 
1791 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1792 					 (unsigned char *)hdr);
1793 	}
1794 
1795 	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
1796 		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1797 		hdr->itt == RESERVED_ITT ? "Response" : "Request",
1798 		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1799 		payload_length);
1800 	/*
1801 	 * This is not a response to an Unsolicited NopIN, which means
1802 	 * it can either be a NOPOUT ping request (with a valid ITT),
1803 	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1804 	 * Either way, make sure we allocate a struct iscsit_cmd, as both
1805 	 * can contain ping data.
1806 	 */
1807 	if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1808 		cmd->iscsi_opcode	= ISCSI_OP_NOOP_OUT;
1809 		cmd->i_state		= ISTATE_SEND_NOPIN;
1810 		cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1811 						1 : 0);
1812 		conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1813 		cmd->targ_xfer_tag	= 0xFFFFFFFF;
1814 		cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
1815 		cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
1816 		cmd->data_direction	= DMA_NONE;
1817 	}
1818 
1819 	return 0;
1820 }
1821 EXPORT_SYMBOL(iscsit_setup_nop_out);
1822 
1823 int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1824 			   struct iscsi_nopout *hdr)
1825 {
1826 	struct iscsit_cmd *cmd_p = NULL;
1827 	int cmdsn_ret = 0;
1828 	/*
1829 	 * Initiator is expecting a NopIN ping reply..
1830 	 */
1831 	if (hdr->itt != RESERVED_ITT) {
1832 		if (!cmd)
1833 			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1834 						(unsigned char *)hdr);
1835 
1836 		spin_lock_bh(&conn->cmd_lock);
1837 		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1838 		spin_unlock_bh(&conn->cmd_lock);
1839 
1840 		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1841 
1842 		if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1843 			iscsit_add_cmd_to_response_queue(cmd, conn,
1844 							 cmd->i_state);
1845 			return 0;
1846 		}
1847 
1848 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1849 				(unsigned char *)hdr, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1851 			return 0;
1852 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1853 			return -1;
1854 
1855 		return 0;
1856 	}
1857 	/*
	 * This was a response to an unsolicited NOPIN ping.
1859 	 */
1860 	if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1861 		cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1862 		if (!cmd_p)
1863 			return -EINVAL;
1864 
1865 		iscsit_stop_nopin_response_timer(conn);
1866 
1867 		cmd_p->i_state = ISTATE_REMOVE;
1868 		iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
1869 
1870 		iscsit_start_nopin_timer(conn);
1871 		return 0;
1872 	}
1873 	/*
	 * Otherwise, the initiator is not expecting a NOPIN in response.
	 * Just ignore it for now.
1876 	 */
1877 
1878 	if (cmd)
1879 		iscsit_free_cmd(cmd, false);
1880 
	return 0;
1882 }
1883 EXPORT_SYMBOL(iscsit_process_nop_out);
1884 
1885 static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1886 				 unsigned char *buf)
1887 {
1888 	unsigned char *ping_data = NULL;
1889 	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1890 	struct kvec *iov = NULL;
1891 	u32 payload_length = ntoh24(hdr->dlength);
1892 	int ret;
1893 
1894 	ret = iscsit_setup_nop_out(conn, cmd, hdr);
1895 	if (ret < 0)
1896 		return 0;
1897 	/*
1898 	 * Handle NOP-OUT payload for traditional iSCSI sockets
1899 	 */
1900 	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1901 		u32 checksum, data_crc, padding = 0;
1902 		int niov = 0, rx_got, rx_size = payload_length;
1903 
1904 		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1905 		if (!ping_data) {
1906 			ret = -1;
1907 			goto out;
1908 		}
1909 
1910 		iov = &cmd->iov_misc[0];
1911 		iov[niov].iov_base	= ping_data;
1912 		iov[niov++].iov_len	= payload_length;
1913 
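		/*
		 * iSCSI data segments are padded to a 4-byte word boundary;
		 * ((-payload_length) & 3) yields the 0-3 pad bytes that must
		 * also be received (and fed into the data digest below).
		 */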
1914 		padding = ((-payload_length) & 3);
1915 		if (padding != 0) {
1916 			pr_debug("Receiving %u additional bytes"
1917 				" for padding.\n", padding);
1918 			iov[niov].iov_base	= &cmd->pad_bytes;
1919 			iov[niov++].iov_len	= padding;
1920 			rx_size += padding;
1921 		}
1922 		if (conn->conn_ops->DataDigest) {
1923 			iov[niov].iov_base	= &checksum;
1924 			iov[niov++].iov_len	= ISCSI_CRC_LEN;
1925 			rx_size += ISCSI_CRC_LEN;
1926 		}
1927 
1928 		WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
1929 		rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1930 		if (rx_got != rx_size) {
1931 			ret = -1;
1932 			goto out;
1933 		}
1934 
1935 		if (conn->conn_ops->DataDigest) {
1936 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
1937 						  payload_length, padding,
1938 						  cmd->pad_bytes, &data_crc);
1939 
1940 			if (checksum != data_crc) {
1941 				pr_err("Ping data CRC32C DataDigest"
1942 				" 0x%08x does not match computed 0x%08x\n",
1943 					checksum, data_crc);
1944 				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1945 					pr_err("Unable to recover from"
1946 					" NOPOUT Ping DataCRC failure while in"
1947 						" ERL=0.\n");
1948 					ret = -1;
1949 					goto out;
1950 				} else {
1951 					/*
1952 					 * Silently drop this PDU and let the
1953 					 * initiator plug the CmdSN gap.
1954 					 */
1955 					pr_debug("Dropping NOPOUT"
1956 					" Command CmdSN: 0x%08x due to"
1957 					" DataCRC error.\n", hdr->cmdsn);
1958 					ret = 0;
1959 					goto out;
1960 				}
1961 			} else {
1962 				pr_debug("Got CRC32C DataDigest"
1963 				" 0x%08x for %u bytes of ping data.\n",
1964 					checksum, payload_length);
1965 			}
1966 		}
1967 
1968 		ping_data[payload_length] = '\0';
1969 		/*
1970 		 * Attach ping data to struct iscsit_cmd->buf_ptr.
1971 		 */
1972 		cmd->buf_ptr = ping_data;
1973 		cmd->buf_ptr_size = payload_length;
1974 
1975 		pr_debug("Got %u bytes of NOPOUT ping"
1976 			" data.\n", payload_length);
1977 		pr_debug("Ping Data: \"%s\"\n", ping_data);
1978 	}
1979 
1980 	return iscsit_process_nop_out(conn, cmd, hdr);
1981 out:
1982 	if (cmd)
1983 		iscsit_free_cmd(cmd, false);
1984 
1985 	kfree(ping_data);
1986 	return ret;
1987 }
1988 
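/*
 * Map an iSCSI Task Management Function code onto the generic target core
 * TMR table.  Functions without a TCM equivalent (including TASK_REASSIGN,
 * which is handled entirely within the iSCSI layer) map to TMR_UNKNOWN.
 */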
1989 static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
1990 {
1991 	switch (iscsi_tmf) {
1992 	case ISCSI_TM_FUNC_ABORT_TASK:
1993 		return TMR_ABORT_TASK;
1994 	case ISCSI_TM_FUNC_ABORT_TASK_SET:
1995 		return TMR_ABORT_TASK_SET;
1996 	case ISCSI_TM_FUNC_CLEAR_ACA:
1997 		return TMR_CLEAR_ACA;
1998 	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1999 		return TMR_CLEAR_TASK_SET;
2000 	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
2001 		return TMR_LUN_RESET;
2002 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2003 		return TMR_TARGET_WARM_RESET;
2004 	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2005 		return TMR_TARGET_COLD_RESET;
2006 	default:
2007 		return TMR_UNKNOWN;
2008 	}
2009 }
2010 
2011 int
2012 iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2013 			   unsigned char *buf)
2014 {
2015 	struct se_tmr_req *se_tmr;
2016 	struct iscsi_tmr_req *tmr_req;
2017 	struct iscsi_tm *hdr;
2018 	int out_of_order_cmdsn = 0, ret;
2019 	u8 function, tcm_function = TMR_UNKNOWN;
2020 
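	/*
	 * The TMF code occupies the low bits of the PDU flags byte; mask
	 * off the Final bit below to extract it.
	 */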
2021 	hdr			= (struct iscsi_tm *) buf;
2022 	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2023 	function = hdr->flags;
2024 
2025 	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
2026 		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
2027 		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
2028 		hdr->rtt, hdr->refcmdsn, conn->cid);
2029 
2030 	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2031 	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2032 	     hdr->rtt != RESERVED_ITT)) {
2033 		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
2034 		hdr->rtt = RESERVED_ITT;
2035 	}
2036 
2037 	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
2038 			!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2039 		pr_err("Task Management Request TASK_REASSIGN not"
2040 			" issued as immediate command, bad iSCSI Initiator"
2041 				"implementation\n");
2042 		return iscsit_add_reject_cmd(cmd,
2043 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
2044 	}
2045 	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2046 	    be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
2047 		hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
2048 
2049 	cmd->data_direction = DMA_NONE;
2050 	cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
2051 	if (!cmd->tmr_req) {
2052 		return iscsit_add_reject_cmd(cmd,
2053 					     ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2054 					     buf);
2055 	}
2056 
2057 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
2058 			  conn->sess->se_sess, 0, DMA_NONE,
2059 			  TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
2060 			  scsilun_to_int(&hdr->lun),
2061 			  conn->cmd_cnt);
2062 
2063 	target_get_sess_cmd(&cmd->se_cmd, true);
2064 
2065 	/*
2066 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
2067 	 * LIO-Target $FABRIC_MOD
2068 	 */
2069 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2070 		tcm_function = iscsit_convert_tmf(function);
2071 		if (tcm_function == TMR_UNKNOWN) {
2072 			pr_err("Unknown iSCSI TMR Function:"
2073 			       " 0x%02x\n", function);
2074 			return iscsit_add_reject_cmd(cmd,
2075 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2076 		}
2077 	}
2078 	ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
2079 				 GFP_KERNEL);
2080 	if (ret < 0)
2081 		return iscsit_add_reject_cmd(cmd,
2082 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2083 
2084 	cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
2085 
2086 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_TMFUNC;
2087 	cmd->i_state		= ISTATE_SEND_TASKMGTRSP;
2088 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2089 	cmd->init_task_tag	= hdr->itt;
2090 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2091 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
2092 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
2093 	se_tmr			= cmd->se_cmd.se_tmr_req;
2094 	tmr_req			= cmd->tmr_req;
2095 	/*
2096 	 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
2097 	 */
2098 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2099 		ret = transport_lookup_tmr_lun(&cmd->se_cmd);
2100 		if (ret < 0) {
2101 			se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
2102 			goto attach;
2103 		}
2104 	}
2105 
2106 	switch (function) {
2107 	case ISCSI_TM_FUNC_ABORT_TASK:
2108 		se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
2109 		if (se_tmr->response)
2110 			goto attach;
2111 		break;
2112 	case ISCSI_TM_FUNC_ABORT_TASK_SET:
2113 	case ISCSI_TM_FUNC_CLEAR_ACA:
2114 	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
2115 	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
2116 		break;
2117 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2118 		if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
2119 			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2120 			goto attach;
2121 		}
2122 		break;
2123 	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2124 		if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
2125 			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2126 			goto attach;
2127 		}
2128 		break;
2129 	case ISCSI_TM_FUNC_TASK_REASSIGN:
2130 		se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
2131 		/*
2132 		 * Perform sanity checks on the ExpDataSN only if the
2133 		 * TASK_REASSIGN was successful.
2134 		 */
2135 		if (se_tmr->response)
2136 			break;
2137 
2138 		if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
2139 			return iscsit_add_reject_cmd(cmd,
2140 					ISCSI_REASON_BOOKMARK_INVALID, buf);
2141 		break;
2142 	default:
2143 		pr_err("Unknown TMR function: 0x%02x, protocol"
2144 			" error.\n", function);
2145 		se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
2146 		goto attach;
2147 	}
2148 
2149 	if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2150 	    (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
2151 		se_tmr->call_transport = 1;
2152 attach:
2153 	spin_lock_bh(&conn->cmd_lock);
2154 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2155 	spin_unlock_bh(&conn->cmd_lock);
2156 
2157 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2158 		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2159 		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
2160 			out_of_order_cmdsn = 1;
2161 		} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2162 			target_put_sess_cmd(&cmd->se_cmd);
2163 			return 0;
2164 		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2165 			return -1;
2166 		}
2167 	}
2168 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2169 
2170 	if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2171 		return 0;
2172 	/*
2173 	 * Found the referenced task, send to transport for processing.
2174 	 */
2175 	if (se_tmr->call_transport)
2176 		return transport_generic_handle_tmr(&cmd->se_cmd);
2177 
2178 	/*
2179 	 * Could not find the referenced LUN, task, or Task Management
2180 	 * command not authorized or supported.  Change state and
2181 	 * let the tx_thread send the response.
2182 	 *
2183 	 * For connection recovery, this is also the default action for
2184 	 * TMR TASK_REASSIGN.
2185 	 */
2186 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2187 	target_put_sess_cmd(&cmd->se_cmd);
2188 	return 0;
2189 }
2190 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
2191 
2192 /* #warning FIXME: Support Text Command parameters besides SendTargets */
2193 int
2194 iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2195 		      struct iscsi_text *hdr)
2196 {
2197 	u32 payload_length = ntoh24(hdr->dlength);
2198 
2199 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
2200 		pr_err("Unable to accept text parameter length: %u"
2201 			"greater than MaxXmitDataSegmentLength %u.\n",
2202 		       payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
2203 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2204 					 (unsigned char *)hdr);
2205 	}
2206 
2207 	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
2208 	     (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
2209 		pr_err("Multi sequence text commands currently not supported\n");
2210 		return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
2211 					(unsigned char *)hdr);
2212 	}
2213 
2214 	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
2215 		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
2216 		hdr->exp_statsn, payload_length);
2217 
2218 	cmd->iscsi_opcode	= ISCSI_OP_TEXT;
2219 	cmd->i_state		= ISTATE_SEND_TEXTRSP;
2220 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2221 	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
2222 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2223 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
2224 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
2225 	cmd->data_direction	= DMA_NONE;
2226 	kfree(cmd->text_in_ptr);
2227 	cmd->text_in_ptr	= NULL;
2228 
2229 	return 0;
2230 }
2231 EXPORT_SYMBOL(iscsit_setup_text_cmd);
2232 
2233 int
2234 iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2235 			struct iscsi_text *hdr)
2236 {
2237 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
2238 	int cmdsn_ret;
2239 
2240 	if (!text_in) {
2241 		cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
2242 		if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
2243 			pr_err("Unable to locate text_in buffer for sendtargets"
2244 			       " discovery\n");
2245 			goto reject;
2246 		}
2247 		goto empty_sendtargets;
2248 	}
2249 	if (strncmp("SendTargets=", text_in, 12) != 0) {
2250 		pr_err("Received Text Data that is not"
2251 			" SendTargets, cannot continue.\n");
2252 		goto reject;
2253 	}
2254 	/* '=' confirmed in strncmp */
2255 	text_ptr = strchr(text_in, '=');
2256 	BUG_ON(!text_ptr);
2257 	if (!strncmp("=All", text_ptr, 5)) {
2258 		cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
2259 	} else if (!strncmp("=iqn.", text_ptr, 5) ||
2260 		   !strncmp("=eui.", text_ptr, 5)) {
2261 		cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
2262 	} else {
2263 		pr_err("Unable to locate valid SendTargets%s value\n",
2264 		       text_ptr);
2265 		goto reject;
2266 	}
2267 
2268 	spin_lock_bh(&conn->cmd_lock);
2269 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2270 	spin_unlock_bh(&conn->cmd_lock);
2271 
2272 empty_sendtargets:
2273 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2274 
2275 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2276 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
2277 				(unsigned char *)hdr, hdr->cmdsn);
2278 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2279 			return -1;
2280 
2281 		return 0;
2282 	}
2283 
2284 	return iscsit_execute_cmd(cmd, 0);
2285 
2286 reject:
2287 	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2288 				 (unsigned char *)hdr);
2289 }
2290 EXPORT_SYMBOL(iscsit_process_text_cmd);
2291 
2292 static int
2293 iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2294 		       unsigned char *buf)
2295 {
2296 	struct iscsi_text *hdr = (struct iscsi_text *)buf;
2297 	char *text_in = NULL;
2298 	u32 payload_length = ntoh24(hdr->dlength);
2299 	int rx_size, rc;
2300 
2301 	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
2302 	if (rc < 0)
2303 		return 0;
2304 
2305 	rx_size = payload_length;
2306 	if (payload_length) {
2307 		u32 checksum = 0, data_crc = 0;
2308 		u32 padding = 0;
2309 		int niov = 0, rx_got;
2310 		struct kvec iov[2];
2311 
2312 		rx_size = ALIGN(payload_length, 4);
2313 		text_in = kzalloc(rx_size, GFP_KERNEL);
2314 		if (!text_in)
2315 			goto reject;
2316 
2317 		cmd->text_in_ptr = text_in;
2318 
2319 		memset(iov, 0, sizeof(iov));
2320 		iov[niov].iov_base	= text_in;
2321 		iov[niov++].iov_len	= rx_size;
2322 
2323 		padding = rx_size - payload_length;
2324 		if (padding)
2325 			pr_debug("Receiving %u additional bytes"
2326 					" for padding.\n", padding);
2327 		if (conn->conn_ops->DataDigest) {
2328 			iov[niov].iov_base	= &checksum;
2329 			iov[niov++].iov_len	= ISCSI_CRC_LEN;
2330 			rx_size += ISCSI_CRC_LEN;
2331 		}
2332 
2333 		WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
2334 		rx_got = rx_data(conn, &iov[0], niov, rx_size);
2335 		if (rx_got != rx_size)
2336 			goto reject;
2337 
2338 		if (conn->conn_ops->DataDigest) {
2339 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
2340 						  text_in, rx_size, 0, NULL,
2341 						  &data_crc);
2342 
2343 			if (checksum != data_crc) {
2344 				pr_err("Text data CRC32C DataDigest"
2345 					" 0x%08x does not match computed"
2346 					" 0x%08x\n", checksum, data_crc);
2347 				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2348 					pr_err("Unable to recover from"
2349 					" Text Data digest failure while in"
2350 						" ERL=0.\n");
2351 					goto reject;
2352 				} else {
2353 					/*
2354 					 * Silently drop this PDU and let the
2355 					 * initiator plug the CmdSN gap.
2356 					 */
2357 					pr_debug("Dropping Text"
2358 					" Command CmdSN: 0x%08x due to"
2359 					" DataCRC error.\n", hdr->cmdsn);
2360 					kfree(text_in);
2361 					return 0;
2362 				}
2363 			} else {
2364 				pr_debug("Got CRC32C DataDigest"
2365 					" 0x%08x for %u bytes of text data.\n",
2366 						checksum, payload_length);
2367 			}
2368 		}
2369 		text_in[payload_length - 1] = '\0';
2370 		pr_debug("Successfully read %d bytes of text"
2371 				" data.\n", payload_length);
2372 	}
2373 
2374 	return iscsit_process_text_cmd(conn, cmd, hdr);
2375 
2376 reject:
2377 	kfree(cmd->text_in_ptr);
2378 	cmd->text_in_ptr = NULL;
2379 	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
2380 }
2381 
2382 int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2383 {
2384 	struct iscsit_conn *conn_p;
2385 	struct iscsit_session *sess = conn->sess;
2386 
2387 	pr_debug("Received logout request CLOSESESSION on CID: %hu"
2388 		" for SID: %u.\n", conn->cid, conn->sess->sid);
2389 
2390 	atomic_set(&sess->session_logout, 1);
2391 	atomic_set(&conn->conn_logout_remove, 1);
2392 	conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2393 
2394 	iscsit_inc_conn_usage_count(conn);
2395 	iscsit_inc_session_usage_count(sess);
2396 
2397 	spin_lock_bh(&sess->conn_lock);
2398 	list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2399 		if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2400 			continue;
2401 
2402 		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2403 		conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2404 	}
2405 	spin_unlock_bh(&sess->conn_lock);
2406 
2407 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2408 
2409 	return 0;
2410 }
2411 
2412 int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2413 {
2414 	struct iscsit_conn *l_conn;
2415 	struct iscsit_session *sess = conn->sess;
2416 
2417 	pr_debug("Received logout request CLOSECONNECTION for CID:"
2418 		" %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2419 
2420 	/*
2421 	 * A Logout Request with a CLOSECONNECTION reason code for a CID
2422 	 * can arrive on a connection with a differing CID.
2423 	 */
2424 	if (conn->cid == cmd->logout_cid) {
2425 		spin_lock_bh(&conn->state_lock);
2426 		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2427 		conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2428 
2429 		atomic_set(&conn->conn_logout_remove, 1);
2430 		conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2431 		iscsit_inc_conn_usage_count(conn);
2432 
2433 		spin_unlock_bh(&conn->state_lock);
2434 	} else {
2435 		/*
		 * Handle differing-CID CLOSECONNECTION requests in
		 * iscsit_logout_post_handler_diffcid() so as to give enough
		 * time for any non-immediate command's CmdSN to be
2439 		 * acknowledged on the connection in question.
2440 		 *
2441 		 * Here we simply make sure the CID is still around.
2442 		 */
2443 		l_conn = iscsit_get_conn_from_cid(sess,
2444 				cmd->logout_cid);
2445 		if (!l_conn) {
2446 			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2447 			iscsit_add_cmd_to_response_queue(cmd, conn,
2448 					cmd->i_state);
2449 			return 0;
2450 		}
2451 
2452 		iscsit_dec_conn_usage_count(l_conn);
2453 	}
2454 
2455 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2456 
2457 	return 0;
2458 }
2459 
2460 int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2461 {
2462 	struct iscsit_session *sess = conn->sess;
2463 
2464 	pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2465 		" CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2466 
2467 	if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2468 		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2469 			" while ERL!=2.\n");
2470 		cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2471 		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2472 		return 0;
2473 	}
2474 
2475 	if (conn->cid == cmd->logout_cid) {
2476 		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2477 			" with CID: %hu on CID: %hu, implementation error.\n",
2478 				cmd->logout_cid, conn->cid);
2479 		cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2480 		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2481 		return 0;
2482 	}
2483 
2484 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2485 
2486 	return 0;
2487 }
2488 
2489 int
2490 iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2491 			unsigned char *buf)
2492 {
2493 	int cmdsn_ret, logout_remove = 0;
2494 	u8 reason_code = 0;
2495 	struct iscsi_logout *hdr;
2496 	struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2497 
2498 	hdr			= (struct iscsi_logout *) buf;
2499 	reason_code		= (hdr->flags & 0x7f);
2500 
2501 	if (tiqn) {
2502 		spin_lock(&tiqn->logout_stats.lock);
2503 		if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2504 			tiqn->logout_stats.normal_logouts++;
2505 		else
2506 			tiqn->logout_stats.abnormal_logouts++;
2507 		spin_unlock(&tiqn->logout_stats.lock);
2508 	}
2509 
2510 	pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2511 		" ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2512 		hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2513 		hdr->cid, conn->cid);
2514 
2515 	if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2516 		pr_err("Received logout request on connection that"
2517 			" is not in logged in state, ignoring request.\n");
2518 		iscsit_free_cmd(cmd, false);
2519 		return 0;
2520 	}
2521 
2522 	cmd->iscsi_opcode       = ISCSI_OP_LOGOUT;
2523 	cmd->i_state            = ISTATE_SEND_LOGOUTRSP;
2524 	cmd->immediate_cmd      = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2525 	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
2526 	cmd->targ_xfer_tag      = 0xFFFFFFFF;
2527 	cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
2528 	cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
2529 	cmd->logout_cid         = be16_to_cpu(hdr->cid);
2530 	cmd->logout_reason      = reason_code;
2531 	cmd->data_direction     = DMA_NONE;
2532 
2533 	/*
2534 	 * We need to sleep in these cases (by returning 1) until the Logout
2535 	 * Response gets sent in the tx thread.
2536 	 */
2537 	if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2538 	   ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2539 	    be16_to_cpu(hdr->cid) == conn->cid))
2540 		logout_remove = 1;
2541 
2542 	spin_lock_bh(&conn->cmd_lock);
2543 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2544 	spin_unlock_bh(&conn->cmd_lock);
2545 
2546 	if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2547 		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2548 
2549 	/*
2550 	 * Immediate commands are executed, well, immediately.
2551 	 * Non-Immediate Logout Commands are executed in CmdSN order.
2552 	 */
2553 	if (cmd->immediate_cmd) {
2554 		int ret = iscsit_execute_cmd(cmd, 0);
2555 
2556 		if (ret < 0)
2557 			return ret;
2558 	} else {
2559 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2560 		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
2561 			logout_remove = 0;
2562 		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2563 			return -1;
2564 	}
2565 
2566 	return logout_remove;
2567 }
2568 EXPORT_SYMBOL(iscsit_handle_logout_cmd);
2569 
2570 int iscsit_handle_snack(
2571 	struct iscsit_conn *conn,
2572 	unsigned char *buf)
2573 {
2574 	struct iscsi_snack *hdr;
2575 
2576 	hdr			= (struct iscsi_snack *) buf;
2577 	hdr->flags		&= ~ISCSI_FLAG_CMD_FINAL;
2578 
2579 	pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2580 		" 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2581 		" CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2582 			hdr->begrun, hdr->runlength, conn->cid);
2583 
2584 	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2585 		pr_err("Initiator sent SNACK request while in"
2586 			" ErrorRecoveryLevel=0.\n");
2587 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2588 					 buf);
2589 	}
2590 	/*
	 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
	 * call from inside iscsit_handle_recovery_datain_or_r2t().
2593 	 */
2594 	switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2595 	case 0:
2596 		return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2597 			hdr->itt,
2598 			be32_to_cpu(hdr->ttt),
2599 			be32_to_cpu(hdr->begrun),
2600 			be32_to_cpu(hdr->runlength));
2601 	case ISCSI_FLAG_SNACK_TYPE_STATUS:
2602 		return iscsit_handle_status_snack(conn, hdr->itt,
2603 			be32_to_cpu(hdr->ttt),
2604 			be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2605 	case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2606 		return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2607 			be32_to_cpu(hdr->begrun),
2608 			be32_to_cpu(hdr->runlength));
2609 	case ISCSI_FLAG_SNACK_TYPE_RDATA:
2610 		/* FIXME: Support R-Data SNACK */
2611 		pr_err("R-Data SNACK Not Supported.\n");
2612 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2613 					 buf);
2614 	default:
2615 		pr_err("Unknown SNACK type 0x%02x, protocol"
2616 			" error.\n", hdr->flags & 0x0f);
2617 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2618 					 buf);
2619 	}
2620 
2621 	return 0;
2622 }
2623 EXPORT_SYMBOL(iscsit_handle_snack);
2624 
2625 static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn)
2626 {
2627 	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2628 	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2629 		wait_for_completion_interruptible_timeout(
2630 					&conn->rx_half_close_comp,
2631 					ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2632 	}
2633 }
2634 
2635 static int iscsit_handle_immediate_data(
2636 	struct iscsit_cmd *cmd,
2637 	struct iscsi_scsi_req *hdr,
2638 	u32 length)
2639 {
2640 	int iov_ret, rx_got = 0, rx_size = 0;
2641 	u32 checksum, iov_count = 0, padding = 0;
2642 	struct iscsit_conn *conn = cmd->conn;
2643 	struct kvec *iov;
2644 	void *overflow_buf = NULL;
2645 
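	/*
	 * Only as much Immediate Data as the command's data buffer can hold
	 * is mapped into the scatterlist iovecs; any excess is drained into
	 * a separately allocated overflow buffer below.
	 */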
2646 	BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
2647 	rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
2648 	iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
2649 				   cmd->orig_iov_data_count - 2,
2650 				   cmd->write_data_done, rx_size);
2651 	if (iov_ret < 0)
2652 		return IMMEDIATE_DATA_CANNOT_RECOVER;
2653 
2654 	iov_count = iov_ret;
2655 	iov = &cmd->iov_data[0];
2656 	if (rx_size < length) {
2657 		/*
2658 		 * Special case: length of immediate data exceeds the data
2659 		 * buffer size derived from the CDB.
2660 		 */
2661 		overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
2662 		if (!overflow_buf) {
2663 			iscsit_unmap_iovec(cmd);
2664 			return IMMEDIATE_DATA_CANNOT_RECOVER;
2665 		}
2666 		cmd->overflow_buf = overflow_buf;
2667 		iov[iov_count].iov_base = overflow_buf;
2668 		iov[iov_count].iov_len = length - rx_size;
2669 		iov_count++;
2670 		rx_size = length;
2671 	}
2672 
2673 	padding = ((-length) & 3);
2674 	if (padding != 0) {
2675 		iov[iov_count].iov_base	= cmd->pad_bytes;
2676 		iov[iov_count++].iov_len = padding;
2677 		rx_size += padding;
2678 	}
2679 
2680 	if (conn->conn_ops->DataDigest) {
2681 		iov[iov_count].iov_base		= &checksum;
2682 		iov[iov_count++].iov_len	= ISCSI_CRC_LEN;
2683 		rx_size += ISCSI_CRC_LEN;
2684 	}
2685 
2686 	WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
2687 	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
2688 
2689 	iscsit_unmap_iovec(cmd);
2690 
2691 	if (rx_got != rx_size) {
2692 		iscsit_rx_thread_wait_for_tcp(conn);
2693 		return IMMEDIATE_DATA_CANNOT_RECOVER;
2694 	}
2695 
2696 	if (conn->conn_ops->DataDigest) {
2697 		u32 data_crc;
2698 
2699 		data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
2700 						    cmd->write_data_done, length, padding,
2701 						    cmd->pad_bytes);
2702 
2703 		if (checksum != data_crc) {
2704 			pr_err("ImmediateData CRC32C DataDigest 0x%08x"
2705 				" does not match computed 0x%08x\n", checksum,
2706 				data_crc);
2707 
2708 			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2709 				pr_err("Unable to recover from"
2710 					" Immediate Data digest failure while"
2711 					" in ERL=0.\n");
2712 				iscsit_reject_cmd(cmd,
2713 						ISCSI_REASON_DATA_DIGEST_ERROR,
2714 						(unsigned char *)hdr);
2715 				return IMMEDIATE_DATA_CANNOT_RECOVER;
2716 			} else {
2717 				iscsit_reject_cmd(cmd,
2718 						ISCSI_REASON_DATA_DIGEST_ERROR,
2719 						(unsigned char *)hdr);
2720 				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2721 			}
2722 		} else {
2723 			pr_debug("Got CRC32C DataDigest 0x%08x for"
2724 				" %u bytes of Immediate Data\n", checksum,
2725 				length);
2726 		}
2727 	}
2728 
2729 	cmd->write_data_done += length;
2730 
2731 	if (cmd->write_data_done == cmd->se_cmd.data_length) {
2732 		spin_lock_bh(&cmd->istate_lock);
2733 		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
2734 		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
2735 		spin_unlock_bh(&cmd->istate_lock);
2736 	}
2737 
2738 	return IMMEDIATE_DATA_NORMAL_OPERATION;
2739 }
2740 
2741 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections
2742 	with active network interface */
2743 static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn)
2744 {
2745 	struct iscsit_cmd *cmd;
2746 	struct iscsit_conn *conn_p;
2747 	bool found = false;
2748 
2749 	lockdep_assert_held(&conn->sess->conn_lock);
2750 
2751 	/*
	 * Only send an Asynchronous Message on connections whose network
2753 	 * interface is still functional.
2754 	 */
2755 	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2756 		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2757 			iscsit_inc_conn_usage_count(conn_p);
2758 			found = true;
2759 			break;
2760 		}
2761 	}
2762 
2763 	if (!found)
2764 		return;
2765 
2766 	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
2767 	if (!cmd) {
2768 		iscsit_dec_conn_usage_count(conn_p);
2769 		return;
2770 	}
2771 
2772 	cmd->logout_cid = conn->cid;
2773 	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2774 	cmd->i_state = ISTATE_SEND_ASYNCMSG;
2775 
2776 	spin_lock_bh(&conn_p->cmd_lock);
2777 	list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
2778 	spin_unlock_bh(&conn_p->cmd_lock);
2779 
2780 	iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2781 	iscsit_dec_conn_usage_count(conn_p);
2782 }
2783 
2784 static int iscsit_send_conn_drop_async_message(
2785 	struct iscsit_cmd *cmd,
2786 	struct iscsit_conn *conn)
2787 {
2788 	struct iscsi_async *hdr;
2789 
2790 	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2791 
2792 	hdr			= (struct iscsi_async *) cmd->pdu;
2793 	hdr->opcode		= ISCSI_OP_ASYNC_EVENT;
2794 	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
2795 	cmd->init_task_tag	= RESERVED_ITT;
2796 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2797 	put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2798 	cmd->stat_sn		= conn->stat_sn++;
2799 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
2800 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
2801 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2802 	hdr->async_event	= ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2803 	hdr->param1		= cpu_to_be16(cmd->logout_cid);
2804 	hdr->param2		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2805 	hdr->param3		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2806 
2807 	pr_debug("Sending Connection Dropped Async Message StatSN:"
2808 		" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2809 			cmd->logout_cid, conn->cid);
2810 
2811 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2812 }
2813 
2814 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn)
2815 {
2816 	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2817 	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2818 		wait_for_completion_interruptible_timeout(
2819 					&conn->tx_half_close_comp,
2820 					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
2821 	}
2822 }
2823 
2824 void
2825 iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2826 			struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2827 			bool set_statsn)
2828 {
2829 	hdr->opcode		= ISCSI_OP_SCSI_DATA_IN;
2830 	hdr->flags		= datain->flags;
2831 	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2832 		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2833 			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2834 			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2835 		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2836 			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2837 			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2838 		}
2839 	}
2840 	hton24(hdr->dlength, datain->length);
2841 	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2842 		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2843 				(struct scsi_lun *)&hdr->lun);
2844 	else
2845 		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2846 
2847 	hdr->itt		= cmd->init_task_tag;
2848 
2849 	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2850 		hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
2851 	else
2852 		hdr->ttt		= cpu_to_be32(0xFFFFFFFF);
2853 	if (set_statsn)
2854 		hdr->statsn		= cpu_to_be32(cmd->stat_sn);
2855 	else
2856 		hdr->statsn		= cpu_to_be32(0xFFFFFFFF);
2857 
2858 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
2859 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2860 	hdr->datasn		= cpu_to_be32(datain->data_sn);
2861 	hdr->offset		= cpu_to_be32(datain->offset);
2862 
2863 	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2864 		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2865 		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2866 		ntohl(hdr->offset), datain->length, conn->cid);
2867 }
2868 EXPORT_SYMBOL(iscsit_build_datain_pdu);
2869 
2870 static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2871 {
2872 	struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
2873 	struct iscsi_datain datain;
2874 	struct iscsi_datain_req *dr;
2875 	int eodr = 0, ret;
2876 	bool set_statsn = false;
2877 
2878 	memset(&datain, 0, sizeof(struct iscsi_datain));
2879 	dr = iscsit_get_datain_values(cmd, &datain);
2880 	if (!dr) {
2881 		pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
2882 				cmd->init_task_tag);
2883 		return -1;
2884 	}
2885 	/*
2886 	 * Be paranoid and double check the logic for now.
2887 	 */
2888 	if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
2889 		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2890 			" datain.length: %u exceeds cmd->data_length: %u\n",
2891 			cmd->init_task_tag, datain.offset, datain.length,
2892 			cmd->se_cmd.data_length);
2893 		return -1;
2894 	}
2895 
2896 	atomic_long_add(datain.length, &conn->sess->tx_data_octets);
2897 	/*
	 * Special case for successful execution w/ both DATAIN
2899 	 * and Sense Data.
2900 	 */
2901 	if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
2902 	    (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2903 		datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
2904 	else {
2905 		if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
2906 		    (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2907 			iscsit_increment_maxcmdsn(cmd, conn->sess);
2908 			cmd->stat_sn = conn->stat_sn++;
2909 			set_statsn = true;
2910 		} else if (dr->dr_complete ==
2911 			   DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2912 			set_statsn = true;
2913 	}
2914 
2915 	iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
2916 
2917 	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
2918 	if (ret < 0)
2919 		return ret;
2920 
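	/*
	 * On the final DataIN of a sequence, return 2 when sense data still
	 * has to be carried in a separate SCSI Response PDU, otherwise 1;
	 * 0 means more DataIN PDUs remain for this command.
	 */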
2921 	if (dr->dr_complete) {
2922 		eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2923 				2 : 1;
2924 		iscsit_free_datain_req(cmd, dr);
2925 	}
2926 
2927 	return eodr;
2928 }
2929 
2930 int
2931 iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2932 			struct iscsi_logout_rsp *hdr)
2933 {
2934 	struct iscsit_conn *logout_conn = NULL;
2935 	struct iscsi_conn_recovery *cr = NULL;
2936 	struct iscsit_session *sess = conn->sess;
2937 	/*
2938 	 * The actual shutting down of Sessions and/or Connections
2939 	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
	 * is done in iscsit_logout_post_handler().
2941 	 */
2942 	switch (cmd->logout_reason) {
2943 	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
2944 		pr_debug("iSCSI session logout successful, setting"
2945 			" logout response to ISCSI_LOGOUT_SUCCESS.\n");
2946 		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2947 		break;
2948 	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
2949 		if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
2950 			break;
2951 		/*
2952 		 * For CLOSECONNECTION logout requests carrying
2953 		 * a matching logout CID -> local CID, the reference
2954 		 * for the local CID will have been incremented in
		 * iscsit_logout_closeconnection().
2956 		 *
2957 		 * For CLOSECONNECTION logout requests carrying
2958 		 * a different CID than the connection it arrived
2959 		 * on, the connection responding to cmd->logout_cid
2960 		 * is stopped in iscsit_logout_post_handler_diffcid().
2961 		 */
2962 
2963 		pr_debug("iSCSI CID: %hu logout on CID: %hu"
2964 			" successful.\n", cmd->logout_cid, conn->cid);
2965 		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2966 		break;
2967 	case ISCSI_LOGOUT_REASON_RECOVERY:
2968 		if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
2969 		    (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
2970 			break;
2971 		/*
2972 		 * If the connection is still active from our point of view
2973 		 * force connection recovery to occur.
2974 		 */
2975 		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
2976 				cmd->logout_cid);
2977 		if (logout_conn) {
2978 			iscsit_connection_reinstatement_rcfr(logout_conn);
2979 			iscsit_dec_conn_usage_count(logout_conn);
2980 		}
2981 
2982 		cr = iscsit_get_inactive_connection_recovery_entry(
2983 				conn->sess, cmd->logout_cid);
2984 		if (!cr) {
2985 			pr_err("Unable to locate CID: %hu for"
2986 			" REMOVECONNFORRECOVERY Logout Request.\n",
2987 				cmd->logout_cid);
2988 			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2989 			break;
2990 		}
2991 
2992 		iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
2993 
2994 		pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
2995 			" for recovery for CID: %hu on CID: %hu successful.\n",
2996 				cmd->logout_cid, conn->cid);
2997 		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2998 		break;
2999 	default:
3000 		pr_err("Unknown cmd->logout_reason: 0x%02x\n",
3001 				cmd->logout_reason);
3002 		return -1;
3003 	}
3004 
3005 	hdr->opcode		= ISCSI_OP_LOGOUT_RSP;
3006 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3007 	hdr->response		= cmd->logout_response;
3008 	hdr->itt		= cmd->init_task_tag;
3009 	cmd->stat_sn		= conn->stat_sn++;
3010 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3011 
3012 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3013 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3014 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3015 
3016 	pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
3017 		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
3018 		cmd->init_task_tag, cmd->stat_sn, hdr->response,
3019 		cmd->logout_cid, conn->cid);
3020 
3021 	return 0;
3022 }
3023 EXPORT_SYMBOL(iscsit_build_logout_rsp);
3024 
3025 static int
3026 iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3027 {
3028 	int rc;
3029 
3030 	rc = iscsit_build_logout_rsp(cmd, conn,
3031 			(struct iscsi_logout_rsp *)&cmd->pdu[0]);
3032 	if (rc < 0)
3033 		return rc;
3034 
3035 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3036 }
3037 
3038 void
3039 iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3040 		       struct iscsi_nopin *hdr, bool nopout_response)
3041 {
3042 	hdr->opcode		= ISCSI_OP_NOOP_IN;
3043 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
	hton24(hdr->dlength, cmd->buf_ptr_size);
3045 	if (nopout_response)
3046 		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
3047 	hdr->itt		= cmd->init_task_tag;
3048 	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
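	/*
	 * Only a NOPIN answering an initiator NOPOUT consumes a new StatSN
	 * and advances MaxCmdSN; target-initiated unsolicited NOPINs reuse
	 * the current StatSN.
	 */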
3049 	cmd->stat_sn		= (nopout_response) ? conn->stat_sn++ :
3050 				  conn->stat_sn;
3051 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3052 
3053 	if (nopout_response)
3054 		iscsit_increment_maxcmdsn(cmd, conn->sess);
3055 
3056 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3057 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3058 
3059 	pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
3060 		" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
3061 		"Solicited" : "Unsolicited", cmd->init_task_tag,
3062 		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
3063 }
3064 EXPORT_SYMBOL(iscsit_build_nopin_rsp);
3065 
3066 /*
3067  *	Unsolicited NOPIN, either requesting a response or not.
3068  */
3069 static int iscsit_send_unsolicited_nopin(
3070 	struct iscsit_cmd *cmd,
3071 	struct iscsit_conn *conn,
3072 	int want_response)
3073 {
3074 	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3075 	int ret;
3076 
3077 	iscsit_build_nopin_rsp(cmd, conn, hdr, false);
3078 
3079 	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
3080 		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
3081 
3082 	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3083 	if (ret < 0)
3084 		return ret;
3085 
3086 	spin_lock_bh(&cmd->istate_lock);
3087 	cmd->i_state = want_response ?
3088 		ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
3089 	spin_unlock_bh(&cmd->istate_lock);
3090 
3091 	return 0;
3092 }
3093 
3094 static int
3095 iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3096 {
3097 	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3098 
3099 	iscsit_build_nopin_rsp(cmd, conn, hdr, true);
3100 
3101 	/*
3102 	 * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr.
3103 	 * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size.
3104 	 */
3105 	pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
3106 
3107 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3108 						     cmd->buf_ptr,
3109 						     cmd->buf_ptr_size);
3110 }
3111 
3112 static int iscsit_send_r2t(
3113 	struct iscsit_cmd *cmd,
3114 	struct iscsit_conn *conn)
3115 {
3116 	struct iscsi_r2t *r2t;
3117 	struct iscsi_r2t_rsp *hdr;
3118 	int ret;
3119 
3120 	r2t = iscsit_get_r2t_from_list(cmd);
3121 	if (!r2t)
3122 		return -1;
3123 
3124 	hdr			= (struct iscsi_r2t_rsp *) cmd->pdu;
3125 	memset(hdr, 0, ISCSI_HDR_LEN);
3126 	hdr->opcode		= ISCSI_OP_R2T;
3127 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3128 	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3129 			(struct scsi_lun *)&hdr->lun);
3130 	hdr->itt		= cmd->init_task_tag;
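	/*
	 * Offload transports may supply their own Target Transfer Tag;
	 * otherwise allocate the next session-wide TTT.
	 */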
3131 	if (conn->conn_transport->iscsit_get_r2t_ttt)
3132 		conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
3133 	else
3134 		r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
3135 	hdr->ttt		= cpu_to_be32(r2t->targ_xfer_tag);
3136 	hdr->statsn		= cpu_to_be32(conn->stat_sn);
3137 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3138 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3139 	hdr->r2tsn		= cpu_to_be32(r2t->r2t_sn);
3140 	hdr->data_offset	= cpu_to_be32(r2t->offset);
3141 	hdr->data_length	= cpu_to_be32(r2t->xfer_len);
3142 
3143 	pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
3144 		" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
3145 		(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
3146 		r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
3147 			r2t->offset, r2t->xfer_len, conn->cid);
3148 
3149 	spin_lock_bh(&cmd->r2t_lock);
3150 	r2t->sent_r2t = 1;
3151 	spin_unlock_bh(&cmd->r2t_lock);
3152 
3153 	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3154 	if (ret < 0) {
3155 		return ret;
3156 	}
3157 
3158 	spin_lock_bh(&cmd->dataout_timeout_lock);
3159 	iscsit_start_dataout_timer(cmd, conn);
3160 	spin_unlock_bh(&cmd->dataout_timeout_lock);
3161 
3162 	return 0;
3163 }
3164 
3165 /*
3166  *	@recovery: If called from iscsi_task_reassign_complete_write() for
3167  *		connection recovery.
3168  */
3169 int iscsit_build_r2ts_for_cmd(
3170 	struct iscsit_conn *conn,
3171 	struct iscsit_cmd *cmd,
3172 	bool recovery)
3173 {
3174 	int first_r2t = 1;
3175 	u32 offset = 0, xfer_len = 0;
3176 
3177 	spin_lock_bh(&cmd->r2t_lock);
3178 	if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
3179 		spin_unlock_bh(&cmd->r2t_lock);
3180 		return 0;
3181 	}
3182 
3183 	if (conn->sess->sess_ops->DataSequenceInOrder &&
3184 	    !recovery)
3185 		cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
3186 
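	/*
	 * Queue R2Ts until MaxOutstandingR2T is reached or the final R2T
	 * for this command has been built.  With DataSequenceInOrder each
	 * R2T covers the next MaxBurstLength window from r2t_offset;
	 * otherwise offset/length come from the pre-built sequence list.
	 */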
3187 	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
3188 		if (conn->sess->sess_ops->DataSequenceInOrder) {
3189 			offset = cmd->r2t_offset;
3190 
3191 			if (first_r2t && recovery) {
3192 				int new_data_end = offset +
3193 					conn->sess->sess_ops->MaxBurstLength -
3194 					cmd->next_burst_len;
3195 
3196 				if (new_data_end > cmd->se_cmd.data_length)
3197 					xfer_len = cmd->se_cmd.data_length - offset;
3198 				else
3199 					xfer_len =
3200 						conn->sess->sess_ops->MaxBurstLength -
3201 						cmd->next_burst_len;
3202 			} else {
3203 				int new_data_end = offset +
3204 					conn->sess->sess_ops->MaxBurstLength;
3205 
3206 				if (new_data_end > cmd->se_cmd.data_length)
3207 					xfer_len = cmd->se_cmd.data_length - offset;
3208 				else
3209 					xfer_len = conn->sess->sess_ops->MaxBurstLength;
3210 			}
3211 
3212 			if ((s32)xfer_len < 0) {
3213 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3214 				break;
3215 			}
3216 
3217 			cmd->r2t_offset += xfer_len;
3218 
3219 			if (cmd->r2t_offset == cmd->se_cmd.data_length)
3220 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3221 		} else {
3222 			struct iscsi_seq *seq;
3223 
3224 			seq = iscsit_get_seq_holder_for_r2t(cmd);
3225 			if (!seq) {
3226 				spin_unlock_bh(&cmd->r2t_lock);
3227 				return -1;
3228 			}
3229 
3230 			offset = seq->offset;
3231 			xfer_len = seq->xfer_len;
3232 
3233 			if (cmd->seq_send_order == cmd->seq_count)
3234 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3235 		}
3236 		cmd->outstanding_r2ts++;
3237 		first_r2t = 0;
3238 
3239 		if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
3240 			spin_unlock_bh(&cmd->r2t_lock);
3241 			return -1;
3242 		}
3243 
3244 		if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
3245 			break;
3246 	}
3247 	spin_unlock_bh(&cmd->r2t_lock);
3248 
3249 	return 0;
3250 }
3251 EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
3252 
3253 void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3254 			bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
3255 {
3256 	if (inc_stat_sn)
3257 		cmd->stat_sn = conn->stat_sn++;
3258 
3259 	atomic_long_inc(&conn->sess->rsp_pdus);
3260 
3261 	memset(hdr, 0, ISCSI_HDR_LEN);
3262 	hdr->opcode		= ISCSI_OP_SCSI_CMD_RSP;
3263 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3264 	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3265 		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3266 		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3267 	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3268 		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3269 		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3270 	}
3271 	hdr->response		= cmd->iscsi_response;
3272 	hdr->cmd_status		= cmd->se_cmd.scsi_status;
3273 	hdr->itt		= cmd->init_task_tag;
3274 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3275 
3276 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3277 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3278 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3279 
3280 	pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3281 		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
		cmd->init_task_tag, cmd->stat_sn, cmd->iscsi_response,
3283 		cmd->se_cmd.scsi_status, conn->cid);
3284 }
3285 EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3286 
3287 static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3288 {
3289 	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
3290 	bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
3291 	void *data_buf = NULL;
3292 	u32 padding = 0, data_buf_len = 0;
3293 
3294 	iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
3295 
3296 	/*
3297 	 * Attach SENSE DATA payload to iSCSI Response PDU
3298 	 */
3299 	if (cmd->se_cmd.sense_buffer &&
3300 	   ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3301 	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
3302 		put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3303 		cmd->se_cmd.scsi_sense_length += sizeof (__be16);
3304 
3305 		padding		= -(cmd->se_cmd.scsi_sense_length) & 3;
3306 		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3307 		data_buf = cmd->sense_buffer;
3308 		data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
3309 
3310 		if (padding) {
3311 			memset(cmd->sense_buffer +
3312 				cmd->se_cmd.scsi_sense_length, 0, padding);
3313 			pr_debug("Adding %u bytes of padding to"
3314 				" SENSE.\n", padding);
3315 		}
3316 
3317 		pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3318 				" Response PDU\n",
3319 				cmd->se_cmd.scsi_sense_length);
3320 	}
3321 
3322 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
3323 						     data_buf_len);
3324 }
3325 
3326 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3327 {
3328 	switch (se_tmr->response) {
3329 	case TMR_FUNCTION_COMPLETE:
3330 		return ISCSI_TMF_RSP_COMPLETE;
3331 	case TMR_TASK_DOES_NOT_EXIST:
3332 		return ISCSI_TMF_RSP_NO_TASK;
3333 	case TMR_LUN_DOES_NOT_EXIST:
3334 		return ISCSI_TMF_RSP_NO_LUN;
3335 	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3336 		return ISCSI_TMF_RSP_NOT_SUPPORTED;
3337 	case TMR_FUNCTION_REJECTED:
3338 	default:
3339 		return ISCSI_TMF_RSP_REJECTED;
3340 	}
3341 }
3342 
3343 void
3344 iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3345 			  struct iscsi_tm_rsp *hdr)
3346 {
3347 	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3348 
3349 	hdr->opcode		= ISCSI_OP_SCSI_TMFUNC_RSP;
3350 	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
3351 	hdr->response		= iscsit_convert_tcm_tmr_rsp(se_tmr);
3352 	hdr->itt		= cmd->init_task_tag;
3353 	cmd->stat_sn		= conn->stat_sn++;
3354 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3355 
3356 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3357 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3358 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3359 
3360 	pr_debug("Built Task Management Response ITT: 0x%08x,"
3361 		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3362 		cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3363 }
3364 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
3365 
3366 static int
3367 iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3368 {
3369 	struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3370 
3371 	iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3372 
3373 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3374 }
3375 
3376 #define SENDTARGETS_BUF_LIMIT 32768U
3377 
3378 static int
3379 iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
3380 				  enum iscsit_transport_type network_transport,
3381 				  int skip_bytes, bool *completed)
3382 {
3383 	char *payload = NULL;
3384 	struct iscsit_conn *conn = cmd->conn;
3385 	struct iscsi_portal_group *tpg;
3386 	struct iscsi_tiqn *tiqn;
3387 	struct iscsi_tpg_np *tpg_np;
3388 	int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3389 	int target_name_printed;
3390 	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3391 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3392 	bool active;
3393 
3394 	buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
3395 			 SENDTARGETS_BUF_LIMIT);
3396 
3397 	payload = kzalloc(buffer_len, GFP_KERNEL);
3398 	if (!payload)
3399 		return -ENOMEM;
3400 
3401 	/*
3402 	 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
3403 	 * explicit case..
3404 	 */
3405 	if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
3406 		text_ptr = strchr(text_in, '=');
3407 		if (!text_ptr) {
3408 			pr_err("Unable to locate '=' string in text_in:"
3409 			       " %s\n", text_in);
3410 			kfree(payload);
3411 			return -EINVAL;
3412 		}
3413 		/*
3414 		 * Skip over '=' character..
3415 		 */
3416 		text_ptr += 1;
3417 	}
3418 
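	/*
	 * Walk every exported TIQN/TPG/network portal and emit TargetName=
	 * and TargetAddress= key=value pairs.  skip_bytes re-skips data
	 * already returned in earlier PDUs of a multi-PDU SendTargets
	 * response.
	 */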
3419 	spin_lock(&tiqn_lock);
3420 	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3421 		if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
3422 		     strcmp(tiqn->tiqn, text_ptr)) {
3423 			continue;
3424 		}
3425 
3426 		target_name_printed = 0;
3427 
3428 		spin_lock(&tiqn->tiqn_tpg_lock);
3429 		list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3430 
3431 			/* If demo_mode_discovery=0 and generate_node_acls=0
			 * (demo mode disabled) do not return
3433 			 * TargetName+TargetAddress unless a NodeACL exists.
3434 			 */
3435 
3436 			if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3437 			    (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3438 			    (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
3439 				cmd->conn->sess->sess_ops->InitiatorName))) {
3440 				continue;
3441 			}
3442 
3443 			spin_lock(&tpg->tpg_state_lock);
3444 			active = (tpg->tpg_state == TPG_STATE_ACTIVE);
3445 			spin_unlock(&tpg->tpg_state_lock);
3446 
3447 			if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
3448 				continue;
3449 
3450 			spin_lock(&tpg->tpg_np_lock);
3451 			list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3452 						tpg_np_list) {
3453 				struct iscsi_np *np = tpg_np->tpg_np;
3454 				struct sockaddr_storage *sockaddr;
3455 
3456 				if (np->np_network_transport != network_transport)
3457 					continue;
3458 
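				/*
				 * Each text key=value pair is followed by a
				 * NUL delimiter in the payload, hence the
				 * len += 1 after each sprintf() below.
				 */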
3459 				if (!target_name_printed) {
3460 					len = sprintf(buf, "TargetName=%s",
3461 						      tiqn->tiqn);
3462 					len += 1;
3463 
3464 					if ((len + payload_len) > buffer_len) {
3465 						spin_unlock(&tpg->tpg_np_lock);
3466 						spin_unlock(&tiqn->tiqn_tpg_lock);
3467 						end_of_buf = 1;
3468 						goto eob;
3469 					}
3470 
3471 					if (skip_bytes && len <= skip_bytes) {
3472 						skip_bytes -= len;
3473 					} else {
3474 						memcpy(payload + payload_len, buf, len);
3475 						payload_len += len;
3476 						target_name_printed = 1;
3477 						if (len > skip_bytes)
3478 							skip_bytes = 0;
3479 					}
3480 				}
3481 
3482 				if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr))
3483 					sockaddr = &conn->local_sockaddr;
3484 				else
3485 					sockaddr = &np->np_sockaddr;
3486 
3487 				len = sprintf(buf, "TargetAddress="
3488 					      "%pISpc,%hu",
3489 					      sockaddr,
3490 					      tpg->tpgt);
3491 				len += 1;
3492 
3493 				if ((len + payload_len) > buffer_len) {
3494 					spin_unlock(&tpg->tpg_np_lock);
3495 					spin_unlock(&tiqn->tiqn_tpg_lock);
3496 					end_of_buf = 1;
3497 					goto eob;
3498 				}
3499 
3500 				if (skip_bytes && len <= skip_bytes) {
3501 					skip_bytes -= len;
3502 				} else {
3503 					memcpy(payload + payload_len, buf, len);
3504 					payload_len += len;
3505 					if (len > skip_bytes)
3506 						skip_bytes = 0;
3507 				}
3508 			}
3509 			spin_unlock(&tpg->tpg_np_lock);
3510 		}
3511 		spin_unlock(&tiqn->tiqn_tpg_lock);
3512 eob:
3513 		if (end_of_buf) {
3514 			*completed = false;
3515 			break;
3516 		}
3517 
3518 		if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
3519 			break;
3520 	}
3521 	spin_unlock(&tiqn_lock);
3522 
3523 	cmd->buf_ptr = payload;
3524 
3525 	return payload_len;
3526 }
3527 
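/*
 * Fill in the Text Response header around the SendTargets payload built
 * above.  If the payload did not fit in a single PDU, the C (Continue)
 * bit is set instead of F (Final), cmd->read_data_done records how much
 * has been sent so far, and a Target Transfer Tag is allocated so the
 * initiator can echo it in the follow-up (empty) Text Request.
 */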
3528 int
3529 iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3530 		      struct iscsi_text_rsp *hdr,
3531 		      enum iscsit_transport_type network_transport)
3532 {
3533 	int text_length, padding;
3534 	bool completed = true;
3535 
3536 	text_length = iscsit_build_sendtargets_response(cmd, network_transport,
3537 							cmd->read_data_done,
3538 							&completed);
3539 	if (text_length < 0)
3540 		return text_length;
3541 
3542 	if (completed) {
3543 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
3544 	} else {
3545 		hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
3546 		cmd->read_data_done += text_length;
3547 		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
3548 			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
3549 	}
3550 	hdr->opcode = ISCSI_OP_TEXT_RSP;
3551 	padding = ((-text_length) & 3);
3552 	hton24(hdr->dlength, text_length);
3553 	hdr->itt = cmd->init_task_tag;
3554 	hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3555 	cmd->stat_sn = conn->stat_sn++;
3556 	hdr->statsn = cpu_to_be32(cmd->stat_sn);
3557 
3558 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3559 	/*
3560 	 * Reset maxcmdsn_inc in multi-part text payload exchanges to
3561 	 * correctly increment MaxCmdSN for each response answering a
3562 	 * non-immediate text request with a valid CmdSN.
3563 	 */
3564 	cmd->maxcmdsn_inc = 0;
3565 	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3566 	hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3567 
3568 	pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
3569 		" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
3570 		cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
3571 		!!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
3572 		!!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
3573 
3574 	return text_length + padding;
3575 }
3576 EXPORT_SYMBOL(iscsit_build_text_rsp);
3577 
3578 static int iscsit_send_text_rsp(
3579 	struct iscsit_cmd *cmd,
3580 	struct iscsit_conn *conn)
3581 {
3582 	struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
3583 	int text_length;
3584 
3585 	text_length = iscsit_build_text_rsp(cmd, conn, hdr,
3586 				conn->conn_transport->transport_type);
3587 	if (text_length < 0)
3588 		return text_length;
3589 
3590 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3591 						     cmd->buf_ptr,
3592 						     text_length);
3593 }
3594 
3595 void
3596 iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3597 		    struct iscsi_reject *hdr)
3598 {
3599 	hdr->opcode		= ISCSI_OP_REJECT;
3600 	hdr->reason		= cmd->reject_reason;
3601 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3602 	hton24(hdr->dlength, ISCSI_HDR_LEN);
3603 	hdr->ffffffff		= cpu_to_be32(0xffffffff);
3604 	cmd->stat_sn		= conn->stat_sn++;
3605 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3606 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3607 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3609 }
3610 EXPORT_SYMBOL(iscsit_build_reject);
3611 
3612 static int iscsit_send_reject(
3613 	struct iscsit_cmd *cmd,
3614 	struct iscsit_conn *conn)
3615 {
3616 	struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3617 
3618 	iscsit_build_reject(cmd, conn, hdr);
3619 
3620 	pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3621 		" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3622 
3623 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3624 						     cmd->buf_ptr,
3625 						     ISCSI_HDR_LEN);
3626 }
3627 
3628 void iscsit_thread_get_cpumask(struct iscsit_conn *conn)
3629 {
3630 	int ord, cpu;
3631 	cpumask_var_t conn_allowed_cpumask;
3632 
3633 	/*
3634 	 * bitmap_id is assigned from iscsit_global->ts_bitmap from
3635 	 * within iscsit_start_kthreads()
3636 	 *
3637 	 * Here we use bitmap_id to determine which CPU that this
3638 	 * iSCSI connection's RX/TX threads will be scheduled to
3639 	 * execute upon.
3640 	 */
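	/*
	 * Illustrative example: with four usable CPUs and bitmap_id == 6,
	 * ord = 6 % 4 = 2, so the third CPU in the mask is selected.
	 * Consecutive connections are therefore spread round-robin across
	 * the allowed CPUs.
	 */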
3641 	if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
3642 		ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
3643 		for_each_online_cpu(cpu) {
3644 			if (ord-- == 0) {
3645 				cpumask_set_cpu(cpu, conn->conn_cpumask);
3646 				return;
3647 			}
3648 		}
3649 	} else {
3650 		cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
3651 			cpu_online_mask);
3652 
3653 		cpumask_clear(conn->conn_cpumask);
3654 		ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
3655 		for_each_cpu(cpu, conn_allowed_cpumask) {
3656 			if (ord-- == 0) {
3657 				cpumask_set_cpu(cpu, conn->conn_cpumask);
3658 				free_cpumask_var(conn_allowed_cpumask);
3659 				return;
3660 			}
3661 		}
3662 		free_cpumask_var(conn_allowed_cpumask);
3663 	}
3664 	/*
3665 	 * This should never be reached..
3666 	 */
3667 	dump_stack();
3668 	cpumask_setall(conn->conn_cpumask);
3669 }
3670 
3671 static void iscsit_thread_reschedule(struct iscsit_conn *conn)
3672 {
3673 	/*
3674 	 * If iscsit_global->allowed_cpumask has been modified, reschedule the
3675 	 * iSCSI connection's RX/TX threads and update conn->allowed_cpumask.
3676 	 */
3677 	if (!cpumask_equal(iscsit_global->allowed_cpumask,
3678 			   conn->allowed_cpumask)) {
3679 		iscsit_thread_get_cpumask(conn);
3680 		conn->conn_tx_reset_cpumask = 1;
3681 		conn->conn_rx_reset_cpumask = 1;
3682 		cpumask_copy(conn->allowed_cpumask,
3683 			     iscsit_global->allowed_cpumask);
3684 	}
3685 }
3686 
3687 void iscsit_thread_check_cpumask(
3688 	struct iscsit_conn *conn,
3689 	struct task_struct *p,
3690 	int mode)
3691 {
3692 	/*
3693 	 * The TX and RX threads may call iscsit_thread_check_cpumask()
3694 	 * at the same time. The RX thread might be faster and return from
3695 	 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
3696 	 * Then the TX thread sets it back to 1.
3697 	 * The next time the RX thread loops, it sees conn_rx_reset_cpumask
3698 	 * set to 1, calls set_cpus_allowed_ptr() again and sets it back to 0.
3699 	 */
3700 	iscsit_thread_reschedule(conn);
3701 
3702 	/*
3703 	 * mode == 1 signals iscsi_target_tx_thread() usage.
3704 	 * mode == 0 signals iscsi_target_rx_thread() usage.
3705 	 */
3706 	if (mode == 1) {
3707 		if (!conn->conn_tx_reset_cpumask)
3708 			return;
3709 	} else {
3710 		if (!conn->conn_rx_reset_cpumask)
3711 			return;
3712 	}
3713 
3714 	/*
3715 	 * Update the CPU mask for this single kthread so that
3716 	 * both TX and RX kthreads are scheduled to run on the
3717 	 * same CPU.
3718 	 */
3719 	set_cpus_allowed_ptr(p, conn->conn_cpumask);
3720 	if (mode == 1)
3721 		conn->conn_tx_reset_cpumask = 0;
3722 	else
3723 		conn->conn_rx_reset_cpumask = 0;
3724 }
3725 EXPORT_SYMBOL(iscsit_thread_check_cpumask);
3726 
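/*
 * Transmit one entry pulled off the connection's immediate queue.  This
 * queue carries work that should go out ahead of normal responses:
 * R2Ts, unsolicited NopINs (with or without a response requested), and
 * deferred command removal.
 */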
3727 int
3728 iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3729 {
3730 	int ret;
3731 
3732 	switch (state) {
3733 	case ISTATE_SEND_R2T:
3734 		ret = iscsit_send_r2t(cmd, conn);
3735 		if (ret < 0)
3736 			goto err;
3737 		break;
3738 	case ISTATE_REMOVE:
3739 		spin_lock_bh(&conn->cmd_lock);
3740 		list_del_init(&cmd->i_conn_node);
3741 		spin_unlock_bh(&conn->cmd_lock);
3742 
3743 		iscsit_free_cmd(cmd, false);
3744 		break;
3745 	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3746 		iscsit_mod_nopin_response_timer(conn);
3747 		ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
3748 		if (ret < 0)
3749 			goto err;
3750 		break;
3751 	case ISTATE_SEND_NOPIN_NO_RESPONSE:
3752 		ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
3753 		if (ret < 0)
3754 			goto err;
3755 		break;
3756 	default:
3757 		pr_err("Unknown Opcode: 0x%02x ITT:"
3758 		       " 0x%08x, i_state: %d on CID: %hu\n",
3759 		       cmd->iscsi_opcode, cmd->init_task_tag, state,
3760 		       conn->cid);
3761 		goto err;
3762 	}
3763 
3764 	return 0;
3765 
3766 err:
3767 	return -1;
3768 }
3769 EXPORT_SYMBOL(iscsit_immediate_queue);
3770 
3771 static int
3772 iscsit_handle_immediate_queue(struct iscsit_conn *conn)
3773 {
3774 	struct iscsit_transport *t = conn->conn_transport;
3775 	struct iscsi_queue_req *qr;
3776 	struct iscsit_cmd *cmd;
3777 	u8 state;
3778 	int ret;
3779 
3780 	while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3781 		atomic_set(&conn->check_immediate_queue, 0);
3782 		cmd = qr->cmd;
3783 		state = qr->state;
3784 		kmem_cache_free(lio_qr_cache, qr);
3785 
3786 		ret = t->iscsit_immediate_queue(conn, cmd, state);
3787 		if (ret < 0)
3788 			return ret;
3789 	}
3790 
3791 	return 0;
3792 }
3793 
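/*
 * Transmit one entry pulled off the connection's response queue.
 * Returns 1 when the immediate queue needs servicing first, 0 on
 * success, -ECONNRESET when a logout response has ended this
 * connection, and a negative value on transmit failure.
 */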
3794 int
3795 iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3796 {
3797 	int ret;
3798 
3799 check_rsp_state:
3800 	switch (state) {
3801 	case ISTATE_SEND_DATAIN:
3802 		ret = iscsit_send_datain(cmd, conn);
3803 		if (ret < 0)
3804 			goto err;
3805 		else if (!ret)
3806 			/* more drs */
3807 			goto check_rsp_state;
3808 		else if (ret == 1) {
3809 			/* all done */
3810 			spin_lock_bh(&cmd->istate_lock);
3811 			cmd->i_state = ISTATE_SENT_STATUS;
3812 			spin_unlock_bh(&cmd->istate_lock);
3813 
3814 			if (atomic_read(&conn->check_immediate_queue))
3815 				return 1;
3816 
3817 			return 0;
3818 		} else if (ret == 2) {
3819 			/* Still must send status;
3820 			 * SCF_TRANSPORT_TASK_SENSE was set. */
3821 			spin_lock_bh(&cmd->istate_lock);
3822 			cmd->i_state = ISTATE_SEND_STATUS;
3823 			spin_unlock_bh(&cmd->istate_lock);
3824 			state = ISTATE_SEND_STATUS;
3825 			goto check_rsp_state;
3826 		}
3827 
3828 		break;
3829 	case ISTATE_SEND_STATUS:
3830 	case ISTATE_SEND_STATUS_RECOVERY:
3831 		ret = iscsit_send_response(cmd, conn);
3832 		break;
3833 	case ISTATE_SEND_LOGOUTRSP:
3834 		ret = iscsit_send_logout(cmd, conn);
3835 		break;
3836 	case ISTATE_SEND_ASYNCMSG:
3837 		ret = iscsit_send_conn_drop_async_message(
3838 			cmd, conn);
3839 		break;
3840 	case ISTATE_SEND_NOPIN:
3841 		ret = iscsit_send_nopin(cmd, conn);
3842 		break;
3843 	case ISTATE_SEND_REJECT:
3844 		ret = iscsit_send_reject(cmd, conn);
3845 		break;
3846 	case ISTATE_SEND_TASKMGTRSP:
3847 		ret = iscsit_send_task_mgt_rsp(cmd, conn);
3848 		if (ret != 0)
3849 			break;
3850 		ret = iscsit_tmr_post_handler(cmd, conn);
3851 		if (ret != 0)
3852 			iscsit_fall_back_to_erl0(conn->sess);
3853 		break;
3854 	case ISTATE_SEND_TEXTRSP:
3855 		ret = iscsit_send_text_rsp(cmd, conn);
3856 		break;
3857 	default:
3858 		pr_err("Unknown Opcode: 0x%02x ITT:"
3859 		       " 0x%08x, i_state: %d on CID: %hu\n",
3860 		       cmd->iscsi_opcode, cmd->init_task_tag,
3861 		       state, conn->cid);
3862 		goto err;
3863 	}
3864 	if (ret < 0)
3865 		goto err;
3866 
3867 	switch (state) {
3868 	case ISTATE_SEND_LOGOUTRSP:
3869 		if (!iscsit_logout_post_handler(cmd, conn))
3870 			return -ECONNRESET;
3871 		fallthrough;
3872 	case ISTATE_SEND_STATUS:
3873 	case ISTATE_SEND_ASYNCMSG:
3874 	case ISTATE_SEND_NOPIN:
3875 	case ISTATE_SEND_STATUS_RECOVERY:
3876 	case ISTATE_SEND_TEXTRSP:
3877 	case ISTATE_SEND_TASKMGTRSP:
3878 	case ISTATE_SEND_REJECT:
3879 		spin_lock_bh(&cmd->istate_lock);
3880 		cmd->i_state = ISTATE_SENT_STATUS;
3881 		spin_unlock_bh(&cmd->istate_lock);
3882 		break;
3883 	default:
3884 		pr_err("Unknown Opcode: 0x%02x ITT:"
3885 		       " 0x%08x, i_state: %d on CID: %hu\n",
3886 		       cmd->iscsi_opcode, cmd->init_task_tag,
3887 		       cmd->i_state, conn->cid);
3888 		goto err;
3889 	}
3890 
3891 	if (atomic_read(&conn->check_immediate_queue))
3892 		return 1;
3893 
3894 	return 0;
3895 
3896 err:
3897 	return -1;
3898 }
3899 EXPORT_SYMBOL(iscsit_response_queue);
3900 
3901 static int iscsit_handle_response_queue(struct iscsit_conn *conn)
3902 {
3903 	struct iscsit_transport *t = conn->conn_transport;
3904 	struct iscsi_queue_req *qr;
3905 	struct iscsit_cmd *cmd;
3906 	u8 state;
3907 	int ret;
3908 
3909 	while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3910 		cmd = qr->cmd;
3911 		state = qr->state;
3912 		kmem_cache_free(lio_qr_cache, qr);
3913 
3914 		ret = t->iscsit_response_queue(conn, cmd, state);
3915 		if (ret == 1 || ret < 0)
3916 			return ret;
3917 	}
3918 
3919 	return 0;
3920 }
3921 
3922 int iscsi_target_tx_thread(void *arg)
3923 {
3924 	int ret = 0;
3925 	struct iscsit_conn *conn = arg;
3926 	bool conn_freed = false;
3927 
3928 	/*
3929 	 * Allow ourselves to be interrupted by SIGINT so that a
3930 	 * connection recovery / failure event can be triggered externally.
3931 	 */
3932 	allow_signal(SIGINT);
3933 
3934 	while (!kthread_should_stop()) {
3935 		/*
3936 		 * Ensure that both TX and RX per connection kthreads
3937 		 * are scheduled to run on the same CPU.
3938 		 */
3939 		iscsit_thread_check_cpumask(conn, current, 1);
3940 
3941 		wait_event_interruptible(conn->queues_wq,
3942 					 !iscsit_conn_all_queues_empty(conn));
3943 
3944 		if (signal_pending(current))
3945 			goto transport_err;
3946 
3947 get_immediate:
3948 		ret = iscsit_handle_immediate_queue(conn);
3949 		if (ret < 0)
3950 			goto transport_err;
3951 
3952 		ret = iscsit_handle_response_queue(conn);
3953 		if (ret == 1) {
3954 			goto get_immediate;
3955 		} else if (ret == -ECONNRESET) {
3956 			conn_freed = true;
3957 			goto out;
3958 		} else if (ret < 0) {
3959 			goto transport_err;
3960 		}
3961 	}
3962 
3963 transport_err:
3964 	/*
3965 	 * Avoid the normal connection failure code-path if this connection
3966 	 * is still within LOGIN mode, and iscsi_np process context is
3967 	 * responsible for cleaning up the early connection failure.
3968 	 */
3969 	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
3970 		iscsit_take_action_for_connection_exit(conn, &conn_freed);
3971 out:
3972 	if (!conn_freed) {
3973 		while (!kthread_should_stop()) {
3974 			msleep(100);
3975 		}
3976 	}
3977 	return 0;
3978 }
3979 
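/*
 * Dispatch a received PDU by opcode.  Most opcodes allocate a new
 * iscsit_cmd before handing the header to the per-opcode handler; a
 * Text Request carrying a valid TTT instead continues an existing
 * multi-PDU text exchange, so the original command is looked up by ITT.
 * Allocation failures are answered with a Reject PDU.
 */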
3980 static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf)
3981 {
3982 	struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
3983 	struct iscsit_cmd *cmd;
3984 	int ret = 0;
3985 
3986 	switch (hdr->opcode & ISCSI_OPCODE_MASK) {
3987 	case ISCSI_OP_SCSI_CMD:
3988 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3989 		if (!cmd)
3990 			goto reject;
3991 
3992 		ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3993 		break;
3994 	case ISCSI_OP_SCSI_DATA_OUT:
3995 		ret = iscsit_handle_data_out(conn, buf);
3996 		break;
3997 	case ISCSI_OP_NOOP_OUT:
3998 		cmd = NULL;
3999 		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
4000 			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4001 			if (!cmd)
4002 				goto reject;
4003 		}
4004 		ret = iscsit_handle_nop_out(conn, cmd, buf);
4005 		break;
4006 	case ISCSI_OP_SCSI_TMFUNC:
4007 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4008 		if (!cmd)
4009 			goto reject;
4010 
4011 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
4012 		break;
4013 	case ISCSI_OP_TEXT:
4014 		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
4015 			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
4016 			if (!cmd)
4017 				goto reject;
4018 		} else {
4019 			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4020 			if (!cmd)
4021 				goto reject;
4022 		}
4023 
4024 		ret = iscsit_handle_text_cmd(conn, cmd, buf);
4025 		break;
4026 	case ISCSI_OP_LOGOUT:
4027 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4028 		if (!cmd)
4029 			goto reject;
4030 
4031 		ret = iscsit_handle_logout_cmd(conn, cmd, buf);
4032 		if (ret > 0)
4033 			wait_for_completion_timeout(&conn->conn_logout_comp,
4034 					SECONDS_FOR_LOGOUT_COMP * HZ);
4035 		break;
4036 	case ISCSI_OP_SNACK:
4037 		ret = iscsit_handle_snack(conn, buf);
4038 		break;
4039 	default:
4040 		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
4041 		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
4042 			pr_err("Cannot recover from unknown"
4043 			" opcode while ERL=0, closing iSCSI connection.\n");
4044 			return -1;
4045 		}
4046 		pr_err("Unable to recover from unknown opcode while OFMarker=No,"
4047 		       " closing iSCSI connection.\n");
4048 		ret = -1;
4049 		break;
4050 	}
4051 
4052 	return ret;
4053 reject:
4054 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
4055 }
4056 
4057 static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
4058 {
4059 	bool ret;
4060 
4061 	spin_lock_bh(&conn->state_lock);
4062 	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
4063 	spin_unlock_bh(&conn->state_lock);
4064 
4065 	return ret;
4066 }
4067 
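/*
 * Main iscsi/tcp receive loop: read the 48-byte Basic Header Segment,
 * then any Additional Header Segments advertised in hdr->hlength (units
 * of four bytes), verify the CRC32C HeaderDigest when one was
 * negotiated, and finally hand the PDU to iscsi_target_rx_opcode().
 */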
4068 static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
4069 {
4070 	int ret;
4071 	u8 *buffer, *tmp_buf, opcode;
4072 	u32 checksum = 0, digest = 0;
4073 	struct iscsi_hdr *hdr;
4074 	struct kvec iov;
4075 
4076 	buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
4077 	if (!buffer)
4078 		return;
4079 
4080 	while (!kthread_should_stop()) {
4081 		/*
4082 		 * Ensure that both TX and RX per connection kthreads
4083 		 * are scheduled to run on the same CPU.
4084 		 */
4085 		iscsit_thread_check_cpumask(conn, current, 0);
4086 
4087 		memset(&iov, 0, sizeof(struct kvec));
4088 
4089 		iov.iov_base	= buffer;
4090 		iov.iov_len	= ISCSI_HDR_LEN;
4091 
4092 		ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
4093 		if (ret != ISCSI_HDR_LEN) {
4094 			iscsit_rx_thread_wait_for_tcp(conn);
4095 			break;
4096 		}
4097 
4098 		hdr = (struct iscsi_hdr *) buffer;
4099 		if (hdr->hlength) {
4100 			iov.iov_len = hdr->hlength * 4;
4101 			tmp_buf = krealloc(buffer,
4102 					  ISCSI_HDR_LEN + iov.iov_len,
4103 					  GFP_KERNEL);
4104 			if (!tmp_buf)
4105 				break;
4106 
4107 			buffer = tmp_buf;
4108 			iov.iov_base = &buffer[ISCSI_HDR_LEN];
4109 
4110 			ret = rx_data(conn, &iov, 1, iov.iov_len);
4111 			if (ret != iov.iov_len) {
4112 				iscsit_rx_thread_wait_for_tcp(conn);
4113 				break;
4114 			}
4115 		}
4116 
4117 		if (conn->conn_ops->HeaderDigest) {
4118 			iov.iov_base	= &digest;
4119 			iov.iov_len	= ISCSI_CRC_LEN;
4120 
4121 			ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
4122 			if (ret != ISCSI_CRC_LEN) {
4123 				iscsit_rx_thread_wait_for_tcp(conn);
4124 				break;
4125 			}
4126 
4127 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
4128 						  ISCSI_HDR_LEN, 0, NULL,
4129 						  &checksum);
4130 
4131 			if (digest != checksum) {
4132 				pr_err("HeaderDigest CRC32C failed,"
4133 					" received 0x%08x, computed 0x%08x\n",
4134 					digest, checksum);
4135 				/*
4136 				 * Set the PDU to 0xff so it will intentionally
4137 				 * hit the default case in iscsi_target_rx_opcode().
4138 				 */
4139 				memset(buffer, 0xff, ISCSI_HDR_LEN);
4140 				atomic_long_inc(&conn->sess->conn_digest_errors);
4141 			} else {
4142 				pr_debug("Got HeaderDigest CRC32C"
4143 						" 0x%08x\n", checksum);
4144 			}
4145 		}
4146 
4147 		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
4148 			break;
4149 
4150 		opcode = buffer[0] & ISCSI_OPCODE_MASK;
4151 
4152 		if (conn->sess->sess_ops->SessionType &&
4153 		    (opcode != ISCSI_OP_TEXT) &&
4154 		    (opcode != ISCSI_OP_LOGOUT)) {
4155 			pr_err("Received illegal iSCSI Opcode: 0x%02x"
4156 			" while in Discovery Session, rejecting.\n", opcode);
4157 			iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
4158 					  buffer);
4159 			break;
4160 		}
4161 
4162 		ret = iscsi_target_rx_opcode(conn, buffer);
4163 		if (ret < 0)
4164 			break;
4165 	}
4166 
4167 	kfree(buffer);
4168 }
4169 
4170 int iscsi_target_rx_thread(void *arg)
4171 {
4172 	int rc;
4173 	struct iscsit_conn *conn = arg;
4174 	bool conn_freed = false;
4175 
4176 	/*
4177 	 * Allow ourselves to be interrupted by SIGINT so that a
4178 	 * connection recovery / failure event can be triggered externally.
4179 	 */
4180 	allow_signal(SIGINT);
4181 	/*
4182 	 * Wait for iscsi_post_login_handler() to complete before allowing
4183 	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4184 	 */
4185 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4186 	if (rc < 0 || iscsi_target_check_conn_state(conn))
4187 		goto out;
4188 
4189 	if (!conn->conn_transport->iscsit_get_rx_pdu)
4190 		return 0;
4191 
4192 	conn->conn_transport->iscsit_get_rx_pdu(conn);
4193 
4194 	if (!signal_pending(current))
4195 		atomic_set(&conn->transport_failed, 1);
4196 	iscsit_take_action_for_connection_exit(conn, &conn_freed);
4197 
4198 out:
4199 	if (!conn_freed) {
4200 		while (!kthread_should_stop()) {
4201 			msleep(100);
4202 		}
4203 	}
4204 
4205 	return 0;
4206 }
4207 
4208 static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
4209 {
4210 	LIST_HEAD(tmp_list);
4211 	struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL;
4212 	struct iscsit_session *sess = conn->sess;
4213 	/*
4214 	 * We expect this function to only ever be called from either RX or TX
4215 	 * thread context via iscsit_close_connection(), once the other context
4216 	 * has been reset and has returned to its sleeping pre-handler state.
4217 	 */
4218 	spin_lock_bh(&conn->cmd_lock);
4219 	list_splice_init(&conn->conn_cmd_list, &tmp_list);
4220 
4221 	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4222 		struct se_cmd *se_cmd = &cmd->se_cmd;
4223 
4224 		if (!se_cmd->se_tfo)
4225 			continue;
4226 
4227 		spin_lock_irq(&se_cmd->t_state_lock);
4228 		if (se_cmd->transport_state & CMD_T_ABORTED) {
4229 			if (!(se_cmd->transport_state & CMD_T_TAS))
4230 				/*
4231 				 * LIO's abort path owns the cleanup for this,
4232 				 * so put it back on the list and let
4233 				 * aborted_task handle it.
4234 				 */
4235 				list_move_tail(&cmd->i_conn_node,
4236 					       &conn->conn_cmd_list);
4237 		} else {
4238 			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
4239 		}
4240 
4241 		if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
4242 			/*
4243 			 * We never submitted the cmd to LIO core, so we have
4244 			 * to tell LIO to perform the completion process.
4245 			 */
4246 			spin_unlock_irq(&se_cmd->t_state_lock);
4247 			target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED);
4248 			continue;
4249 		}
4250 		spin_unlock_irq(&se_cmd->t_state_lock);
4251 	}
4252 	spin_unlock_bh(&conn->cmd_lock);
4253 
4254 	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4255 		list_del_init(&cmd->i_conn_node);
4256 
4257 		iscsit_increment_maxcmdsn(cmd, sess);
4258 		iscsit_free_cmd(cmd, true);
4259 
4260 	}
4261 
4262 	/*
4263 	 * Wait on commands that were cleaned up via the aborted_task path.
4264 	 * LLDs that implement iscsit_wait_conn will already have waited for
4265 	 * commands.
4266 	 */
4267 	if (!conn->conn_transport->iscsit_wait_conn) {
4268 		target_stop_cmd_counter(conn->cmd_cnt);
4269 		target_wait_for_cmds(conn->cmd_cnt);
4270 	}
4271 }
4272 
4273 static void iscsit_stop_timers_for_cmds(
4274 	struct iscsit_conn *conn)
4275 {
4276 	struct iscsit_cmd *cmd;
4277 
4278 	spin_lock_bh(&conn->cmd_lock);
4279 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
4280 		if (cmd->data_direction == DMA_TO_DEVICE)
4281 			iscsit_stop_dataout_timer(cmd);
4282 	}
4283 	spin_unlock_bh(&conn->cmd_lock);
4284 }
4285 
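/*
 * Tear down a single connection: stop the peer RX/TX thread, release
 * the thread-set bitmap slot, stop per-command and NopIn timers, drain
 * or prepare outstanding commands for reallegiance, wake any pending
 * reinstatement waiters, free the digest contexts and socket, and
 * finally decide whether the owning session must be failed, stopped or
 * released.
 */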
4286 int iscsit_close_connection(
4287 	struct iscsit_conn *conn)
4288 {
4289 	int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
4290 	struct iscsit_session	*sess = conn->sess;
4291 
4292 	pr_debug("Closing iSCSI connection CID %hu on SID:"
4293 		" %u\n", conn->cid, sess->sid);
4294 	/*
4295 	 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
4296 	 * case just in case the RX Thread in iscsi_target_rx_opcode() is
4297 	 * sleeping and the logout response never got sent because the
4298 	 * connection failed.
4299 	 *
4300 	 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4301 	 * to signal logout response TX interrupt completion.  Go ahead and skip
4302 	 * this for iser since isert_rx_opcode() does not wait on logout failure,
4303 	 * and to avoid iscsit_conn pointer dereference in iser-target code.
4304 	 */
4305 	if (!conn->conn_transport->rdma_shutdown)
4306 		complete(&conn->conn_logout_comp);
4307 
4308 	if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
4309 		if (conn->tx_thread &&
4310 		    cmpxchg(&conn->tx_thread_active, true, false)) {
4311 			send_sig(SIGINT, conn->tx_thread, 1);
4312 			kthread_stop(conn->tx_thread);
4313 		}
4314 	} else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
4315 		if (conn->rx_thread &&
4316 		    cmpxchg(&conn->rx_thread_active, true, false)) {
4317 			send_sig(SIGINT, conn->rx_thread, 1);
4318 			kthread_stop(conn->rx_thread);
4319 		}
4320 	}
4321 
4322 	spin_lock(&iscsit_global->ts_bitmap_lock);
4323 	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
4324 			      get_order(1));
4325 	spin_unlock(&iscsit_global->ts_bitmap_lock);
4326 
4327 	iscsit_stop_timers_for_cmds(conn);
4328 	iscsit_stop_nopin_response_timer(conn);
4329 	iscsit_stop_nopin_timer(conn);
4330 
4331 	if (conn->conn_transport->iscsit_wait_conn)
4332 		conn->conn_transport->iscsit_wait_conn(conn);
4333 
4334 	/*
4335 	 * During Connection recovery drop unacknowledged out of order
4336 	 * commands for this connection, and prepare the other commands
4337 	 * for reallegiance.
4338 	 *
4339 	 * During normal operation clear the out of order commands (but
4340 	 * do not free the struct iscsi_ooo_cmdsn's) and release all
4341 	 * struct iscsit_cmds.
4342 	 */
4343 	if (atomic_read(&conn->connection_recovery)) {
4344 		iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
4345 		iscsit_prepare_cmds_for_reallegiance(conn);
4346 	} else {
4347 		iscsit_clear_ooo_cmdsns_for_conn(conn);
4348 		iscsit_release_commands_from_conn(conn);
4349 	}
4350 	iscsit_free_queue_reqs_for_conn(conn);
4351 
4352 	/*
4353 	 * Handle decrementing session or connection usage count if
4354 	 * a logout response was not able to be sent because the
4355 	 * connection failed.  Fall back to Session Recovery here.
4356 	 */
4357 	if (atomic_read(&conn->conn_logout_remove)) {
4358 		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
4359 			iscsit_dec_conn_usage_count(conn);
4360 			iscsit_dec_session_usage_count(sess);
4361 		}
4362 		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
4363 			iscsit_dec_conn_usage_count(conn);
4364 
4365 		atomic_set(&conn->conn_logout_remove, 0);
4366 		atomic_set(&sess->session_reinstatement, 0);
4367 		atomic_set(&sess->session_fall_back_to_erl0, 1);
4368 	}
4369 
4370 	spin_lock_bh(&sess->conn_lock);
4371 	list_del(&conn->conn_list);
4372 
4373 	/*
4374 	 * Attempt to let the Initiator know this connection failed by
4375 	 * sending a Connection Dropped Async Message on another
4376 	 * active connection.
4377 	 */
4378 	if (atomic_read(&conn->connection_recovery))
4379 		iscsit_build_conn_drop_async_message(conn);
4380 
4381 	spin_unlock_bh(&sess->conn_lock);
4382 
4383 	/*
4384 	 * If connection reinstatement is being performed on this connection,
4385 	 * up the connection reinstatement semaphore that is being blocked on
4386 	 * in iscsit_cause_connection_reinstatement().
4387 	 */
4388 	spin_lock_bh(&conn->state_lock);
4389 	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4390 		spin_unlock_bh(&conn->state_lock);
4391 		complete(&conn->conn_wait_comp);
4392 		wait_for_completion(&conn->conn_post_wait_comp);
4393 		spin_lock_bh(&conn->state_lock);
4394 	}
4395 
4396 	/*
4397 	 * If connection reinstatement is being performed on this connection
4398 	 * by receiving a REMOVECONNFORRECOVERY logout request, up the
4399 	 * connection wait rcfr semaphore that is being blocked on
4400 	 * in iscsit_connection_reinstatement_rcfr().
4401 	 */
4402 	if (atomic_read(&conn->connection_wait_rcfr)) {
4403 		spin_unlock_bh(&conn->state_lock);
4404 		complete(&conn->conn_wait_rcfr_comp);
4405 		wait_for_completion(&conn->conn_post_wait_comp);
4406 		spin_lock_bh(&conn->state_lock);
4407 	}
4408 	atomic_set(&conn->connection_reinstatement, 1);
4409 	spin_unlock_bh(&conn->state_lock);
4410 
4411 	/*
4412 	 * If any other processes are accessing this connection pointer we
4413 	 * must wait until they have completed.
4414 	 */
4415 	iscsit_check_conn_usage_count(conn);
4416 
4417 	ahash_request_free(conn->conn_tx_hash);
4418 	if (conn->conn_rx_hash) {
4419 		struct crypto_ahash *tfm;
4420 
4421 		tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
4422 		ahash_request_free(conn->conn_rx_hash);
4423 		crypto_free_ahash(tfm);
4424 	}
4425 
4426 	if (conn->sock)
4427 		sock_release(conn->sock);
4428 
4429 	if (conn->conn_transport->iscsit_free_conn)
4430 		conn->conn_transport->iscsit_free_conn(conn);
4431 
4432 	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4433 	conn->conn_state = TARG_CONN_STATE_FREE;
4434 	iscsit_free_conn(conn);
4435 
4436 	spin_lock_bh(&sess->conn_lock);
4437 	atomic_dec(&sess->nconn);
4438 	pr_debug("Decremented iSCSI connection count to %d from node:"
4439 		" %s\n", atomic_read(&sess->nconn),
4440 		sess->sess_ops->InitiatorName);
4441 	/*
4442 	 * Make sure that if one connection fails in a non-ERL=2 iSCSI
4443 	 * Session, they all fail.
4444 	 */
4445 	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
4446 	     !atomic_read(&sess->session_logout))
4447 		atomic_set(&sess->session_fall_back_to_erl0, 1);
4448 
4449 	/*
4450 	 * If this was not the last connection in the session, and we are
4451 	 * performing session reinstatement or falling back to ERL=0, call
4452 	 * iscsit_stop_session() without sleeping to shutdown the other
4453 	 * active connections.
4454 	 */
4455 	if (atomic_read(&sess->nconn)) {
4456 		if (!atomic_read(&sess->session_reinstatement) &&
4457 		    !atomic_read(&sess->session_fall_back_to_erl0)) {
4458 			spin_unlock_bh(&sess->conn_lock);
4459 			return 0;
4460 		}
4461 		if (!atomic_read(&sess->session_stop_active)) {
4462 			atomic_set(&sess->session_stop_active, 1);
4463 			spin_unlock_bh(&sess->conn_lock);
4464 			iscsit_stop_session(sess, 0, 0);
4465 			return 0;
4466 		}
4467 		spin_unlock_bh(&sess->conn_lock);
4468 		return 0;
4469 	}
4470 
4471 	/*
4472 	 * If this was the last connection in the session and one of the
4473 	 * following is occurring:
4474 	 *
4475 	 * Session Reinstatement is not being performed and we are falling back
4476 	 * to ERL=0, so call iscsit_close_session().
4477 	 *
4478 	 * Session Logout was requested.  iscsit_close_session() will be called
4479 	 * elsewhere.
4480 	 *
4481 	 * Session Continuation is not being performed, start the Time2Retain
4482 	 * handler and check if sleep_on_sess_wait_sem is active.
4483 	 */
4484 	if (!atomic_read(&sess->session_reinstatement) &&
4485 	     atomic_read(&sess->session_fall_back_to_erl0)) {
4486 		spin_unlock_bh(&sess->conn_lock);
4487 		complete_all(&sess->session_wait_comp);
4488 		iscsit_close_session(sess, true);
4489 
4490 		return 0;
4491 	} else if (atomic_read(&sess->session_logout)) {
4492 		pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4493 		sess->session_state = TARG_SESS_STATE_FREE;
4494 
4495 		if (atomic_read(&sess->session_close)) {
4496 			spin_unlock_bh(&sess->conn_lock);
4497 			complete_all(&sess->session_wait_comp);
4498 			iscsit_close_session(sess, true);
4499 		} else {
4500 			spin_unlock_bh(&sess->conn_lock);
4501 		}
4502 
4503 		return 0;
4504 	} else {
4505 		pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4506 		sess->session_state = TARG_SESS_STATE_FAILED;
4507 
4508 		if (!atomic_read(&sess->session_continuation))
4509 			iscsit_start_time2retain_handler(sess);
4510 
4511 		if (atomic_read(&sess->session_close)) {
4512 			spin_unlock_bh(&sess->conn_lock);
4513 			complete_all(&sess->session_wait_comp);
4514 			iscsit_close_session(sess, true);
4515 		} else {
4516 			spin_unlock_bh(&sess->conn_lock);
4517 		}
4518 
4519 		return 0;
4520 	}
4521 }
4522 
4523 /*
4524  * If the iSCSI Session for the iSCSI Initiator Node exists,
4525  * forcefully shut down the iSCSI NEXUS.
4526  */
4527 int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
4528 {
4529 	struct iscsi_portal_group *tpg = sess->tpg;
4530 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4531 
4532 	if (atomic_read(&sess->nconn)) {
4533 		pr_err("%d connection(s) still exist for iSCSI session"
4534 			" to %s\n", atomic_read(&sess->nconn),
4535 			sess->sess_ops->InitiatorName);
4536 		BUG();
4537 	}
4538 
4539 	spin_lock_bh(&se_tpg->session_lock);
4540 	atomic_set(&sess->session_logout, 1);
4541 	atomic_set(&sess->session_reinstatement, 1);
4542 	iscsit_stop_time2retain_timer(sess);
4543 	spin_unlock_bh(&se_tpg->session_lock);
4544 
4545 	if (sess->sess_ops->ErrorRecoveryLevel == 2)
4546 		iscsit_free_connection_recovery_entries(sess);
4547 
4548 	/*
4549 	 * transport_deregister_session_configfs() will clear the
4550 	 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process
4551 	 * context can set it again with __transport_register_session() in
4552 	 * iscsi_post_login_handler() after the iscsit_stop_session()
4553 	 * completes in iscsi_np context.
4554 	 */
4555 	transport_deregister_session_configfs(sess->se_sess);
4556 
4557 	/*
4558 	 * If any other processes are accessing this session pointer we must
4559 	 * wait until they have completed.  If we are in an interrupt (the
4560 	 * time2retain handler) and hold an active session usage count, we
4561 	 * restart the timer and exit.
4562 	 */
4563 	if (iscsit_check_session_usage_count(sess, can_sleep)) {
4564 		atomic_set(&sess->session_logout, 0);
4565 		iscsit_start_time2retain_handler(sess);
4566 		return 0;
4567 	}
4568 
4569 	transport_deregister_session(sess->se_sess);
4570 
4571 	iscsit_free_all_ooo_cmdsns(sess);
4572 
4573 	spin_lock_bh(&se_tpg->session_lock);
4574 	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4575 	sess->session_state = TARG_SESS_STATE_FREE;
4576 	pr_debug("Released iSCSI session from node: %s\n",
4577 			sess->sess_ops->InitiatorName);
4578 	tpg->nsessions--;
4579 	if (tpg->tpg_tiqn)
4580 		tpg->tpg_tiqn->tiqn_nsessions--;
4581 
4582 	pr_debug("Decremented number of active iSCSI Sessions on"
4583 		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4584 
4585 	ida_free(&sess_ida, sess->session_index);
4586 	kfree(sess->sess_ops);
4587 	sess->sess_ops = NULL;
4588 	spin_unlock_bh(&se_tpg->session_lock);
4589 
4590 	kfree(sess);
4591 	return 0;
4592 }
4593 
4594 static void iscsit_logout_post_handler_closesession(
4595 	struct iscsit_conn *conn)
4596 {
4597 	struct iscsit_session *sess = conn->sess;
4598 	int sleep = 1;
4599 	/*
4600 	 * Traditional iscsi/tcp will invoke this logic from TX thread
4601 	 * context during session logout, so clear tx_thread_active and
4602 	 * sleep if iscsit_close_connection() has not already occurred.
4603 	 *
4604 	 * Since iser-target invokes this logic from its own workqueue,
4605 	 * always sleep waiting for RX/TX thread shutdown to complete
4606 	 * within iscsit_close_connection().
4607 	 */
4608 	if (!conn->conn_transport->rdma_shutdown) {
4609 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
4610 		if (!sleep)
4611 			return;
4612 	}
4613 
4614 	atomic_set(&conn->conn_logout_remove, 0);
4615 	complete(&conn->conn_logout_comp);
4616 
4617 	iscsit_dec_conn_usage_count(conn);
4618 	atomic_set(&sess->session_close, 1);
4619 	iscsit_stop_session(sess, sleep, sleep);
4620 	iscsit_dec_session_usage_count(sess);
4621 }
4622 
4623 static void iscsit_logout_post_handler_samecid(
4624 	struct iscsit_conn *conn)
4625 {
4626 	int sleep = 1;
4627 
4628 	if (!conn->conn_transport->rdma_shutdown) {
4629 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
4630 		if (!sleep)
4631 			return;
4632 	}
4633 
4634 	atomic_set(&conn->conn_logout_remove, 0);
4635 	complete(&conn->conn_logout_comp);
4636 
4637 	iscsit_cause_connection_reinstatement(conn, sleep);
4638 	iscsit_dec_conn_usage_count(conn);
4639 }
4640 
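/*
 * Logout response for a different CID: look up the connection being
 * logged out, shut down the receive side of its socket so its RX thread
 * unblocks, and force connection reinstatement on it.
 */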
4641 static void iscsit_logout_post_handler_diffcid(
4642 	struct iscsit_conn *conn,
4643 	u16 cid)
4644 {
4645 	struct iscsit_conn *l_conn;
4646 	struct iscsit_session *sess = conn->sess;
4647 	bool conn_found = false;
4648 
4649 	if (!sess)
4650 		return;
4651 
4652 	spin_lock_bh(&sess->conn_lock);
4653 	list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4654 		if (l_conn->cid == cid) {
4655 			iscsit_inc_conn_usage_count(l_conn);
4656 			conn_found = true;
4657 			break;
4658 		}
4659 	}
4660 	spin_unlock_bh(&sess->conn_lock);
4661 
4662 	if (!conn_found)
4663 		return;
4664 
4665 	if (l_conn->sock)
4666 		l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4667 
4668 	spin_lock_bh(&l_conn->state_lock);
4669 	pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4670 	l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4671 	spin_unlock_bh(&l_conn->state_lock);
4672 
4673 	iscsit_cause_connection_reinstatement(l_conn, 1);
4674 	iscsit_dec_conn_usage_count(l_conn);
4675 }
4676 
4677 /*
4678  *	Return of 0 causes the TX thread to restart.
4679  */
4680 int iscsit_logout_post_handler(
4681 	struct iscsit_cmd *cmd,
4682 	struct iscsit_conn *conn)
4683 {
4684 	int ret = 0;
4685 
4686 	switch (cmd->logout_reason) {
4687 	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4688 		switch (cmd->logout_response) {
4689 		case ISCSI_LOGOUT_SUCCESS:
4690 		case ISCSI_LOGOUT_CLEANUP_FAILED:
4691 		default:
4692 			iscsit_logout_post_handler_closesession(conn);
4693 			break;
4694 		}
4695 		break;
4696 	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4697 		if (conn->cid == cmd->logout_cid) {
4698 			switch (cmd->logout_response) {
4699 			case ISCSI_LOGOUT_SUCCESS:
4700 			case ISCSI_LOGOUT_CLEANUP_FAILED:
4701 			default:
4702 				iscsit_logout_post_handler_samecid(conn);
4703 				break;
4704 			}
4705 		} else {
4706 			switch (cmd->logout_response) {
4707 			case ISCSI_LOGOUT_SUCCESS:
4708 				iscsit_logout_post_handler_diffcid(conn,
4709 					cmd->logout_cid);
4710 				break;
4711 			case ISCSI_LOGOUT_CID_NOT_FOUND:
4712 			case ISCSI_LOGOUT_CLEANUP_FAILED:
4713 			default:
4714 				break;
4715 			}
4716 			ret = 1;
4717 		}
4718 		break;
4719 	case ISCSI_LOGOUT_REASON_RECOVERY:
4720 		switch (cmd->logout_response) {
4721 		case ISCSI_LOGOUT_SUCCESS:
4722 		case ISCSI_LOGOUT_CID_NOT_FOUND:
4723 		case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4724 		case ISCSI_LOGOUT_CLEANUP_FAILED:
4725 		default:
4726 			break;
4727 		}
4728 		ret = 1;
4729 		break;
4730 	default:
4731 		break;
4732 
4733 	}
4734 	return ret;
4735 }
4736 EXPORT_SYMBOL(iscsit_logout_post_handler);
4737 
4738 void iscsit_fail_session(struct iscsit_session *sess)
4739 {
4740 	struct iscsit_conn *conn;
4741 
4742 	spin_lock_bh(&sess->conn_lock);
4743 	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4744 		pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4745 		conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4746 	}
4747 	spin_unlock_bh(&sess->conn_lock);
4748 
4749 	pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4750 	sess->session_state = TARG_SESS_STATE_FAILED;
4751 }
4752 
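/*
 * session_sleep: wait for the remaining connections to signal
 * session_wait_comp before returning.
 * connection_sleep: perform each connection reinstatement synchronously
 * instead of only initiating it.
 */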
4753 void iscsit_stop_session(
4754 	struct iscsit_session *sess,
4755 	int session_sleep,
4756 	int connection_sleep)
4757 {
4758 	u16 conn_count = atomic_read(&sess->nconn);
4759 	struct iscsit_conn *conn, *conn_tmp = NULL;
4760 	int is_last;
4761 
4762 	spin_lock_bh(&sess->conn_lock);
4763 
4764 	if (connection_sleep) {
4765 		list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4766 				conn_list) {
4767 			if (conn_count == 0)
4768 				break;
4769 
4770 			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4771 				is_last = 1;
4772 			} else {
4773 				iscsit_inc_conn_usage_count(conn_tmp);
4774 				is_last = 0;
4775 			}
4776 			iscsit_inc_conn_usage_count(conn);
4777 
4778 			spin_unlock_bh(&sess->conn_lock);
4779 			iscsit_cause_connection_reinstatement(conn, 1);
4780 			spin_lock_bh(&sess->conn_lock);
4781 
4782 			iscsit_dec_conn_usage_count(conn);
4783 			if (is_last == 0)
4784 				iscsit_dec_conn_usage_count(conn_tmp);
4785 			conn_count--;
4786 		}
4787 	} else {
4788 		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
4789 			iscsit_cause_connection_reinstatement(conn, 0);
4790 	}
4791 
4792 	if (session_sleep && atomic_read(&sess->nconn)) {
4793 		spin_unlock_bh(&sess->conn_lock);
4794 		wait_for_completion(&sess->session_wait_comp);
4795 	} else
4796 		spin_unlock_bh(&sess->conn_lock);
4797 }
4798 
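/*
 * Two passes under different locking: first walk the TPG session list
 * and mark every session not already closing for reinstatement, moving
 * it to a private list; then, with the TPG session lock dropped, stop
 * and release each marked session.
 */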
4799 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4800 {
4801 	struct iscsit_session *sess;
4802 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4803 	struct se_session *se_sess, *se_sess_tmp;
4804 	LIST_HEAD(free_list);
4805 	int session_count = 0;
4806 
4807 	spin_lock_bh(&se_tpg->session_lock);
4808 	if (tpg->nsessions && !force) {
4809 		spin_unlock_bh(&se_tpg->session_lock);
4810 		return -1;
4811 	}
4812 
4813 	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
4814 			sess_list) {
4815 		sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
4816 
4817 		spin_lock(&sess->conn_lock);
4818 		if (atomic_read(&sess->session_fall_back_to_erl0) ||
4819 		    atomic_read(&sess->session_logout) ||
4820 		    atomic_read(&sess->session_close) ||
4821 		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
4822 			spin_unlock(&sess->conn_lock);
4823 			continue;
4824 		}
4825 		iscsit_inc_session_usage_count(sess);
4826 		atomic_set(&sess->session_reinstatement, 1);
4827 		atomic_set(&sess->session_fall_back_to_erl0, 1);
4828 		atomic_set(&sess->session_close, 1);
4829 		spin_unlock(&sess->conn_lock);
4830 
4831 		list_move_tail(&se_sess->sess_list, &free_list);
4832 	}
4833 	spin_unlock_bh(&se_tpg->session_lock);
4834 
4835 	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4836 		sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
4837 
4838 		list_del_init(&se_sess->sess_list);
4839 		iscsit_stop_session(sess, 1, 1);
4840 		iscsit_dec_session_usage_count(sess);
4841 		session_count++;
4842 	}
4843 
4844 	pr_debug("Released %d iSCSI Session(s) from Target Portal"
4845 			" Group: %hu\n", session_count, tpg->tpgt);
4846 	return 0;
4847 }
4848 
4849 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4850 MODULE_VERSION("4.1.x");
4851 MODULE_AUTHOR("nab@Linux-iSCSI.org");
4852 MODULE_LICENSE("GPL");
4853 
4854 module_init(iscsi_target_init_module);
4855 module_exit(iscsi_target_cleanup_module);
4856