xref: /openbmc/linux/drivers/scsi/iscsi_tcp.c (revision 06ba8020)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * iSCSI Initiator over TCP/IP Data-Path
4  *
5  * Copyright (C) 2004 Dmitry Yusupov
6  * Copyright (C) 2004 Alex Aizman
7  * Copyright (C) 2005 - 2006 Mike Christie
8  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
9  * maintained by open-iscsi@googlegroups.com
10  *
11  * See the file COPYING included with this distribution for more details.
12  *
13  * Credits:
14  *	Christoph Hellwig
15  *	FUJITA Tomonori
16  *	Arne Redlich
17  *	Zhenyu Wang
18  */
19 
20 #include <crypto/hash.h>
21 #include <linux/types.h>
22 #include <linux/inet.h>
23 #include <linux/slab.h>
24 #include <linux/sched/mm.h>
25 #include <linux/file.h>
26 #include <linux/blkdev.h>
27 #include <linux/delay.h>
28 #include <linux/kfifo.h>
29 #include <linux/scatterlist.h>
30 #include <linux/module.h>
31 #include <linux/backing-dev.h>
32 #include <net/tcp.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_transport_iscsi.h>
38 #include <trace/events/iscsi.h>
39 #include <trace/events/sock.h>
40 
41 #include "iscsi_tcp.h"
42 
43 MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
44 	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
45 	      "Alex Aizman <itn780@yahoo.com>");
46 MODULE_DESCRIPTION("iSCSI/TCP data-path");
47 MODULE_LICENSE("GPL");
48 
49 static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
50 static const struct scsi_host_template iscsi_sw_tcp_sht;
51 static struct iscsi_transport iscsi_sw_tcp_transport;
52 
53 static unsigned int iscsi_max_lun = ~0;
54 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
55 
56 static bool iscsi_recv_from_iscsi_q;
57 module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
58 MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false, in which case reads are performed from the network softirq context.");
59 
60 static int iscsi_sw_tcp_dbg;
61 module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
62 		   S_IRUGO | S_IWUSR);
63 MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for the iscsi_tcp module. "
64 		 "Set to 1 to turn on and 0 to turn off. Default is off.");
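/*
 * Example module load with reads deferred to the iscsi_q workqueue and
 * debug logging enabled (illustrative values, not the defaults):
 *
 *   modprobe iscsi_tcp recv_from_iscsi_q=1 debug_iscsi_tcp=1
 *
 * Both of these parameters are also writable at runtime through
 * /sys/module/iscsi_tcp/parameters/.
 */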
65 
66 #define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...)		\
67 	do {							\
68 		if (iscsi_sw_tcp_dbg)				\
69 			iscsi_conn_printk(KERN_INFO, _conn,	\
70 					     "%s " dbg_fmt,	\
71 					     __func__, ##arg);	\
72 		iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp,		\
73 				&(_conn)->cls_conn->dev,	\
74 				"%s " dbg_fmt, __func__, ##arg);\
75 	} while (0)
76 
77 
78 /**
79  * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
80  * @rd_desc: read descriptor
81  * @skb: socket buffer
82  * @offset: offset in skb
83  * @len: skb->len - offset
84  */
85 static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
86 			     unsigned int offset, size_t len)
87 {
88 	struct iscsi_conn *conn = rd_desc->arg.data;
89 	unsigned int consumed, total_consumed = 0;
90 	int status;
91 
92 	ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset);
93 
94 	do {
95 		status = 0;
96 		consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
97 		offset += consumed;
98 		total_consumed += consumed;
99 	} while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
100 
101 	ISCSI_SW_TCP_DBG(conn, "read %d bytes status %d\n",
102 			 skb->len - offset, status);
103 	return total_consumed;
104 }
105 
106 /**
107  * iscsi_sw_sk_state_check - check socket state
108  * @sk: socket
109  *
110  * If the socket is in CLOSE or CLOSE_WAIT, do not fail the
111  * connection while there is still some received data
112  * pending.
113  *
114  * Must be called with the sk_callback_lock held.
115  */
116 static inline int iscsi_sw_sk_state_check(struct sock *sk)
117 {
118 	struct iscsi_conn *conn = sk->sk_user_data;
119 
120 	if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
121 	    (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
122 	    !atomic_read(&sk->sk_rmem_alloc)) {
123 		ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
124 		iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
125 		return -ECONNRESET;
126 	}
127 	return 0;
128 }
129 
130 static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
131 {
132 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
133 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
134 	struct sock *sk = tcp_sw_conn->sock->sk;
135 	read_descriptor_t rd_desc;
136 
137 	/*
138 	 * Use rd_desc to pass 'conn' to iscsi_sw_tcp_recv.
139 	 * We set count to 1 because we want the network layer to
140 	 * hand us all the skbs that are available. iscsi_sw_tcp_recv
141 	 * handles PDUs that cross buffers or PDUs that still need data.
142 	 */
143 	rd_desc.arg.data = conn;
144 	rd_desc.count = 1;
145 
146 	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
147 
148 	/* If we had to (atomically) map a highmem page,
149 	 * unmap it now. */
150 	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
151 
152 	iscsi_sw_sk_state_check(sk);
153 }
154 
155 static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
156 {
157 	struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
158 					       recvwork);
159 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
160 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
161 	struct sock *sk = tcp_sw_conn->sock->sk;
162 
163 	lock_sock(sk);
164 	iscsi_sw_tcp_recv_data(conn);
165 	release_sock(sk);
166 }
167 
168 static void iscsi_sw_tcp_data_ready(struct sock *sk)
169 {
170 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
171 	struct iscsi_tcp_conn *tcp_conn;
172 	struct iscsi_conn *conn;
173 
174 	trace_sk_data_ready(sk);
175 
176 	read_lock_bh(&sk->sk_callback_lock);
177 	conn = sk->sk_user_data;
178 	if (!conn) {
179 		read_unlock_bh(&sk->sk_callback_lock);
180 		return;
181 	}
182 	tcp_conn = conn->dd_data;
183 	tcp_sw_conn = tcp_conn->dd_data;
184 
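	/*
	 * Either defer the read to the iscsi_q workqueue or consume the
	 * data here in softirq context, depending on the recv_from_iscsi_q
	 * setting sampled at connection creation.
	 */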
185 	if (tcp_sw_conn->queue_recv)
186 		iscsi_conn_queue_recv(conn);
187 	else
188 		iscsi_sw_tcp_recv_data(conn);
189 	read_unlock_bh(&sk->sk_callback_lock);
190 }
191 
192 static void iscsi_sw_tcp_state_change(struct sock *sk)
193 {
194 	struct iscsi_tcp_conn *tcp_conn;
195 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
196 	struct iscsi_conn *conn;
197 	void (*old_state_change)(struct sock *);
198 
199 	read_lock_bh(&sk->sk_callback_lock);
200 	conn = sk->sk_user_data;
201 	if (!conn) {
202 		read_unlock_bh(&sk->sk_callback_lock);
203 		return;
204 	}
205 
206 	iscsi_sw_sk_state_check(sk);
207 
208 	tcp_conn = conn->dd_data;
209 	tcp_sw_conn = tcp_conn->dd_data;
210 	old_state_change = tcp_sw_conn->old_state_change;
211 
212 	read_unlock_bh(&sk->sk_callback_lock);
213 
214 	old_state_change(sk);
215 }
216 
217 /**
218  * iscsi_sw_tcp_write_space - Called when more output buffer space is available
219  * @sk: socket space is available for
220  **/
221 static void iscsi_sw_tcp_write_space(struct sock *sk)
222 {
223 	struct iscsi_conn *conn;
224 	struct iscsi_tcp_conn *tcp_conn;
225 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
226 	void (*old_write_space)(struct sock *);
227 
228 	read_lock_bh(&sk->sk_callback_lock);
229 	conn = sk->sk_user_data;
230 	if (!conn) {
231 		read_unlock_bh(&sk->sk_callback_lock);
232 		return;
233 	}
234 
235 	tcp_conn = conn->dd_data;
236 	tcp_sw_conn = tcp_conn->dd_data;
237 	old_write_space = tcp_sw_conn->old_write_space;
238 	read_unlock_bh(&sk->sk_callback_lock);
239 
240 	old_write_space(sk);
241 
242 	ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
243 	iscsi_conn_queue_xmit(conn);
244 }
245 
246 static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
247 {
248 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
249 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
250 	struct sock *sk = tcp_sw_conn->sock->sk;
251 
252 	/* assign new callbacks */
253 	write_lock_bh(&sk->sk_callback_lock);
254 	sk->sk_user_data = conn;
255 	tcp_sw_conn->old_data_ready = sk->sk_data_ready;
256 	tcp_sw_conn->old_state_change = sk->sk_state_change;
257 	tcp_sw_conn->old_write_space = sk->sk_write_space;
258 	sk->sk_data_ready = iscsi_sw_tcp_data_ready;
259 	sk->sk_state_change = iscsi_sw_tcp_state_change;
260 	sk->sk_write_space = iscsi_sw_tcp_write_space;
261 	write_unlock_bh(&sk->sk_callback_lock);
262 }
263 
264 static void
265 iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
266 {
267 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
268 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
269 	struct sock *sk = tcp_sw_conn->sock->sk;
270 
271 	/* restore socket callbacks, see also: iscsi_sw_tcp_conn_set_callbacks() */
272 	write_lock_bh(&sk->sk_callback_lock);
273 	sk->sk_user_data    = NULL;
274 	sk->sk_data_ready   = tcp_sw_conn->old_data_ready;
275 	sk->sk_state_change = tcp_sw_conn->old_state_change;
276 	sk->sk_write_space  = tcp_sw_conn->old_write_space;
277 	sk->sk_no_check_tx = 0;
278 	write_unlock_bh(&sk->sk_callback_lock);
279 }
280 
281 /**
282  * iscsi_sw_tcp_xmit_segment - transmit segment
283  * @tcp_conn: the iSCSI TCP connection
284  * @segment: the buffer to transmit
285  *
286  * This function transmits as much of the buffer as
287  * the network layer will accept, and returns the number of
288  * bytes transmitted.
289  *
290  * If CRC hashing is enabled, the function will compute the
291  * hash as it goes. When the entire segment has been transmitted,
292  * it will retrieve the hash value and send it as well.
293  */
294 static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
295 				     struct iscsi_segment *segment)
296 {
297 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
298 	struct socket *sk = tcp_sw_conn->sock;
299 	unsigned int copied = 0;
300 	int r = 0;
301 
302 	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
303 		struct scatterlist *sg;
304 		unsigned int offset, copy;
305 		int flags = 0;
306 
307 		r = 0;
308 		offset = segment->copied;
309 		copy = segment->size - offset;
310 
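		/*
		 * More of this PDU still follows, so hint to the stack that
		 * it should hold off pushing this chunk on its own.
		 */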
311 		if (segment->total_copied + segment->size < segment->total_size)
312 			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
313 
314 		if (tcp_sw_conn->queue_recv)
315 			flags |= MSG_DONTWAIT;
316 
317 		/* Use sendpage if we can; else fall back to sendmsg */
318 		if (!segment->data) {
319 			sg = segment->sg;
320 			offset += segment->sg_offset + sg->offset;
321 			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
322 						  copy, flags);
323 		} else {
324 			struct msghdr msg = { .msg_flags = flags };
325 			struct kvec iov = {
326 				.iov_base = segment->data + offset,
327 				.iov_len = copy
328 			};
329 
330 			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
331 		}
332 
333 		if (r < 0) {
334 			iscsi_tcp_segment_unmap(segment);
335 			return r;
336 		}
337 		copied += r;
338 	}
339 	return copied;
340 }
341 
342 /**
343  * iscsi_sw_tcp_xmit - TCP transmit
344  * @conn: iscsi connection
345  **/
346 static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
347 {
348 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
349 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
350 	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
351 	unsigned int consumed = 0;
352 	int rc = 0;
353 
354 	while (1) {
355 		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
356 		/*
357 		 * We may not have been able to send data because the conn
358 		 * is getting stopped. libiscsi will know so propagate err
359 		 * for it to do the right thing.
360 		 */
361 		if (rc == -EAGAIN)
362 			return rc;
363 		else if (rc < 0) {
364 			rc = ISCSI_ERR_XMIT_FAILED;
365 			goto error;
366 		} else if (rc == 0)
367 			break;
368 
369 		consumed += rc;
370 
371 		if (segment->total_copied >= segment->total_size) {
372 			if (segment->done != NULL) {
373 				rc = segment->done(tcp_conn, segment);
374 				if (rc != 0)
375 					goto error;
376 			}
377 		}
378 	}
379 
380 	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);
381 
382 	conn->txdata_octets += consumed;
383 	return consumed;
384 
385 error:
386 	/* Transmit error. We could initiate error recovery
387 	 * here. */
388 	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
389 	iscsi_conn_failure(conn, rc);
390 	return -EIO;
391 }
392 
393 /**
394  * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
395  * @conn: iscsi connection
396  */
397 static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
398 {
399 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
400 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
401 	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
402 
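	/*
	 * Only ever used as a boolean: this is non-zero while the current
	 * out segment still has data that has not been handed to the
	 * network layer.
	 */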
403 	return segment->total_copied - segment->total_size;
404 }
405 
406 static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
407 {
408 	struct iscsi_conn *conn = task->conn;
409 	unsigned int noreclaim_flag;
410 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
411 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
412 	int rc = 0;
413 
414 	if (!tcp_sw_conn->sock) {
415 		iscsi_conn_printk(KERN_ERR, conn,
416 				  "Transport not bound to socket!\n");
417 		return -EINVAL;
418 	}
419 
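	/*
	 * This path can be entered from memory reclaim (writeback to an
	 * iSCSI device), so run with PF_MEMALLOC to keep the network
	 * allocations below from recursing back into reclaim.
	 */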
420 	noreclaim_flag = memalloc_noreclaim_save();
421 
422 	while (iscsi_sw_tcp_xmit_qlen(conn)) {
423 		rc = iscsi_sw_tcp_xmit(conn);
424 		if (rc == 0) {
425 			rc = -EAGAIN;
426 			break;
427 		}
428 		if (rc < 0)
429 			break;
430 		rc = 0;
431 	}
432 
433 	memalloc_noreclaim_restore(noreclaim_flag);
434 	return rc;
435 }
436 
437 /*
438  * This is called when we're done sending the header.
439  * Simply copy the data_segment to the send segment, and return.
440  */
441 static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
442 				      struct iscsi_segment *segment)
443 {
444 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
445 
446 	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
447 	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
448 			 "Header done. Next segment size %u total_size %u\n",
449 			 tcp_sw_conn->out.segment.size,
450 			 tcp_sw_conn->out.segment.total_size);
451 	return 0;
452 }
453 
454 static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
455 				       size_t hdrlen)
456 {
457 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
458 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
459 
460 	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
461 			 "digest enabled" : "digest disabled");
462 
463 	/* Clear the data segment - needs to be filled in by the
464 	 * caller using iscsi_sw_tcp_send_data_prep() */
465 	memset(&tcp_sw_conn->out.data_segment, 0,
466 	       sizeof(struct iscsi_segment));
467 
468 	/* If header digest is enabled, compute the CRC and
469 	 * place the digest into the same buffer. We make
470 	 * sure that both iscsi_tcp_task and mtask have
471 	 * sufficient room.
472 	 */
473 	if (conn->hdrdgst_en) {
474 		iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
475 				      hdr + hdrlen);
476 		hdrlen += ISCSI_DIGEST_SIZE;
477 	}
478 
479 	/* Remember header pointer for later, when we need
480 	 * to decide whether there's a payload to go along
481 	 * with the header. */
482 	tcp_sw_conn->out.hdr = hdr;
483 
484 	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
485 				  iscsi_sw_tcp_send_hdr_done, NULL);
486 }
487 
488 /*
489  * Prepare the send buffer for the payload data.
490  * Padding and checksumming will all be taken care
491  * of by the iscsi_segment routines.
492  */
493 static int
494 iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
495 			    unsigned int count, unsigned int offset,
496 			    unsigned int len)
497 {
498 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
499 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
500 	struct ahash_request *tx_hash = NULL;
501 	unsigned int hdr_spec_len;
502 
503 	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
504 			 conn->datadgst_en ?
505 			 "digest enabled" : "digest disabled");
506 
507 	/* Make sure the datalen matches what the caller
508 	   said they would send. */
509 	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
510 	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
511 
512 	if (conn->datadgst_en)
513 		tx_hash = tcp_sw_conn->tx_hash;
514 
515 	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
516 				     sg, count, offset, len,
517 				     NULL, tx_hash);
518 }
519 
520 static void
521 iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
522 				   size_t len)
523 {
524 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
525 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
526 	struct ahash_request *tx_hash = NULL;
527 	unsigned int hdr_spec_len;
528 
529 	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
530 			 "digest enabled" : "digest disabled");
531 
532 	/* Make sure the datalen matches what the caller
533 	   said they would send. */
534 	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
535 	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
536 
537 	if (conn->datadgst_en)
538 		tx_hash = tcp_sw_conn->tx_hash;
539 
540 	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
541 				data, len, NULL, tx_hash);
542 }
543 
544 static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
545 				 unsigned int offset, unsigned int count)
546 {
547 	struct iscsi_conn *conn = task->conn;
548 	int err = 0;
549 
550 	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
551 
552 	if (!count)
553 		return 0;
554 
555 	if (!task->sc)
556 		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
557 	else {
558 		struct scsi_data_buffer *sdb = &task->sc->sdb;
559 
560 		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
561 						  sdb->table.nents, offset,
562 						  count);
563 	}
564 
565 	if (err) {
566 		/* got invalid offset/len */
567 		return -EIO;
568 	}
569 	return 0;
570 }
571 
572 static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
573 {
574 	struct iscsi_tcp_task *tcp_task = task->dd_data;
575 
576 	task->hdr = task->dd_data + sizeof(*tcp_task);
577 	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
578 	return 0;
579 }
580 
581 static struct iscsi_cls_conn *
582 iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
583 			 uint32_t conn_idx)
584 {
585 	struct iscsi_conn *conn;
586 	struct iscsi_cls_conn *cls_conn;
587 	struct iscsi_tcp_conn *tcp_conn;
588 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
589 	struct crypto_ahash *tfm;
590 
591 	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
592 					conn_idx);
593 	if (!cls_conn)
594 		return NULL;
595 	conn = cls_conn->dd_data;
596 	tcp_conn = conn->dd_data;
597 	tcp_sw_conn = tcp_conn->dd_data;
598 	INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
599 	tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
600 
601 	mutex_init(&tcp_sw_conn->sock_lock);
602 
603 	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
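	/* crc32c is used for the optional iSCSI header and data digests. */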
604 	if (IS_ERR(tfm))
605 		goto free_conn;
606 
607 	tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
608 	if (!tcp_sw_conn->tx_hash)
609 		goto free_tfm;
610 	ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
611 
612 	tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
613 	if (!tcp_sw_conn->rx_hash)
614 		goto free_tx_hash;
615 	ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
616 
617 	tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
618 
619 	return cls_conn;
620 
621 free_tx_hash:
622 	ahash_request_free(tcp_sw_conn->tx_hash);
623 free_tfm:
624 	crypto_free_ahash(tfm);
625 free_conn:
626 	iscsi_conn_printk(KERN_ERR, conn,
627 			  "Could not create connection due to crc32c "
628 			  "loading error. Make sure the crc32c "
629 			  "module is built as a module or into the "
630 			  "kernel\n");
631 	iscsi_tcp_conn_teardown(cls_conn);
632 	return NULL;
633 }
634 
635 static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
636 {
637 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
638 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
639 	struct socket *sock = tcp_sw_conn->sock;
640 
641 	/*
642 	 * The iscsi transport class will make sure we are not called in
643 	 * parallel with start, stop, bind and destroys. However, this can be
644 	 * called twice if userspace does a stop then a destroy.
645 	 */
646 	if (!sock)
647 		return;
648 
649 	/*
650 	 * Make sure we start socket shutdown now in case userspace is up
651 	 * but delayed in releasing the socket.
652 	 */
653 	kernel_sock_shutdown(sock, SHUT_RDWR);
654 
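	/*
	 * Hold an extra reference on the sock while we unhook our callbacks
	 * so it cannot go away underneath us.
	 */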
655 	sock_hold(sock->sk);
656 	iscsi_sw_tcp_conn_restore_callbacks(conn);
657 	sock_put(sock->sk);
658 
659 	iscsi_suspend_rx(conn);
660 
661 	mutex_lock(&tcp_sw_conn->sock_lock);
662 	tcp_sw_conn->sock = NULL;
663 	mutex_unlock(&tcp_sw_conn->sock_lock);
664 	sockfd_put(sock);
665 }
666 
667 static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
668 {
669 	struct iscsi_conn *conn = cls_conn->dd_data;
670 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
671 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
672 
673 	iscsi_sw_tcp_release_conn(conn);
674 
675 	ahash_request_free(tcp_sw_conn->rx_hash);
676 	if (tcp_sw_conn->tx_hash) {
677 		struct crypto_ahash *tfm;
678 
679 		tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
680 		ahash_request_free(tcp_sw_conn->tx_hash);
681 		crypto_free_ahash(tfm);
682 	}
683 
684 	iscsi_tcp_conn_teardown(cls_conn);
685 }
686 
687 static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
688 {
689 	struct iscsi_conn *conn = cls_conn->dd_data;
690 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
691 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
692 	struct socket *sock = tcp_sw_conn->sock;
693 
694 	/* userspace may have goofed up and not bound us */
695 	if (!sock)
696 		return;
697 
698 	sock->sk->sk_err = EIO;
699 	wake_up_interruptible(sk_sleep(sock->sk));
700 
701 	/* stop xmit side */
702 	iscsi_suspend_tx(conn);
703 
704 	/* stop recv side and release socket */
705 	iscsi_sw_tcp_release_conn(conn);
706 
707 	iscsi_conn_stop(cls_conn, flag);
708 }
709 
710 static int
711 iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
712 		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
713 		       int is_leading)
714 {
715 	struct iscsi_conn *conn = cls_conn->dd_data;
716 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
717 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
718 	struct sock *sk;
719 	struct socket *sock;
720 	int err;
721 
722 	/* look up the existing socket */
723 	sock = sockfd_lookup((int)transport_eph, &err);
724 	if (!sock) {
725 		iscsi_conn_printk(KERN_ERR, conn,
726 				  "sockfd_lookup failed %d\n", err);
727 		return -EEXIST;
728 	}
729 
730 	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
731 	if (err)
732 		goto free_socket;
733 
734 	mutex_lock(&tcp_sw_conn->sock_lock);
735 	/* bind iSCSI connection and socket */
736 	tcp_sw_conn->sock = sock;
737 	mutex_unlock(&tcp_sw_conn->sock_lock);
738 
739 	/* set up socket parameters */
740 	sk = sock->sk;
741 	sk->sk_reuse = SK_CAN_REUSE;
742 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
743 	sk->sk_allocation = GFP_ATOMIC;
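	/*
	 * The socket is driven from workqueue and memory-reclaim contexts:
	 * don't use the calling task's page frag, allow memalloc reserves,
	 * and abort instead of lingering on unsent data at close time.
	 */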
744 	sk->sk_use_task_frag = false;
745 	sk_set_memalloc(sk);
746 	sock_no_linger(sk);
747 
748 	iscsi_sw_tcp_conn_set_callbacks(conn);
749 	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
750 	/*
751 	 * set receive state machine into initial state
752 	 */
753 	iscsi_tcp_hdr_recv_prep(tcp_conn);
754 	return 0;
755 
756 free_socket:
757 	sockfd_put(sock);
758 	return err;
759 }
760 
761 static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
762 				       enum iscsi_param param, char *buf,
763 				       int buflen)
764 {
765 	struct iscsi_conn *conn = cls_conn->dd_data;
766 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
767 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
768 
769 	switch(param) {
770 	case ISCSI_PARAM_HDRDGST_EN:
771 		iscsi_set_param(cls_conn, param, buf, buflen);
772 		break;
773 	case ISCSI_PARAM_DATADGST_EN:
774 		mutex_lock(&tcp_sw_conn->sock_lock);
775 		if (!tcp_sw_conn->sock) {
776 			mutex_unlock(&tcp_sw_conn->sock_lock);
777 			return -ENOTCONN;
778 		}
779 		iscsi_set_param(cls_conn, param, buf, buflen);
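		/*
		 * With data digests the payload must stay stable while the
		 * CRC is computed and sent, so fall back from zero-copy
		 * sendpage to the copying sock_no_sendpage path.
		 */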
780 		tcp_sw_conn->sendpage = conn->datadgst_en ?
781 			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
782 		mutex_unlock(&tcp_sw_conn->sock_lock);
783 		break;
784 	case ISCSI_PARAM_MAX_R2T:
785 		return iscsi_tcp_set_max_r2t(conn, buf);
786 	default:
787 		return iscsi_set_param(cls_conn, param, buf, buflen);
788 	}
789 
790 	return 0;
791 }
792 
793 static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
794 				       enum iscsi_param param, char *buf)
795 {
796 	struct iscsi_conn *conn = cls_conn->dd_data;
797 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
798 	struct iscsi_tcp_conn *tcp_conn;
799 	struct sockaddr_in6 addr;
800 	struct socket *sock;
801 	int rc;
802 
803 	switch(param) {
804 	case ISCSI_PARAM_CONN_PORT:
805 	case ISCSI_PARAM_CONN_ADDRESS:
806 	case ISCSI_PARAM_LOCAL_PORT:
807 		spin_lock_bh(&conn->session->frwd_lock);
808 		if (!conn->session->leadconn) {
809 			spin_unlock_bh(&conn->session->frwd_lock);
810 			return -ENOTCONN;
811 		}
812 		/*
813 		 * The conn has been set up and bound, so just grab a ref
814 		 * in case a destroy runs while we are in the net layer.
815 		 */
816 		iscsi_get_conn(conn->cls_conn);
817 		spin_unlock_bh(&conn->session->frwd_lock);
818 
819 		tcp_conn = conn->dd_data;
820 		tcp_sw_conn = tcp_conn->dd_data;
821 
822 		mutex_lock(&tcp_sw_conn->sock_lock);
823 		sock = tcp_sw_conn->sock;
824 		if (!sock) {
825 			rc = -ENOTCONN;
826 			goto sock_unlock;
827 		}
828 
829 		if (param == ISCSI_PARAM_LOCAL_PORT)
830 			rc = kernel_getsockname(sock,
831 						(struct sockaddr *)&addr);
832 		else
833 			rc = kernel_getpeername(sock,
834 						(struct sockaddr *)&addr);
835 sock_unlock:
836 		mutex_unlock(&tcp_sw_conn->sock_lock);
837 		iscsi_put_conn(conn->cls_conn);
838 		if (rc < 0)
839 			return rc;
840 
841 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
842 						 &addr, param, buf);
843 	default:
844 		return iscsi_conn_get_param(cls_conn, param, buf);
845 	}
846 
847 	return 0;
848 }
849 
850 static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
851 				       enum iscsi_host_param param, char *buf)
852 {
853 	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
854 	struct iscsi_session *session;
855 	struct iscsi_conn *conn;
856 	struct iscsi_tcp_conn *tcp_conn;
857 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
858 	struct sockaddr_in6 addr;
859 	struct socket *sock;
860 	int rc;
861 
862 	switch (param) {
863 	case ISCSI_HOST_PARAM_IPADDRESS:
864 		session = tcp_sw_host->session;
865 		if (!session)
866 			return -ENOTCONN;
867 
868 		spin_lock_bh(&session->frwd_lock);
869 		conn = session->leadconn;
870 		if (!conn) {
871 			spin_unlock_bh(&session->frwd_lock);
872 			return -ENOTCONN;
873 		}
874 		tcp_conn = conn->dd_data;
875 		tcp_sw_conn = tcp_conn->dd_data;
876 		/*
877 		 * The conn has been set up and bound, so just grab a ref
878 		 * in case a destroy runs while we are in the net layer.
879 		 */
880 		iscsi_get_conn(conn->cls_conn);
881 		spin_unlock_bh(&session->frwd_lock);
882 
883 		mutex_lock(&tcp_sw_conn->sock_lock);
884 		sock = tcp_sw_conn->sock;
885 		if (!sock)
886 			rc = -ENOTCONN;
887 		else
888 			rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
889 		mutex_unlock(&tcp_sw_conn->sock_lock);
890 		iscsi_put_conn(conn->cls_conn);
891 		if (rc < 0)
892 			return rc;
893 
894 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
895 						 &addr,
896 						 (enum iscsi_param)param, buf);
897 	default:
898 		return iscsi_host_get_param(shost, param, buf);
899 	}
900 
901 	return 0;
902 }
903 
904 static void
905 iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
906 			    struct iscsi_stats *stats)
907 {
908 	struct iscsi_conn *conn = cls_conn->dd_data;
909 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
910 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
911 
912 	stats->custom_length = 3;
913 	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
914 	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
915 	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
916 	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
917 	strcpy(stats->custom[2].desc, "eh_abort_cnt");
918 	stats->custom[2].value = conn->eh_abort_cnt;
919 
920 	iscsi_tcp_conn_get_stats(cls_conn, stats);
921 }
922 
923 static struct iscsi_cls_session *
924 iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
925 			    uint16_t qdepth, uint32_t initial_cmdsn)
926 {
927 	struct iscsi_cls_session *cls_session;
928 	struct iscsi_session *session;
929 	struct iscsi_sw_tcp_host *tcp_sw_host;
930 	struct Scsi_Host *shost;
931 	int rc;
932 
933 	if (ep) {
934 		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
935 		return NULL;
936 	}
937 
938 	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
939 				 sizeof(struct iscsi_sw_tcp_host), 1);
940 	if (!shost)
941 		return NULL;
942 	shost->transportt = iscsi_sw_tcp_scsi_transport;
943 	shost->cmd_per_lun = qdepth;
944 	shost->max_lun = iscsi_max_lun;
945 	shost->max_id = 0;
946 	shost->max_channel = 0;
947 	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
948 
949 	rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
950 	if (rc < 0)
951 		goto free_host;
952 	shost->can_queue = rc;
953 
954 	if (iscsi_host_add(shost, NULL))
955 		goto free_host;
956 
957 	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
958 					  cmds_max, 0,
959 					  sizeof(struct iscsi_tcp_task) +
960 					  sizeof(struct iscsi_sw_tcp_hdrbuf),
961 					  initial_cmdsn, 0);
962 	if (!cls_session)
963 		goto remove_host;
964 	session = cls_session->dd_data;
965 
966 	if (iscsi_tcp_r2tpool_alloc(session))
967 		goto remove_session;
968 
969 	/* We are now fully set up, so expose the session to sysfs. */
970 	tcp_sw_host = iscsi_host_priv(shost);
971 	tcp_sw_host->session = session;
972 	return cls_session;
973 
974 remove_session:
975 	iscsi_session_teardown(cls_session);
976 remove_host:
977 	iscsi_host_remove(shost, false);
978 free_host:
979 	iscsi_host_free(shost);
980 	return NULL;
981 }
982 
983 static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
984 {
985 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
986 	struct iscsi_session *session = cls_session->dd_data;
987 
988 	if (WARN_ON_ONCE(session->leadconn))
989 		return;
990 
991 	iscsi_session_remove(cls_session);
992 	/*
993 	 * Our get_host_param needs to access the session, so remove the
994 	 * host from sysfs before freeing the session to make sure userspace
995 	 * is no longer accessing the callout.
996 	 */
997 	iscsi_host_remove(shost, false);
998 
999 	iscsi_tcp_r2tpool_free(cls_session->dd_data);
1000 
1001 	iscsi_session_free(cls_session);
1002 	iscsi_host_free(shost);
1003 }
1004 
1005 static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
1006 {
1007 	switch (param_type) {
1008 	case ISCSI_HOST_PARAM:
1009 		switch (param) {
1010 		case ISCSI_HOST_PARAM_NETDEV_NAME:
1011 		case ISCSI_HOST_PARAM_HWADDRESS:
1012 		case ISCSI_HOST_PARAM_IPADDRESS:
1013 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
1014 			return S_IRUGO;
1015 		default:
1016 			return 0;
1017 		}
1018 	case ISCSI_PARAM:
1019 		switch (param) {
1020 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
1021 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
1022 		case ISCSI_PARAM_HDRDGST_EN:
1023 		case ISCSI_PARAM_DATADGST_EN:
1024 		case ISCSI_PARAM_CONN_ADDRESS:
1025 		case ISCSI_PARAM_CONN_PORT:
1026 		case ISCSI_PARAM_LOCAL_PORT:
1027 		case ISCSI_PARAM_EXP_STATSN:
1028 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
1029 		case ISCSI_PARAM_PERSISTENT_PORT:
1030 		case ISCSI_PARAM_PING_TMO:
1031 		case ISCSI_PARAM_RECV_TMO:
1032 		case ISCSI_PARAM_INITIAL_R2T_EN:
1033 		case ISCSI_PARAM_MAX_R2T:
1034 		case ISCSI_PARAM_IMM_DATA_EN:
1035 		case ISCSI_PARAM_FIRST_BURST:
1036 		case ISCSI_PARAM_MAX_BURST:
1037 		case ISCSI_PARAM_PDU_INORDER_EN:
1038 		case ISCSI_PARAM_DATASEQ_INORDER_EN:
1039 		case ISCSI_PARAM_ERL:
1040 		case ISCSI_PARAM_TARGET_NAME:
1041 		case ISCSI_PARAM_TPGT:
1042 		case ISCSI_PARAM_USERNAME:
1043 		case ISCSI_PARAM_PASSWORD:
1044 		case ISCSI_PARAM_USERNAME_IN:
1045 		case ISCSI_PARAM_PASSWORD_IN:
1046 		case ISCSI_PARAM_FAST_ABORT:
1047 		case ISCSI_PARAM_ABORT_TMO:
1048 		case ISCSI_PARAM_LU_RESET_TMO:
1049 		case ISCSI_PARAM_TGT_RESET_TMO:
1050 		case ISCSI_PARAM_IFACE_NAME:
1051 		case ISCSI_PARAM_INITIATOR_NAME:
1052 			return S_IRUGO;
1053 		default:
1054 			return 0;
1055 		}
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
1062 {
1063 	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
1064 	struct iscsi_session *session = tcp_sw_host->session;
1065 	struct iscsi_conn *conn = session->leadconn;
1066 
1067 	if (conn->datadgst_en)
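	/*
	 * Data digests are calculated over the pages while they are in
	 * flight, so the block layer must not let them change underneath
	 * us (stable writes).
	 */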
1068 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
1069 				   sdev->request_queue);
1070 	blk_queue_dma_alignment(sdev->request_queue, 0);
1071 	return 0;
1072 }
1073 
1074 static const struct scsi_host_template iscsi_sw_tcp_sht = {
1075 	.module			= THIS_MODULE,
1076 	.name			= "iSCSI Initiator over TCP/IP",
1077 	.queuecommand           = iscsi_queuecommand,
1078 	.change_queue_depth	= scsi_change_queue_depth,
1079 	.can_queue		= ISCSI_TOTAL_CMDS_MAX,
1080 	.sg_tablesize		= 4096,
1081 	.max_sectors		= 0xFFFF,
1082 	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
1083 	.eh_timed_out		= iscsi_eh_cmd_timed_out,
1084 	.eh_abort_handler       = iscsi_eh_abort,
1085 	.eh_device_reset_handler= iscsi_eh_device_reset,
1086 	.eh_target_reset_handler = iscsi_eh_recover_target,
1087 	.dma_boundary		= PAGE_SIZE - 1,
1088 	.slave_configure        = iscsi_sw_tcp_slave_configure,
1089 	.proc_name		= "iscsi_tcp",
1090 	.this_id		= -1,
1091 	.track_queue_depth	= 1,
1092 	.cmd_size		= sizeof(struct iscsi_cmd),
1093 };
1094 
1095 static struct iscsi_transport iscsi_sw_tcp_transport = {
1096 	.owner			= THIS_MODULE,
1097 	.name			= "tcp",
1098 	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
1099 				  | CAP_DATADGST,
1100 	/* session management */
1101 	.create_session		= iscsi_sw_tcp_session_create,
1102 	.destroy_session	= iscsi_sw_tcp_session_destroy,
1103 	/* connection management */
1104 	.create_conn		= iscsi_sw_tcp_conn_create,
1105 	.bind_conn		= iscsi_sw_tcp_conn_bind,
1106 	.destroy_conn		= iscsi_sw_tcp_conn_destroy,
1107 	.attr_is_visible	= iscsi_sw_tcp_attr_is_visible,
1108 	.set_param		= iscsi_sw_tcp_conn_set_param,
1109 	.get_conn_param		= iscsi_sw_tcp_conn_get_param,
1110 	.get_session_param	= iscsi_session_get_param,
1111 	.start_conn		= iscsi_conn_start,
1112 	.stop_conn		= iscsi_sw_tcp_conn_stop,
1113 	/* iscsi host params */
1114 	.get_host_param		= iscsi_sw_tcp_host_get_param,
1115 	.set_host_param		= iscsi_host_set_param,
1116 	/* IO */
1117 	.send_pdu		= iscsi_conn_send_pdu,
1118 	.get_stats		= iscsi_sw_tcp_conn_get_stats,
1119 	/* iscsi task/cmd helpers */
1120 	.init_task		= iscsi_tcp_task_init,
1121 	.xmit_task		= iscsi_tcp_task_xmit,
1122 	.cleanup_task		= iscsi_tcp_cleanup_task,
1123 	/* low level pdu helpers */
1124 	.xmit_pdu		= iscsi_sw_tcp_pdu_xmit,
1125 	.init_pdu		= iscsi_sw_tcp_pdu_init,
1126 	.alloc_pdu		= iscsi_sw_tcp_pdu_alloc,
1127 	/* recovery */
1128 	.session_recovery_timedout = iscsi_session_recovery_timedout,
1129 };
1130 
1131 static int __init iscsi_sw_tcp_init(void)
1132 {
1133 	if (iscsi_max_lun < 1) {
1134 		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
1135 		       iscsi_max_lun);
1136 		return -EINVAL;
1137 	}
1138 
1139 	iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
1140 						&iscsi_sw_tcp_transport);
1141 	if (!iscsi_sw_tcp_scsi_transport)
1142 		return -ENODEV;
1143 
1144 	return 0;
1145 }
1146 
1147 static void __exit iscsi_sw_tcp_exit(void)
1148 {
1149 	iscsi_unregister_transport(&iscsi_sw_tcp_transport);
1150 }
1151 
1152 module_init(iscsi_sw_tcp_init);
1153 module_exit(iscsi_sw_tcp_exit);
1154