xref: /openbmc/linux/drivers/scsi/iscsi_tcp.c (revision b85f82f3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * iSCSI Initiator over TCP/IP Data-Path
4  *
5  * Copyright (C) 2004 Dmitry Yusupov
6  * Copyright (C) 2004 Alex Aizman
7  * Copyright (C) 2005 - 2006 Mike Christie
8  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
9  * maintained by open-iscsi@googlegroups.com
10  *
11  * See the file COPYING included with this distribution for more details.
12  *
13  * Credits:
14  *	Christoph Hellwig
15  *	FUJITA Tomonori
16  *	Arne Redlich
17  *	Zhenyu Wang
18  */
19 
20 #include <crypto/hash.h>
21 #include <linux/types.h>
22 #include <linux/inet.h>
23 #include <linux/slab.h>
24 #include <linux/sched/mm.h>
25 #include <linux/file.h>
26 #include <linux/blkdev.h>
27 #include <linux/delay.h>
28 #include <linux/kfifo.h>
29 #include <linux/scatterlist.h>
30 #include <linux/module.h>
31 #include <linux/backing-dev.h>
32 #include <net/tcp.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_transport_iscsi.h>
38 #include <trace/events/iscsi.h>
39 #include <trace/events/sock.h>
40 
41 #include "iscsi_tcp.h"
42 
/* Module identification. */
MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");

/* Transport/host templates registered with the iSCSI transport class. */
static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
static const struct scsi_host_template iscsi_sw_tcp_sht;
static struct iscsi_transport iscsi_sw_tcp_transport;

/* Highest LUN exposed on created hosts; read-only module parameter. */
static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

/*
 * When true, socket reads are deferred to the iscsi_q workqueue instead
 * of running inline in the network softirq (sampled per connection in
 * iscsi_sw_tcp_conn_create()).
 */
static bool iscsi_recv_from_iscsi_q;
module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");

/* Debug switch read by ISCSI_SW_TCP_DBG(); writable at runtime. */
static int iscsi_sw_tcp_dbg;
module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");
65 
/*
 * Connection-scoped debug printk + tracepoint.  The printk only fires
 * when the debug_iscsi_tcp module parameter is set; the tracepoint is
 * always emitted.
 *
 * Note: the trailing semicolon after "while (0)" was dropped so the
 * macro behaves as a single statement; call sites already supply the
 * ';', and the old form broke unbraced if/else usage.
 */
#define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...)		\
	do {							\
		if (iscsi_sw_tcp_dbg)				\
			iscsi_conn_printk(KERN_INFO, _conn,	\
					     "%s " dbg_fmt,	\
					     __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp,		\
				&(_conn)->cls_conn->dev,	\
				"%s " dbg_fmt, __func__, ##arg);\
	} while (0)
76 
77 
78 /**
79  * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
80  * @rd_desc: read descriptor
81  * @skb: socket buffer
82  * @offset: offset in skb
83  * @len: skb->len - offset
84  */
85 static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
86 			     unsigned int offset, size_t len)
87 {
88 	struct iscsi_conn *conn = rd_desc->arg.data;
89 	unsigned int consumed, total_consumed = 0;
90 	int status;
91 
92 	ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset);
93 
94 	do {
95 		status = 0;
96 		consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
97 		offset += consumed;
98 		total_consumed += consumed;
99 	} while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
100 
101 	ISCSI_SW_TCP_DBG(conn, "read %d bytes status %d\n",
102 			 skb->len - offset, status);
103 	return total_consumed;
104 }
105 
106 /**
107  * iscsi_sw_sk_state_check - check socket state
108  * @sk: socket
109  *
110  * If the socket is in CLOSE or CLOSE_WAIT we should
111  * not close the connection if there is still some
112  * data pending.
113  *
114  * Must be called with sk_callback_lock.
115  */
116 static inline int iscsi_sw_sk_state_check(struct sock *sk)
117 {
118 	struct iscsi_conn *conn = sk->sk_user_data;
119 
120 	if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
121 	    (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
122 	    !atomic_read(&sk->sk_rmem_alloc)) {
123 		ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
124 		iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
125 		return -ECONNRESET;
126 	}
127 	return 0;
128 }
129 
/* Drain all available data from the connection's socket into libiscsi. */
static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;
	read_descriptor_t rd_desc;

	/*
	 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
	 * We set count to 1 because we want the network layer to
	 * hand us all the skbs that are available. iscsi_tcp_recv
	 * handled pdus that cross buffers or pdus that still need data.
	 */
	rd_desc.arg.data = conn;
	rd_desc.count = 1;

	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);

	/* If we had to (atomically) map a highmem page,
	 * unmap it now. */
	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);

	/* Fail the connection if the peer closed the socket under us. */
	iscsi_sw_sk_state_check(sk);
}
154 
/*
 * Workqueue handler used when recv_from_iscsi_q is set: performs the
 * socket read from process context under the socket lock rather than
 * from the network softirq.
 */
static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
{
	struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
					       recvwork);
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	lock_sock(sk);
	iscsi_sw_tcp_recv_data(conn);
	release_sock(sk);
}
167 
/* sk_data_ready callback installed on the bound socket. */
static void iscsi_sw_tcp_data_ready(struct sock *sk)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_conn *conn;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		/* Callbacks already restored; connection is going away. */
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	/* Either defer the read to the iscsi_q workqueue or do it inline. */
	if (tcp_sw_conn->queue_recv)
		iscsi_conn_queue_recv(conn);
	else
		iscsi_sw_tcp_recv_data(conn);
	read_unlock_bh(&sk->sk_callback_lock);
}
191 
/*
 * sk_state_change callback: detect a peer-initiated close, then chain
 * to the socket's original state_change handler.
 */
static void iscsi_sw_tcp_state_change(struct sock *sk)
{
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_conn *conn;
	void (*old_state_change)(struct sock *);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		/* Connection already unbound from this socket. */
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}

	iscsi_sw_sk_state_check(sk);

	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	/* Snapshot the saved callback while the lock pins tcp_sw_conn ... */
	old_state_change = tcp_sw_conn->old_state_change;

	read_unlock_bh(&sk->sk_callback_lock);

	/* ... and invoke it outside the callback lock. */
	old_state_change(sk);
}
216 
/**
 * iscsi_sw_tcp_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 **/
static void iscsi_sw_tcp_write_space(struct sock *sk)
{
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	void (*old_write_space)(struct sock *);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		/* Socket no longer bound to an iSCSI connection. */
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}

	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	/* Grab the saved callback under the lock, call it outside. */
	old_write_space = tcp_sw_conn->old_write_space;
	read_unlock_bh(&sk->sk_callback_lock);

	old_write_space(sk);

	ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
	/* Socket can take more data again: kick the xmit side. */
	iscsi_conn_queue_xmit(conn);
}
245 
/*
 * Hook our data_ready/state_change/write_space handlers into the bound
 * socket, saving the originals so they can be chained to and restored
 * later by iscsi_sw_tcp_conn_restore_callbacks().
 */
static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	/* assign new callbacks */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = conn;
	tcp_sw_conn->old_data_ready = sk->sk_data_ready;
	tcp_sw_conn->old_state_change = sk->sk_state_change;
	tcp_sw_conn->old_write_space = sk->sk_write_space;
	sk->sk_data_ready = iscsi_sw_tcp_data_ready;
	sk->sk_state_change = iscsi_sw_tcp_state_change;
	sk->sk_write_space = iscsi_sw_tcp_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}
263 
/*
 * Undo iscsi_sw_tcp_conn_set_callbacks(): clear sk_user_data (which the
 * callbacks use as their "still bound" check) and put the socket's
 * original handlers back.
 */
static void
iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data    = NULL;
	sk->sk_data_ready   = tcp_sw_conn->old_data_ready;
	sk->sk_state_change = tcp_sw_conn->old_state_change;
	sk->sk_write_space  = tcp_sw_conn->old_write_space;
	sk->sk_no_check_tx = 0;
	write_unlock_bh(&sk->sk_callback_lock);
}
280 
/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmnit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	/* iscsi_tcp_segment_done() advances the segment by r bytes and
	 * returns true once there is nothing left to send. */
	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		unsigned int offset, copy;
		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		/* More of this PDU follows: tell TCP not to push yet. */
		if (segment->total_copied + segment->size < segment->total_size)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		/* NOTE(review): non-blocking sends when recvs run from the
		 * workqueue, presumably so the send path cannot sleep while
		 * the recv work holds the socket lock — confirm. */
		if (tcp_sw_conn->queue_recv)
			flags |= MSG_DONTWAIT;

		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
			/* Scatterlist payload: zero-copy page send. */
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
						  copy, flags);
		} else {
			/* Linear buffer (e.g. header): ordinary copy send. */
			struct msghdr msg = { .msg_flags = flags };
			struct kvec iov = {
				.iov_base = segment->data + offset,
				.iov_len = copy
			};

			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
		}

		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}
341 
/**
 * iscsi_sw_tcp_xmit - TCP transmit
 * @conn: iscsi connection
 *
 * Pushes the current out.segment until fully sent, running each
 * completed segment's done() hook (which may stage the next segment,
 * e.g. header -> payload).  Returns bytes sent, -EAGAIN if the socket
 * could not accept data, or -EIO after reporting a connection failure.
 **/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			/* Segment finished: let done() stage a follow-on
			 * segment (see iscsi_sw_tcp_send_hdr_done()). */
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);

	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery
	 * here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}
392 
393 /**
394  * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
395  * @conn: iscsi connection
396  */
397 static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
398 {
399 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
400 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
401 	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
402 
403 	return segment->total_copied - segment->total_size;
404 }
405 
/*
 * Transmit all queued PDU bytes for @task's connection.  Returns 0 when
 * the queue drained, -EAGAIN if the socket backed up, or a negative
 * error code.
 */
static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	unsigned int noreclaim_flag;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	int rc = 0;

	if (!tcp_sw_conn->sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "Transport not bound to socket!\n");
		return -EINVAL;
	}

	/* NOTE(review): presumably keeps TX-path allocations from
	 * recursing into reclaim (the socket is memalloc) — confirm. */
	noreclaim_flag = memalloc_noreclaim_save();

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0) {
			/* No progress though data remains queued. */
			rc = -EAGAIN;
			break;
		}
		if (rc < 0)
			break;
		rc = 0;
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return rc;
}
436 
437 /*
438  * This is called when we're done sending the header.
439  * Simply copy the data_segment to the send segment, and return.
440  */
441 static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
442 				      struct iscsi_segment *segment)
443 {
444 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
445 
446 	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
447 	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
448 			 "Header done. Next segment size %u total_size %u\n",
449 			 tcp_sw_conn->out.segment.size,
450 			 tcp_sw_conn->out.segment.total_size);
451 	return 0;
452 }
453 
/*
 * Stage a PDU header (plus optional header digest) as the active send
 * segment and clear the payload segment for the caller to fill in.
 */
static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
				      hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	/* Header done -> iscsi_sw_tcp_send_hdr_done switches to payload. */
	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}
487 
/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct ahash_request *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	/* Pass the tx digest request so the segment code checksums
	 * the payload as it is copied out. */
	if (conn->datadgst_en)
		tx_hash = tcp_sw_conn->tx_hash;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len,
				     NULL, tx_hash);
}
519 
/*
 * Like iscsi_sw_tcp_send_data_prep() but for payloads held in a single
 * linear buffer (e.g. management PDU data).
 */
static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct ahash_request *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = tcp_sw_conn->tx_hash;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				data, len, NULL, tx_hash);
}
543 
544 static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
545 				 unsigned int offset, unsigned int count)
546 {
547 	struct iscsi_conn *conn = task->conn;
548 	int err = 0;
549 
550 	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
551 
552 	if (!count)
553 		return 0;
554 
555 	if (!task->sc)
556 		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
557 	else {
558 		struct scsi_data_buffer *sdb = &task->sc->sdb;
559 
560 		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
561 						  sdb->table.nents, offset,
562 						  count);
563 	}
564 
565 	if (err) {
566 		/* got invalid offset/len */
567 		return -EIO;
568 	}
569 	return 0;
570 }
571 
/*
 * Point task->hdr at the per-task header buffer that follows the
 * iscsi_tcp_task area in dd_data, reserving room for a header digest
 * at the end (hence the ISCSI_DIGEST_SIZE deduction from hdr_max).
 */
static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}
580 
/*
 * Allocate an iSCSI/TCP connection plus the crc32c tx/rx ahash requests
 * used for optional header/data digests.  Returns NULL on any failure.
 */
static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct crypto_ahash *tfm;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
	/* Latch the module param: recv policy is fixed per connection. */
	tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;

	mutex_init(&tcp_sw_conn->sock_lock);

	/* One crc32c transform backs both the tx and rx digest requests. */
	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto free_conn;

	tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!tcp_sw_conn->tx_hash)
		goto free_tfm;
	ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);

	tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!tcp_sw_conn->rx_hash)
		goto free_tx_hash;
	ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);

	tcp_conn->rx_hash = tcp_sw_conn->rx_hash;

	return cls_conn;

free_tx_hash:
	ahash_request_free(tcp_sw_conn->tx_hash);
free_tfm:
	crypto_free_ahash(tfm);
free_conn:
	iscsi_conn_printk(KERN_ERR, conn,
			  "Could not create connection due to crc32c "
			  "loading error. Make sure the crc32c "
			  "module is built as a module or into the "
			  "kernel\n");
	iscsi_tcp_conn_teardown(cls_conn);
	return NULL;
}
634 
/* Detach and drop the socket bound to @conn, restoring its callbacks. */
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/*
	 * The iscsi transport class will make sure we are not called in
	 * parallel with start, stop, bind and destroys. However, this can be
	 * called twice if userspace does a stop then a destroy.
	 */
	if (!sock)
		return;

	/*
	 * Make sure we start socket shutdown now in case userspace is up
	 * but delayed in releasing the socket.
	 */
	kernel_sock_shutdown(sock, SHUT_RDWR);

	/* Keep sk alive across the callback restore. */
	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(conn);
	sock_put(sock->sk);

	iscsi_suspend_rx(conn);

	/* Clear under sock_lock so concurrent param/getname lookups see
	 * NULL rather than a socket being released. */
	mutex_lock(&tcp_sw_conn->sock_lock);
	tcp_sw_conn->sock = NULL;
	mutex_unlock(&tcp_sw_conn->sock_lock);
	sockfd_put(sock);
}
666 
/* Tear down a connection: release the socket, free digest resources. */
static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);

	ahash_request_free(tcp_sw_conn->rx_hash);
	if (tcp_sw_conn->tx_hash) {
		struct crypto_ahash *tfm;

		/* tx and rx requests share one transform (see
		 * iscsi_sw_tcp_conn_create()); free it exactly once. */
		tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
		ahash_request_free(tcp_sw_conn->tx_hash);
		crypto_free_ahash(tfm);
	}

	iscsi_tcp_conn_teardown(cls_conn);
}
686 
/* Stop a connection: error the socket, quiesce both directions,
 * release the socket, then run the generic stop path. */
static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;

	/* Error out anyone sleeping on this socket. */
	sock->sk->sk_err = EIO;
	wake_up_interruptible(sk_sleep(sock->sk));

	/* stop xmit side */
	iscsi_suspend_tx(conn);

	/* stop recv side and release socket */
	iscsi_sw_tcp_release_conn(conn);

	iscsi_conn_stop(cls_conn, flag);
}
709 
/*
 * Bind an iSCSI connection to the TCP socket userspace opened and
 * passed down as a file descriptor in @transport_eph, then install our
 * socket callbacks and prime the receive state machine.
 */
static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	mutex_lock(&tcp_sw_conn->sock_lock);
	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;
	mutex_unlock(&tcp_sw_conn->sock_lock);

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = SK_CAN_REUSE;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;
	sk->sk_use_task_frag = false;
	/* Mark the socket memalloc so it can make progress under
	 * memory pressure (this transport may sit under swap/block I/O). */
	sk_set_memalloc(sk);
	sock_no_linger(sk);

	iscsi_sw_tcp_conn_set_callbacks(conn);
	/* Cache the protocol's sendpage op; may later be swapped for
	 * sock_no_sendpage when data digests are enabled. */
	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}
760 
/* Set a connection parameter, handling the few params that need
 * transport-specific side effects before/instead of the generic path. */
static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);

		mutex_lock(&tcp_sw_conn->sock_lock);
		if (!tcp_sw_conn->sock) {
			mutex_unlock(&tcp_sw_conn->sock_lock);
			return -ENOTCONN;
		}
		/* With data digests on, use the non-zero-copy send path
		 * (presumably so pages can't change under the checksum —
		 * NOTE(review): confirm rationale). */
		tcp_sw_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		mutex_unlock(&tcp_sw_conn->sock_lock);
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}
793 
/* Get a connection parameter; address/port params are answered from
 * the live socket, everything else via the generic helper. */
static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct sockaddr_in6 addr;
	struct socket *sock;
	int rc;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
	case ISCSI_PARAM_LOCAL_PORT:
		spin_lock_bh(&conn->session->frwd_lock);
		if (!conn->session->leadconn) {
			spin_unlock_bh(&conn->session->frwd_lock);
			return -ENOTCONN;
		}
		/*
		 * The conn has been setup and bound, so just grab a ref
		 * incase a destroy runs while we are in the net layer.
		 */
		iscsi_get_conn(conn->cls_conn);
		spin_unlock_bh(&conn->session->frwd_lock);

		tcp_conn = conn->dd_data;
		tcp_sw_conn = tcp_conn->dd_data;

		/* sock_lock keeps the socket from being released under us. */
		mutex_lock(&tcp_sw_conn->sock_lock);
		sock = tcp_sw_conn->sock;
		if (!sock) {
			rc = -ENOTCONN;
			goto sock_unlock;
		}

		/* Local port comes from our end; conn addr/port from peer. */
		if (param == ISCSI_PARAM_LOCAL_PORT)
			rc = kernel_getsockname(sock,
						(struct sockaddr *)&addr);
		else
			rc = kernel_getpeername(sock,
						(struct sockaddr *)&addr);
sock_unlock:
		mutex_unlock(&tcp_sw_conn->sock_lock);
		iscsi_put_conn(conn->cls_conn);
		if (rc < 0)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return 0;
}
850 
/* Get a host parameter; the host IP address is answered from the lead
 * connection's socket, everything else via the generic helper. */
static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
				       enum iscsi_host_param param, char *buf)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct sockaddr_in6 addr;
	struct socket *sock;
	int rc;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		session = tcp_sw_host->session;
		if (!session)
			return -ENOTCONN;

		spin_lock_bh(&session->frwd_lock);
		conn = session->leadconn;
		if (!conn) {
			spin_unlock_bh(&session->frwd_lock);
			return -ENOTCONN;
		}
		tcp_conn = conn->dd_data;
		tcp_sw_conn = tcp_conn->dd_data;
		/*
		 * The conn has been setup and bound, so just grab a ref
		 * incase a destroy runs while we are in the net layer.
		 */
		iscsi_get_conn(conn->cls_conn);
		spin_unlock_bh(&session->frwd_lock);

		/* sock_lock keeps the socket from being released under us. */
		mutex_lock(&tcp_sw_conn->sock_lock);
		sock = tcp_sw_conn->sock;
		if (!sock)
			rc = -ENOTCONN;
		else
			rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
		mutex_unlock(&tcp_sw_conn->sock_lock);
		iscsi_put_conn(conn->cls_conn);
		if (rc < 0)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr,
						 (enum iscsi_param)param, buf);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return 0;
}
904 
905 static void
906 iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
907 			    struct iscsi_stats *stats)
908 {
909 	struct iscsi_conn *conn = cls_conn->dd_data;
910 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
911 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
912 
913 	stats->custom_length = 3;
914 	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
915 	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
916 	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
917 	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
918 	strcpy(stats->custom[2].desc, "eh_abort_cnt");
919 	stats->custom[2].value = conn->eh_abort_cnt;
920 
921 	iscsi_tcp_conn_get_stats(cls_conn, stats);
922 }
923 
/*
 * Create a session and its backing Scsi_Host.  This transport takes no
 * endpoint (@ep must be NULL); the socket arrives later via conn_bind.
 */
static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_sw_tcp_host *tcp_sw_host;
	struct Scsi_Host *shost;
	int rc;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
				 sizeof(struct iscsi_sw_tcp_host), 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	/* Clamp the queue depth to what libiscsi can actually track. */
	rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
	if (rc < 0)
		goto free_host;
	shost->can_queue = rc;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	/* Per-task dd_data holds the iscsi_tcp_task plus header buffer. */
	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;

	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	/* We are now fully setup so expose the session to sysfs. */
	tcp_sw_host = iscsi_host_priv(shost);
	tcp_sw_host->session = session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost, false);
free_host:
	iscsi_host_free(shost);
	return NULL;
}
983 
/* Destroy a session created by iscsi_sw_tcp_session_create() along
 * with its backing Scsi_Host.  All connections must already be gone. */
static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_session *session = cls_session->dd_data;

	/* A surviving leadconn means teardown ordering was violated. */
	if (WARN_ON_ONCE(session->leadconn))
		return;

	iscsi_session_remove(cls_session);
	/*
	 * Our get_host_param needs to access the session, so remove the
	 * host from sysfs before freeing the session to make sure userspace
	 * is no longer accessing the callout.
	 */
	iscsi_host_remove(shost, false);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);

	iscsi_session_free(cls_session);
	iscsi_host_free(shost);
}
1005 
/* sysfs visibility callout: whitelist of the host and connection/session
 * parameters this transport exposes (all read-only). */
static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
1061 
1062 static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
1063 {
1064 	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
1065 	struct iscsi_session *session = tcp_sw_host->session;
1066 	struct iscsi_conn *conn = session->leadconn;
1067 
1068 	if (conn->datadgst_en)
1069 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
1070 				   sdev->request_queue);
1071 	blk_queue_dma_alignment(sdev->request_queue, 0);
1072 	return 0;
1073 }
1074 
/*
 * SCSI host template for the software iSCSI/TCP initiator.  Most error
 * handling and queueing callouts come from libiscsi; only the per-device
 * queue configuration is driver specific.
 */
static const struct scsi_host_template iscsi_sw_tcp_sht = {
	.module			= THIS_MODULE,
	.name			= "iSCSI Initiator over TCP/IP",
	.queuecommand           = iscsi_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.can_queue		= ISCSI_TOTAL_CMDS_MAX,
	.sg_tablesize		= 4096,
	.max_sectors		= 0xFFFF,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out		= iscsi_eh_cmd_timed_out,
	.eh_abort_handler       = iscsi_eh_abort,
	.eh_device_reset_handler= iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.dma_boundary		= PAGE_SIZE - 1,
	.slave_configure        = iscsi_sw_tcp_slave_configure,
	.proc_name		= "iscsi_tcp",
	.this_id		= -1,
	.track_queue_depth	= 1,
	.cmd_size		= sizeof(struct iscsi_cmd),
};
1095 
/*
 * iscsi_transport callout table registered with the iSCSI transport
 * class.  Generic behavior is delegated to libiscsi/libiscsi_tcp
 * (iscsi_* / iscsi_tcp_* callouts); the iscsi_sw_tcp_* callouts are
 * this driver's socket-based implementations.
 */
static struct iscsi_transport iscsi_sw_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	/* session management */
	.create_session		= iscsi_sw_tcp_session_create,
	.destroy_session	= iscsi_sw_tcp_session_destroy,
	/* connection management */
	.create_conn		= iscsi_sw_tcp_conn_create,
	.bind_conn		= iscsi_sw_tcp_conn_bind,
	.destroy_conn		= iscsi_sw_tcp_conn_destroy,
	.attr_is_visible	= iscsi_sw_tcp_attr_is_visible,
	.set_param		= iscsi_sw_tcp_conn_set_param,
	.get_conn_param		= iscsi_sw_tcp_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_sw_tcp_conn_stop,
	/* iscsi host params */
	.get_host_param		= iscsi_sw_tcp_host_get_param,
	.set_host_param		= iscsi_host_set_param,
	/* IO */
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_sw_tcp_conn_get_stats,
	/* iscsi task/cmd helpers */
	.init_task		= iscsi_tcp_task_init,
	.xmit_task		= iscsi_tcp_task_xmit,
	.cleanup_task		= iscsi_tcp_cleanup_task,
	/* low level pdu helpers */
	.xmit_pdu		= iscsi_sw_tcp_pdu_xmit,
	.init_pdu		= iscsi_sw_tcp_pdu_init,
	.alloc_pdu		= iscsi_sw_tcp_pdu_alloc,
	/* recovery */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
1131 
1132 static int __init iscsi_sw_tcp_init(void)
1133 {
1134 	if (iscsi_max_lun < 1) {
1135 		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
1136 		       iscsi_max_lun);
1137 		return -EINVAL;
1138 	}
1139 
1140 	iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
1141 						&iscsi_sw_tcp_transport);
1142 	if (!iscsi_sw_tcp_scsi_transport)
1143 		return -ENODEV;
1144 
1145 	return 0;
1146 }
1147 
/*
 * iscsi_sw_tcp_exit - module exit point; undo the transport
 * registration performed in iscsi_sw_tcp_init().
 */
static void __exit iscsi_sw_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_sw_tcp_transport);
}
1152 
/* Hook the init/exit routines into the module loader. */
module_init(iscsi_sw_tcp_init);
module_exit(iscsi_sw_tcp_exit);
1155