// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
#include <trace/events/sock.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA		0x400000 /* 16M arbitrary limit */

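/*
 * Parse a base-10 integer from @str and accept it only if it lies within
 * [@min, @max].  Shared setter for both module parameters below.
 */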
static int param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int set_params(const char *str, const struct kernel_param *kp)
{
	return param_store_val(str, kp->arg, 0, INT_MAX);
}

static const struct kernel_param_ops set_param_ops = {
	.set	= set_params,
	.get	= param_get_int,
};

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");

/* Define a time period (in usecs) during which io_work() shall sample an
 * activated queue before determining it to be idle.  This optional module
 * behavior can enable NIC solutions that support socket optimized packet
 * processing using advanced interrupt moderation techniques.
 */
static int idle_poll_period_usecs;
device_param_cb(idle_poll_period_usecs, &set_param_ops,
		&idle_poll_period_usecs, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs: Default 0");

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

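/*
 * Per-command context: the embedded nvmet request, preallocated PDU
 * buffers for each possible reply type, and receive/send progress
 * counters (rbytes_done/wbytes_done) that persist across partial
 * socket operations.
 */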
struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	struct msghdr			recv_msg;
	struct bio_vec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	unsigned long		poll_end;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

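/*
 * On-the-wire PDU layout (NVMe/TCP 1.0):
 *
 *   [ header (hlen) ][ HDGST (4) ][ data ][ DDGST (4) ]
 *
 * No PAD bytes appear in either direction since this target advertises
 * cpda = 0 and rejects a host hpda != 0.  nvmet_tcp_hdgst() computes the
 * crc32c digest over the first @len bytes of @pdu and stores the 4-byte
 * result directly behind the header, i.e. at @pdu + @len.
 */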
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

/* If cmd buffers are NULL, no operation is performed */
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->iov = NULL;
	cmd->req.sg = NULL;
}

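/*
 * Build a bio_vec array covering the data portion of the current PDU,
 * starting at offset rbytes_done into the command's scatterlist, and
 * point cmd->recv_msg at it so payload is received directly into the
 * destination pages.
 */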
static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct bio_vec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;
	int nr_pages;

	length = cmd->pdu_len;
	nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		bvec_set_page(iov, sg_page(sg), iov_len,
				sg->offset + sg_offset);

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
		      nr_pages, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

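/*
 * Allocate the command's scatterlist according to the SGL descriptor in
 * the SQE.  An offset data block descriptor means in-capsule (inline)
 * data, which is only valid for writes and must fit within the port's
 * advertised inline data size.
 */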
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	nvmet_tcp_free_cmd_buffers(cmd);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

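/*
 * Prepare a C2HData PDU that carries the whole read payload in a single
 * PDU (data_offset is wbytes_done, which is 0 here, and plen covers the
 * full transfer plus digests).  DATA_SUCCESS is set only when SQHD
 * updates are disabled, letting the host complete the command without a
 * separate response PDU.
 */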
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

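/*
 * Pick the next command to transmit, refilling the ordered send list
 * from the lockless resp_list when it runs dry.  The fetched command is
 * primed with the matching PDU: C2HData for reads, R2T for writes that
 * still need host data, or a plain response otherwise.
 */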
static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

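/*
 * ->queue_response() fabrics callback, possibly invoked from the backend
 * completion context.  Responses are staged on the lockless resp_list
 * and drained by io_work on the queue's home CPU; a write still awaiting
 * its inline data is deferred until that data has been received.
 */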
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue	*queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
	};
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

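/*
 * Stream the payload pages out of the scatterlist.  MSG_MORE stays set
 * as long as more traffic is known to follow: further sends in this
 * batch, the rest of this transfer, a trailing data digest, or a
 * response PDU.
 */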
static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct msghdr msg = {
			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
		};
		struct page *page = sg_page(cmd->cur_sg);
		struct bio_vec bvec;
		u32 left = cmd->cur_sg->length - cmd->offset;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			msg.msg_flags |= MSG_MORE;

		bvec_set_page(&bvec, page, left, cmd->offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
		ret = sock_sendmsg(cmd->queue->sock, &msg);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with this sg entry? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled)
		nvmet_tcp_free_cmd_buffers(cmd);

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	nvmet_tcp_free_cmd_buffers(cmd);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

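/*
 * Validate the host's ICReq (PDU length, PFV 1.0, no header alignment)
 * and answer with an ICResp that echoes the negotiated digest settings
 * and advertises our H2CData size limit; on success the queue goes live.
 */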
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		return ret; /* queue removal will cleanup */

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	/*
	 * This command has not been processed yet, hence we are trying to
	 * figure out if there is still pending data left to receive. If
	 * there isn't, we can simply prepare for the next pdu and bail out,
	 * otherwise we will need to prepare a buffer and receive the
	 * stale data before continuing forward.
	 */
	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_build_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

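/*
 * An H2CData PDU carries host payload for an earlier R2T (or for the
 * connect command, before the command array exists).  Reject a PDU whose
 * ttag is out of bounds, whose offset does not continue where the last
 * transfer left off, or whose length is zero, inconsistent with plen, or
 * above NVMET_TCP_MAXH2CDATA.
 */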
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;
	unsigned int exp_data_len;

	if (likely(queue->nr_cmds)) {
		if (unlikely(data->ttag >= queue->nr_cmds)) {
			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
				queue->idx, data->ttag, queue->nr_cmds);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		cmd = &queue->cmds[data->ttag];
	} else {
		cmd = &queue->connect;
	}

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	exp_data_len = le32_to_cpu(data->hdr.plen) -
			nvmet_tcp_hdgst_len(queue) -
			nvmet_tcp_ddgst_len(queue) -
			sizeof(*data);

	cmd->pdu_len = le32_to_cpu(data->data_length);
	if (unlikely(cmd->pdu_len != exp_data_len ||
		     cmd->pdu_len == 0 ||
		     cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
		pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
		/* FIXME: use proper transport errors */
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}
	cmd->pdu_recv = 0;
	nvmet_tcp_build_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (unlikely(hdr->type == nvme_tcp_icreq)) {
		pr_err("queue %d: received icreq pdu in state %d\n",
			queue->idx, queue->state);
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_build_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

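/*
 * Receive a PDU header in two steps: first the common 8-byte
 * nvme_tcp_hdr, then - once type and hlen have been validated - the
 * remainder of the type-specific header plus any header digest.
 */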
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd  *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_req_uninit(&cmd->req);
		nvmet_tcp_free_cmd_buffers(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		queue_work(nvmet_wq, &queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{
	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
}

static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
		int ops)
{
	if (!idle_poll_period_usecs)
		return false;

	if (ops)
		nvmet_tcp_arm_queue_deadline(queue);

	return !time_after(jiffies, queue->poll_end);
}

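/*
 * Per-queue worker: alternate bounded receive and send passes until
 * nothing is pending or NVMET_TCP_IO_WORK_BUDGET operations have been
 * handled, then requeue itself if work remains or an idle-poll deadline
 * is still running.
 */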
static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * Requeue the worker if idle deadline period is in progress or any
	 * ops activity was recorded during the do-while loop above.
	 */
	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}

static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_req_uninit(&cmd->req);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_req_uninit(&queue->connect.req);
	}
}

static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++)
		nvmet_tcp_free_cmd_buffers(cmd);
	nvmet_tcp_free_cmd_buffers(&queue->connect);
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct page *page;
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	cancel_work_sync(&queue->io_work);
	/* stop accepting incoming data */
	queue->rcv_state = NVMET_TCP_RECV_ERR;

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	nvmet_tcp_free_cmd_data_in_buffers(queue);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_free(&nvmet_tcp_queue_ida, queue->idx);

	page = virt_to_head_page(queue->pf_cache.va);
	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
	kfree(queue);
}

static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		break;
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

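/*
 * Finish setting up an accepted socket: record local and peer addresses,
 * apply socket options (no linger, optional priority and TOS), and
 * install our callbacks under sk_callback_lock - but only while the
 * socket is still ESTABLISHED, so a connection that raced to close is
 * never consumed.
 */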
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	ret = 0;
	write_lock_bh(&sock->sk->sk_callback_lock);
	if (sock->sk->sk_state != TCP_ESTABLISHED) {
		/*
		 * If the socket is already closing, don't even start
		 * consuming it
		 */
		ret = -ENOTCONN;
	} else {
		sock->sk->sk_user_data = queue;
		queue->data_ready = sock->sk->sk_data_ready;
		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
		queue->state_change = sock->sk->sk_state_change;
		sock->sk->sk_state_change = nvmet_tcp_state_change;
		queue->write_space = sock->sk->sk_write_space;
		sock->sk->sk_write_space = nvmet_tcp_write_space;
		if (idle_poll_period_usecs)
			nvmet_tcp_arm_queue_deadline(queue);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return ret;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		queue_work(nvmet_wq, &port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->port == port)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);
	/*
	 * Destroy the remaining queues, which do not yet belong to any
	 * controller.
	 */
	nvmet_tcp_destroy_port_queues(port);

	sock_release(port->sock);
	kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

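/*
 * ->install_queue() fabrics callback.  The command array is sized at
 * twice the SQ depth, presumably to leave headroom for commands whose
 * responses are still queued for transmission while new ones arrive.
 */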
static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_workqueue(nvmet_wq);
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
				WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_workqueue(nvmet_wq);
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_workqueue(nvmet_wq);

	destroy_workqueue(nvmet_tcp_wq);
	ida_destroy(&nvmet_tcp_queue_ida);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
1936