xref: /openbmc/linux/drivers/nvme/host/tcp.c (revision 2022ca0a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP host.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
12 #include <net/sock.h>
13 #include <net/tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
16 #include <net/busy_poll.h>
17 
18 #include "nvme.h"
19 #include "fabrics.h"
20 
21 struct nvme_tcp_queue;
22 
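/*
 * Send-side state machine (see nvme_tcp_try_send()): a command PDU is sent
 * first; writes with in-capsule data move straight to NVME_TCP_SEND_DATA,
 * otherwise an R2T from the controller moves the request to
 * NVME_TCP_SEND_H2C_PDU and then NVME_TCP_SEND_DATA.  NVME_TCP_SEND_DDGST
 * is entered only when data digests are enabled.
 */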
23 enum nvme_tcp_send_state {
24 	NVME_TCP_SEND_CMD_PDU = 0,
25 	NVME_TCP_SEND_H2C_PDU,
26 	NVME_TCP_SEND_DATA,
27 	NVME_TCP_SEND_DDGST,
28 };
29 
30 struct nvme_tcp_request {
31 	struct nvme_request	req;
32 	void			*pdu;
33 	struct nvme_tcp_queue	*queue;
34 	u32			data_len;
35 	u32			pdu_len;
36 	u32			pdu_sent;
37 	u16			ttag;
38 	struct list_head	entry;
39 	__le32			ddgst;
40 
41 	struct bio		*curr_bio;
42 	struct iov_iter		iter;
43 
44 	/* send state */
45 	size_t			offset;
46 	size_t			data_sent;
47 	enum nvme_tcp_send_state state;
48 };
49 
50 enum nvme_tcp_queue_flags {
51 	NVME_TCP_Q_ALLOCATED	= 0,
52 	NVME_TCP_Q_LIVE		= 1,
53 };
54 
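/*
 * Receive-side state is not stored explicitly; nvme_tcp_recv_state()
 * derives it from the per-queue byte counters: pdu_remaining selects
 * NVME_TCP_RECV_PDU, ddgst_remaining selects NVME_TCP_RECV_DDGST, and
 * otherwise payload bytes are consumed in NVME_TCP_RECV_DATA.
 */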
55 enum nvme_tcp_recv_state {
56 	NVME_TCP_RECV_PDU = 0,
57 	NVME_TCP_RECV_DATA,
58 	NVME_TCP_RECV_DDGST,
59 };
60 
61 struct nvme_tcp_ctrl;
62 struct nvme_tcp_queue {
63 	struct socket		*sock;
64 	struct work_struct	io_work;
65 	int			io_cpu;
66 
67 	spinlock_t		lock;
68 	struct list_head	send_list;
69 
70 	/* recv state */
71 	void			*pdu;
72 	int			pdu_remaining;
73 	int			pdu_offset;
74 	size_t			data_remaining;
75 	size_t			ddgst_remaining;
76 	unsigned int		nr_cqe;
77 
78 	/* send state */
79 	struct nvme_tcp_request *request;
80 
81 	int			queue_size;
82 	size_t			cmnd_capsule_len;
83 	struct nvme_tcp_ctrl	*ctrl;
84 	unsigned long		flags;
85 	bool			rd_enabled;
86 
87 	bool			hdr_digest;
88 	bool			data_digest;
89 	struct ahash_request	*rcv_hash;
90 	struct ahash_request	*snd_hash;
91 	__le32			exp_ddgst;
92 	__le32			recv_ddgst;
93 
94 	struct page_frag_cache	pf_cache;
95 
96 	void (*state_change)(struct sock *);
97 	void (*data_ready)(struct sock *);
98 	void (*write_space)(struct sock *);
99 };
100 
101 struct nvme_tcp_ctrl {
102 	/* read only in the hot path */
103 	struct nvme_tcp_queue	*queues;
104 	struct blk_mq_tag_set	tag_set;
105 
106 	/* other member variables */
107 	struct list_head	list;
108 	struct blk_mq_tag_set	admin_tag_set;
109 	struct sockaddr_storage addr;
110 	struct sockaddr_storage src_addr;
111 	struct nvme_ctrl	ctrl;
112 
113 	struct work_struct	err_work;
114 	struct delayed_work	connect_work;
115 	struct nvme_tcp_request async_req;
116 	u32			io_queues[HCTX_MAX_TYPES];
117 };
118 
119 static LIST_HEAD(nvme_tcp_ctrl_list);
120 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
121 static struct workqueue_struct *nvme_tcp_wq;
122 static struct blk_mq_ops nvme_tcp_mq_ops;
123 static struct blk_mq_ops nvme_tcp_admin_mq_ops;
124 
125 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
126 {
127 	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
128 }
129 
130 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
131 {
132 	return queue - queue->ctrl->queues;
133 }
134 
135 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
136 {
137 	u32 queue_idx = nvme_tcp_queue_id(queue);
138 
139 	if (queue_idx == 0)
140 		return queue->ctrl->admin_tag_set.tags[queue_idx];
141 	return queue->ctrl->tag_set.tags[queue_idx - 1];
142 }
143 
144 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
145 {
146 	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
147 }
148 
149 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
150 {
151 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
152 }
153 
154 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
155 {
156 	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
157 }
158 
159 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
160 {
161 	return req == &req->queue->ctrl->async_req;
162 }
163 
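/*
 * A write may carry its payload inside the command capsule (inline data)
 * only if it fits in the capsule space left after the SQE, i.e.
 * nvme_tcp_inline_data_size().  AEN requests never carry data.
 */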
164 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
165 {
166 	struct request *rq;
167 	unsigned int bytes;
168 
169 	if (unlikely(nvme_tcp_async_req(req)))
170 		return false; /* async events don't have a request */
171 
172 	rq = blk_mq_rq_from_pdu(req);
173 	bytes = blk_rq_payload_bytes(rq);
174 
175 	return rq_data_dir(rq) == WRITE && bytes &&
176 		bytes <= nvme_tcp_inline_data_size(req->queue);
177 }
178 
179 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
180 {
181 	return req->iter.bvec->bv_page;
182 }
183 
184 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
185 {
186 	return req->iter.bvec->bv_offset + req->iter.iov_offset;
187 }
188 
189 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
190 {
191 	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
192 			req->pdu_len - req->pdu_sent);
193 }
194 
195 static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
196 {
197 	return req->iter.iov_offset;
198 }
199 
200 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
201 {
202 	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
203 			req->pdu_len - req->pdu_sent : 0;
204 }
205 
206 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
207 		int len)
208 {
209 	return nvme_tcp_pdu_data_left(req) <= len;
210 }
211 
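/*
 * Build an iov_iter over the request payload: either the single
 * special_vec segment (e.g. a discard) or the bvec array of the current
 * bio, with iov_offset seeded from bi_bvec_done so a partially consumed
 * first bvec is skipped.
 */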
212 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
213 		unsigned int dir)
214 {
215 	struct request *rq = blk_mq_rq_from_pdu(req);
216 	struct bio_vec *vec;
217 	unsigned int size;
218 	int nsegs;
219 	size_t offset;
220 
221 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
222 		vec = &rq->special_vec;
223 		nsegs = 1;
224 		size = blk_rq_payload_bytes(rq);
225 		offset = 0;
226 	} else {
227 		struct bio *bio = req->curr_bio;
228 
229 		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
230 		nsegs = bio_segments(bio);
231 		size = bio->bi_iter.bi_size;
232 		offset = bio->bi_iter.bi_bvec_done;
233 	}
234 
235 	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
236 	req->iter.iov_offset = offset;
237 }
238 
239 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
240 		int len)
241 {
242 	req->data_sent += len;
243 	req->pdu_sent += len;
244 	iov_iter_advance(&req->iter, len);
245 	if (!iov_iter_count(&req->iter) &&
246 	    req->data_sent < req->data_len) {
247 		req->curr_bio = req->curr_bio->bi_next;
248 		nvme_tcp_init_iter(req, WRITE);
249 	}
250 }
251 
252 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
253 {
254 	struct nvme_tcp_queue *queue = req->queue;
255 
256 	spin_lock(&queue->lock);
257 	list_add_tail(&req->entry, &queue->send_list);
258 	spin_unlock(&queue->lock);
259 
260 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
261 }
262 
263 static inline struct nvme_tcp_request *
264 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
265 {
266 	struct nvme_tcp_request *req;
267 
268 	spin_lock(&queue->lock);
269 	req = list_first_entry_or_null(&queue->send_list,
270 			struct nvme_tcp_request, entry);
271 	if (req)
272 		list_del(&req->entry);
273 	spin_unlock(&queue->lock);
274 
275 	return req;
276 }
277 
278 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
279 		__le32 *dgst)
280 {
281 	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
282 	crypto_ahash_final(hash);
283 }
284 
285 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
286 		struct page *page, off_t off, size_t len)
287 {
288 	struct scatterlist sg;
289 
290 	sg_init_marker(&sg, 1);
291 	sg_set_page(&sg, page, len, off);
292 	ahash_request_set_crypt(hash, &sg, NULL, len);
293 	crypto_ahash_update(hash);
294 }
295 
296 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
297 		void *pdu, size_t len)
298 {
299 	struct scatterlist sg;
300 
301 	sg_init_one(&sg, pdu, len);
302 	ahash_request_set_crypt(hash, &sg, pdu + len, len);
303 	crypto_ahash_digest(hash);
304 }
305 
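/*
 * The received header digest trails the PDU header in the receive buffer.
 * Save it, recompute the digest over the header (nvme_tcp_hdgst() writes
 * its result to the same trailing location), and compare the two.
 */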
306 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
307 		void *pdu, size_t pdu_len)
308 {
309 	struct nvme_tcp_hdr *hdr = pdu;
310 	__le32 recv_digest;
311 	__le32 exp_digest;
312 
313 	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
314 		dev_err(queue->ctrl->ctrl.device,
315 			"queue %d: header digest flag is cleared\n",
316 			nvme_tcp_queue_id(queue));
317 		return -EPROTO;
318 	}
319 
320 	recv_digest = *(__le32 *)(pdu + hdr->hlen);
321 	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
322 	exp_digest = *(__le32 *)(pdu + hdr->hlen);
323 	if (recv_digest != exp_digest) {
324 		dev_err(queue->ctrl->ctrl.device,
325 			"header digest error: recv %#x expected %#x\n",
326 			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
327 		return -EIO;
328 	}
329 
330 	return 0;
331 }
332 
333 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
334 {
335 	struct nvme_tcp_hdr *hdr = pdu;
336 	u8 digest_len = nvme_tcp_hdgst_len(queue);
337 	u32 len;
338 
339 	len = le32_to_cpu(hdr->plen) - hdr->hlen -
340 		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
341 
342 	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
343 		dev_err(queue->ctrl->ctrl.device,
344 			"queue %d: data digest flag is cleared\n",
344 			nvme_tcp_queue_id(queue));
346 		return -EPROTO;
347 	}
348 	crypto_ahash_init(queue->rcv_hash);
349 
350 	return 0;
351 }
352 
353 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
354 		struct request *rq, unsigned int hctx_idx)
355 {
356 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
357 
358 	page_frag_free(req->pdu);
359 }
360 
361 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
362 		struct request *rq, unsigned int hctx_idx,
363 		unsigned int numa_node)
364 {
365 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
366 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
367 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
368 	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
369 	u8 hdgst = nvme_tcp_hdgst_len(queue);
370 
371 	req->pdu = page_frag_alloc(&queue->pf_cache,
372 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
373 		GFP_KERNEL | __GFP_ZERO);
374 	if (!req->pdu)
375 		return -ENOMEM;
376 
377 	req->queue = queue;
378 	nvme_req(rq)->ctrl = &ctrl->ctrl;
379 
380 	return 0;
381 }
382 
383 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
384 		unsigned int hctx_idx)
385 {
386 	struct nvme_tcp_ctrl *ctrl = data;
387 	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
388 
389 	hctx->driver_data = queue;
390 	return 0;
391 }
392 
393 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
394 		unsigned int hctx_idx)
395 {
396 	struct nvme_tcp_ctrl *ctrl = data;
397 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
398 
399 	hctx->driver_data = queue;
400 	return 0;
401 }
402 
403 static enum nvme_tcp_recv_state
404 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
405 {
406 	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
407 		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
408 		NVME_TCP_RECV_DATA;
409 }
410 
411 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
412 {
413 	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
414 				nvme_tcp_hdgst_len(queue);
415 	queue->pdu_offset = 0;
416 	queue->data_remaining = -1;
417 	queue->ddgst_remaining = 0;
418 }
419 
420 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
421 {
422 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
423 		return;
424 
425 	queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
426 }
427 
428 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
429 		struct nvme_completion *cqe)
430 {
431 	struct request *rq;
432 
433 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
434 	if (!rq) {
435 		dev_err(queue->ctrl->ctrl.device,
436 			"queue %d tag 0x%x not found\n",
437 			nvme_tcp_queue_id(queue), cqe->command_id);
438 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
439 		return -EINVAL;
440 	}
441 
442 	nvme_end_request(rq, cqe->status, cqe->result);
443 	queue->nr_cqe++;
444 
445 	return 0;
446 }
447 
448 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
449 		struct nvme_tcp_data_pdu *pdu)
450 {
451 	struct request *rq;
452 
453 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
454 	if (!rq) {
455 		dev_err(queue->ctrl->ctrl.device,
456 			"queue %d tag %#x not found\n",
457 			nvme_tcp_queue_id(queue), pdu->command_id);
458 		return -ENOENT;
459 	}
460 
461 	if (!blk_rq_payload_bytes(rq)) {
462 		dev_err(queue->ctrl->ctrl.device,
463 			"queue %d tag %#x unexpected data\n",
464 			nvme_tcp_queue_id(queue), rq->tag);
465 		return -EIO;
466 	}
467 
468 	queue->data_remaining = le32_to_cpu(pdu->data_length);
469 
470 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
471 	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
472 		dev_err(queue->ctrl->ctrl.device,
473 			"queue %d tag %#x SUCCESS set but not last PDU\n",
474 			nvme_tcp_queue_id(queue), rq->tag);
475 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
476 		return -EPROTO;
477 	}
478 
479 	return 0;
480 }
481 
482 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
483 		struct nvme_tcp_rsp_pdu *pdu)
484 {
485 	struct nvme_completion *cqe = &pdu->cqe;
486 	int ret = 0;
487 
488 	/*
489 	 * AEN requests are special as they don't time out and can
490 	 * survive any kind of queue freeze and often don't respond to
491 	 * aborts.  We don't even bother to allocate a struct request
492 	 * for them but rather special case them here.
493 	 */
494 	if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
495 	    cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
496 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
497 				&cqe->result);
498 	else
499 		ret = nvme_tcp_process_nvme_cqe(queue, cqe);
500 
501 	return ret;
502 }
503 
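/*
 * Turn a controller R2T into a single H2CData PDU covering the whole
 * r2t_length (DATA_LAST is always set; we advertise maxr2t = 0, i.e. one
 * outstanding R2T).  The R2T is validated against what was already sent
 * and against the total request length before the PDU is built.
 */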
504 static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
505 		struct nvme_tcp_r2t_pdu *pdu)
506 {
507 	struct nvme_tcp_data_pdu *data = req->pdu;
508 	struct nvme_tcp_queue *queue = req->queue;
509 	struct request *rq = blk_mq_rq_from_pdu(req);
510 	u8 hdgst = nvme_tcp_hdgst_len(queue);
511 	u8 ddgst = nvme_tcp_ddgst_len(queue);
512 
513 	req->pdu_len = le32_to_cpu(pdu->r2t_length);
514 	req->pdu_sent = 0;
515 
516 	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
517 		dev_err(queue->ctrl->ctrl.device,
518 			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
519 			rq->tag, req->pdu_len, req->data_len,
520 			req->data_sent);
521 		return -EPROTO;
522 	}
523 
524 	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
525 		dev_err(queue->ctrl->ctrl.device,
526 			"req %d unexpected r2t offset %u (expected %zu)\n",
527 			rq->tag, le32_to_cpu(pdu->r2t_offset),
528 			req->data_sent);
529 		return -EPROTO;
530 	}
531 
532 	memset(data, 0, sizeof(*data));
533 	data->hdr.type = nvme_tcp_h2c_data;
534 	data->hdr.flags = NVME_TCP_F_DATA_LAST;
535 	if (queue->hdr_digest)
536 		data->hdr.flags |= NVME_TCP_F_HDGST;
537 	if (queue->data_digest)
538 		data->hdr.flags |= NVME_TCP_F_DDGST;
539 	data->hdr.hlen = sizeof(*data);
540 	data->hdr.pdo = data->hdr.hlen + hdgst;
541 	data->hdr.plen =
542 		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
543 	data->ttag = pdu->ttag;
544 	data->command_id = rq->tag;
545 	data->data_offset = cpu_to_le32(req->data_sent);
546 	data->data_length = cpu_to_le32(req->pdu_len);
547 	return 0;
548 }
549 
550 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
551 		struct nvme_tcp_r2t_pdu *pdu)
552 {
553 	struct nvme_tcp_request *req;
554 	struct request *rq;
555 	int ret;
556 
557 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
558 	if (!rq) {
559 		dev_err(queue->ctrl->ctrl.device,
560 			"queue %d tag %#x not found\n",
561 			nvme_tcp_queue_id(queue), pdu->command_id);
562 		return -ENOENT;
563 	}
564 	req = blk_mq_rq_to_pdu(rq);
565 
566 	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
567 	if (unlikely(ret))
568 		return ret;
569 
570 	req->state = NVME_TCP_SEND_H2C_PDU;
571 	req->offset = 0;
572 
573 	nvme_tcp_queue_request(req);
574 
575 	return 0;
576 }
577 
578 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
579 		unsigned int *offset, size_t *len)
580 {
581 	struct nvme_tcp_hdr *hdr;
582 	char *pdu = queue->pdu;
583 	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
584 	int ret;
585 
586 	ret = skb_copy_bits(skb, *offset,
587 		&pdu[queue->pdu_offset], rcv_len);
588 	if (unlikely(ret))
589 		return ret;
590 
591 	queue->pdu_remaining -= rcv_len;
592 	queue->pdu_offset += rcv_len;
593 	*offset += rcv_len;
594 	*len -= rcv_len;
595 	if (queue->pdu_remaining)
596 		return 0;
597 
598 	hdr = queue->pdu;
599 	if (queue->hdr_digest) {
600 		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
601 		if (unlikely(ret))
602 			return ret;
603 	}
604 
605 
606 	if (queue->data_digest) {
607 		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
608 		if (unlikely(ret))
609 			return ret;
610 	}
611 
612 	switch (hdr->type) {
613 	case nvme_tcp_c2h_data:
614 		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
615 	case nvme_tcp_rsp:
616 		nvme_tcp_init_recv_ctx(queue);
617 		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
618 	case nvme_tcp_r2t:
619 		nvme_tcp_init_recv_ctx(queue);
620 		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
621 	default:
622 		dev_err(queue->ctrl->ctrl.device,
623 			"unsupported pdu type (%d)\n", hdr->type);
624 		return -EINVAL;
625 	}
626 }
627 
628 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
629 {
630 	union nvme_result res = {};
631 
632 	nvme_end_request(rq, cpu_to_le16(status << 1), res);
633 }
634 
635 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
636 			      unsigned int *offset, size_t *len)
637 {
638 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
639 	struct nvme_tcp_request *req;
640 	struct request *rq;
641 
642 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
643 	if (!rq) {
644 		dev_err(queue->ctrl->ctrl.device,
645 			"queue %d tag %#x not found\n",
646 			nvme_tcp_queue_id(queue), pdu->command_id);
647 		return -ENOENT;
648 	}
649 	req = blk_mq_rq_to_pdu(rq);
650 
651 	while (true) {
652 		int recv_len, ret;
653 
654 		recv_len = min_t(size_t, *len, queue->data_remaining);
655 		if (!recv_len)
656 			break;
657 
658 		if (!iov_iter_count(&req->iter)) {
659 			req->curr_bio = req->curr_bio->bi_next;
660 
661 			/*
662 			 * If we don't have any bios it means that the controller
663 			 * sent more data than we requested, hence error
664 			 */
665 			if (!req->curr_bio) {
666 				dev_err(queue->ctrl->ctrl.device,
667 					"queue %d no space in request %#x\n",
668 					nvme_tcp_queue_id(queue), rq->tag);
669 				nvme_tcp_init_recv_ctx(queue);
670 				return -EIO;
671 			}
672 			nvme_tcp_init_iter(req, READ);
673 		}
674 
675 		/* we can read only from what is left in this bio */
676 		recv_len = min_t(size_t, recv_len,
677 				iov_iter_count(&req->iter));
678 
679 		if (queue->data_digest)
680 			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
681 				&req->iter, recv_len, queue->rcv_hash);
682 		else
683 			ret = skb_copy_datagram_iter(skb, *offset,
684 					&req->iter, recv_len);
685 		if (ret) {
686 			dev_err(queue->ctrl->ctrl.device,
687 				"queue %d failed to copy request %#x data\n",
688 				nvme_tcp_queue_id(queue), rq->tag);
689 			return ret;
690 		}
691 
692 		*len -= recv_len;
693 		*offset += recv_len;
694 		queue->data_remaining -= recv_len;
695 	}
696 
697 	if (!queue->data_remaining) {
698 		if (queue->data_digest) {
699 			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
700 			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
701 		} else {
702 			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
703 				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
704 				queue->nr_cqe++;
705 			}
706 			nvme_tcp_init_recv_ctx(queue);
707 		}
708 	}
709 
710 	return 0;
711 }
712 
713 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
714 		struct sk_buff *skb, unsigned int *offset, size_t *len)
715 {
716 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
717 	char *ddgst = (char *)&queue->recv_ddgst;
718 	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
719 	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
720 	int ret;
721 
722 	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
723 	if (unlikely(ret))
724 		return ret;
725 
726 	queue->ddgst_remaining -= recv_len;
727 	*offset += recv_len;
728 	*len -= recv_len;
729 	if (queue->ddgst_remaining)
730 		return 0;
731 
732 	if (queue->recv_ddgst != queue->exp_ddgst) {
733 		dev_err(queue->ctrl->ctrl.device,
734 			"data digest error: recv %#x expected %#x\n",
735 			le32_to_cpu(queue->recv_ddgst),
736 			le32_to_cpu(queue->exp_ddgst));
737 		return -EIO;
738 	}
739 
740 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
741 		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
742 						pdu->command_id);
743 
744 		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
745 		queue->nr_cqe++;
746 	}
747 
748 	nvme_tcp_init_recv_ctx(queue);
749 	return 0;
750 }
751 
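/*
 * ->read_sock() callback: consume skb bytes through the PDU/DATA/DDGST
 * receive states until the skb is drained or an error occurs.  On error,
 * further reads are disabled and controller error recovery is scheduled.
 */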
752 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
753 			     unsigned int offset, size_t len)
754 {
755 	struct nvme_tcp_queue *queue = desc->arg.data;
756 	size_t consumed = len;
757 	int result;
758 
759 	while (len) {
760 		switch (nvme_tcp_recv_state(queue)) {
761 		case NVME_TCP_RECV_PDU:
762 			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
763 			break;
764 		case NVME_TCP_RECV_DATA:
765 			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
766 			break;
767 		case NVME_TCP_RECV_DDGST:
768 			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
769 			break;
770 		default:
771 			result = -EFAULT;
772 		}
773 		if (result) {
774 			dev_err(queue->ctrl->ctrl.device,
775 				"receive failed: %d\n", result);
776 			queue->rd_enabled = false;
777 			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
778 			return result;
779 		}
780 	}
781 
782 	return consumed;
783 }
784 
785 static void nvme_tcp_data_ready(struct sock *sk)
786 {
787 	struct nvme_tcp_queue *queue;
788 
789 	read_lock(&sk->sk_callback_lock);
790 	queue = sk->sk_user_data;
791 	if (likely(queue && queue->rd_enabled))
792 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
793 	read_unlock(&sk->sk_callback_lock);
794 }
795 
796 static void nvme_tcp_write_space(struct sock *sk)
797 {
798 	struct nvme_tcp_queue *queue;
799 
800 	read_lock_bh(&sk->sk_callback_lock);
801 	queue = sk->sk_user_data;
802 	if (likely(queue && sk_stream_is_writeable(sk))) {
803 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
804 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
805 	}
806 	read_unlock_bh(&sk->sk_callback_lock);
807 }
808 
809 static void nvme_tcp_state_change(struct sock *sk)
810 {
811 	struct nvme_tcp_queue *queue;
812 
813 	read_lock(&sk->sk_callback_lock);
814 	queue = sk->sk_user_data;
815 	if (!queue)
816 		goto done;
817 
818 	switch (sk->sk_state) {
819 	case TCP_CLOSE:
820 	case TCP_CLOSE_WAIT:
821 	case TCP_LAST_ACK:
822 	case TCP_FIN_WAIT1:
823 	case TCP_FIN_WAIT2:
824 		/* fallthrough */
825 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
826 		break;
827 	default:
828 		dev_info(queue->ctrl->ctrl.device,
829 			"queue %d socket state %d\n",
830 			nvme_tcp_queue_id(queue), sk->sk_state);
831 	}
832 
833 	queue->state_change(sk);
834 done:
835 	read_unlock(&sk->sk_callback_lock);
836 }
837 
838 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
839 {
840 	queue->request = NULL;
841 }
842 
843 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
844 {
845 	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
846 }
847 
848 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
849 {
850 	struct nvme_tcp_queue *queue = req->queue;
851 
852 	while (true) {
853 		struct page *page = nvme_tcp_req_cur_page(req);
854 		size_t offset = nvme_tcp_req_cur_offset(req);
855 		size_t len = nvme_tcp_req_cur_length(req);
856 		bool last = nvme_tcp_pdu_last_send(req, len);
857 		int ret, flags = MSG_DONTWAIT;
858 
859 		if (last && !queue->data_digest)
860 			flags |= MSG_EOR;
861 		else
862 			flags |= MSG_MORE;
863 
864 		/* can't zcopy slab pages */
865 		if (unlikely(PageSlab(page))) {
866 			ret = sock_no_sendpage(queue->sock, page, offset, len,
867 					flags);
868 		} else {
869 			ret = kernel_sendpage(queue->sock, page, offset, len,
870 					flags);
871 		}
872 		if (ret <= 0)
873 			return ret;
874 
875 		nvme_tcp_advance_req(req, ret);
876 		if (queue->data_digest)
877 			nvme_tcp_ddgst_update(queue->snd_hash, page,
878 					offset, ret);
879 
880 		/* fully successful last write */
881 		if (last && ret == len) {
882 			if (queue->data_digest) {
883 				nvme_tcp_ddgst_final(queue->snd_hash,
884 					&req->ddgst);
885 				req->state = NVME_TCP_SEND_DDGST;
886 				req->offset = 0;
887 			} else {
888 				nvme_tcp_done_send_req(queue);
889 			}
890 			return 1;
891 		}
892 	}
893 	return -EAGAIN;
894 }
895 
896 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
897 {
898 	struct nvme_tcp_queue *queue = req->queue;
899 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
900 	bool inline_data = nvme_tcp_has_inline_data(req);
901 	int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
902 	u8 hdgst = nvme_tcp_hdgst_len(queue);
903 	int len = sizeof(*pdu) + hdgst - req->offset;
904 	int ret;
905 
906 	if (queue->hdr_digest && !req->offset)
907 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
908 
909 	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
910 			offset_in_page(pdu) + req->offset, len, flags);
911 	if (unlikely(ret <= 0))
912 		return ret;
913 
914 	len -= ret;
915 	if (!len) {
916 		if (inline_data) {
917 			req->state = NVME_TCP_SEND_DATA;
918 			if (queue->data_digest)
919 				crypto_ahash_init(queue->snd_hash);
920 			nvme_tcp_init_iter(req, WRITE);
921 		} else {
922 			nvme_tcp_done_send_req(queue);
923 		}
924 		return 1;
925 	}
926 	req->offset += ret;
927 
928 	return -EAGAIN;
929 }
930 
931 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
932 {
933 	struct nvme_tcp_queue *queue = req->queue;
934 	struct nvme_tcp_data_pdu *pdu = req->pdu;
935 	u8 hdgst = nvme_tcp_hdgst_len(queue);
936 	int len = sizeof(*pdu) - req->offset + hdgst;
937 	int ret;
938 
939 	if (queue->hdr_digest && !req->offset)
940 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
941 
942 	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
943 			offset_in_page(pdu) + req->offset, len,
944 			MSG_DONTWAIT | MSG_MORE);
945 	if (unlikely(ret <= 0))
946 		return ret;
947 
948 	len -= ret;
949 	if (!len) {
950 		req->state = NVME_TCP_SEND_DATA;
951 		if (queue->data_digest)
952 			crypto_ahash_init(queue->snd_hash);
953 		if (!req->data_sent)
954 			nvme_tcp_init_iter(req, WRITE);
955 		return 1;
956 	}
957 	req->offset += ret;
958 
959 	return -EAGAIN;
960 }
961 
962 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
963 {
964 	struct nvme_tcp_queue *queue = req->queue;
965 	int ret;
966 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
967 	struct kvec iov = {
968 		.iov_base = &req->ddgst + req->offset,
969 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
970 	};
971 
972 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
973 	if (unlikely(ret <= 0))
974 		return ret;
975 
976 	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
977 		nvme_tcp_done_send_req(queue);
978 		return 1;
979 	}
980 
981 	req->offset += ret;
982 	return -EAGAIN;
983 }
984 
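/*
 * Advance the current (or next queued) request through the send states.
 * Returns > 0 if progress was made, 0 if there is nothing to send or the
 * socket would block (-EAGAIN is swallowed), and < 0 on a hard error.
 */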
985 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
986 {
987 	struct nvme_tcp_request *req;
988 	int ret = 1;
989 
990 	if (!queue->request) {
991 		queue->request = nvme_tcp_fetch_request(queue);
992 		if (!queue->request)
993 			return 0;
994 	}
995 	req = queue->request;
996 
997 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
998 		ret = nvme_tcp_try_send_cmd_pdu(req);
999 		if (ret <= 0)
1000 			goto done;
1001 		if (!nvme_tcp_has_inline_data(req))
1002 			return ret;
1003 	}
1004 
1005 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
1006 		ret = nvme_tcp_try_send_data_pdu(req);
1007 		if (ret <= 0)
1008 			goto done;
1009 	}
1010 
1011 	if (req->state == NVME_TCP_SEND_DATA) {
1012 		ret = nvme_tcp_try_send_data(req);
1013 		if (ret <= 0)
1014 			goto done;
1015 	}
1016 
1017 	if (req->state == NVME_TCP_SEND_DDGST)
1018 		ret = nvme_tcp_try_send_ddgst(req);
1019 done:
1020 	if (ret == -EAGAIN)
1021 		ret = 0;
1022 	return ret;
1023 }
1024 
1025 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1026 {
1027 	struct socket *sock = queue->sock;
1028 	struct sock *sk = sock->sk;
1029 	read_descriptor_t rd_desc;
1030 	int consumed;
1031 
1032 	rd_desc.arg.data = queue;
1033 	rd_desc.count = 1;
1034 	lock_sock(sk);
1035 	queue->nr_cqe = 0;
1036 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1037 	release_sock(sk);
1038 	return consumed;
1039 }
1040 
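/*
 * Per-queue I/O worker: alternate between sending and receiving under a
 * roughly 1ms budget, then re-queue itself if work is still pending so a
 * single queue does not monopolize the CPU.
 */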
1041 static void nvme_tcp_io_work(struct work_struct *w)
1042 {
1043 	struct nvme_tcp_queue *queue =
1044 		container_of(w, struct nvme_tcp_queue, io_work);
1045 	unsigned long deadline = jiffies + msecs_to_jiffies(1);
1046 
1047 	do {
1048 		bool pending = false;
1049 		int result;
1050 
1051 		result = nvme_tcp_try_send(queue);
1052 		if (result > 0) {
1053 			pending = true;
1054 		} else if (unlikely(result < 0)) {
1055 			dev_err(queue->ctrl->ctrl.device,
1056 				"failed to send request %d\n", result);
1057 			if (result != -EPIPE)
1058 				nvme_tcp_fail_request(queue->request);
1059 			nvme_tcp_done_send_req(queue);
1060 			return;
1061 		}
1062 
1063 		result = nvme_tcp_try_recv(queue);
1064 		if (result > 0)
1065 			pending = true;
1066 
1067 		if (!pending)
1068 			return;
1069 
1070 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
1071 
1072 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1073 }
1074 
1075 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1076 {
1077 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1078 
1079 	ahash_request_free(queue->rcv_hash);
1080 	ahash_request_free(queue->snd_hash);
1081 	crypto_free_ahash(tfm);
1082 }
1083 
1084 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1085 {
1086 	struct crypto_ahash *tfm;
1087 
1088 	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1089 	if (IS_ERR(tfm))
1090 		return PTR_ERR(tfm);
1091 
1092 	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1093 	if (!queue->snd_hash)
1094 		goto free_tfm;
1095 	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1096 
1097 	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1098 	if (!queue->rcv_hash)
1099 		goto free_snd_hash;
1100 	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1101 
1102 	return 0;
1103 free_snd_hash:
1104 	ahash_request_free(queue->snd_hash);
1105 free_tfm:
1106 	crypto_free_ahash(tfm);
1107 	return -ENOMEM;
1108 }
1109 
1110 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1111 {
1112 	struct nvme_tcp_request *async = &ctrl->async_req;
1113 
1114 	page_frag_free(async->pdu);
1115 }
1116 
1117 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1118 {
1119 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
1120 	struct nvme_tcp_request *async = &ctrl->async_req;
1121 	u8 hdgst = nvme_tcp_hdgst_len(queue);
1122 
1123 	async->pdu = page_frag_alloc(&queue->pf_cache,
1124 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1125 		GFP_KERNEL | __GFP_ZERO);
1126 	if (!async->pdu)
1127 		return -ENOMEM;
1128 
1129 	async->queue = &ctrl->queues[0];
1130 	return 0;
1131 }
1132 
1133 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1134 {
1135 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1136 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1137 
1138 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1139 		return;
1140 
1141 	if (queue->hdr_digest || queue->data_digest)
1142 		nvme_tcp_free_crypto(queue);
1143 
1144 	sock_release(queue->sock);
1145 	kfree(queue->pdu);
1146 }
1147 
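/*
 * NVMe/TCP initialize-connection handshake: send an ICReq, block for the
 * ICResp, and verify that the PDU type/length, PFV, CPDA and the
 * header/data digest settings match what this host requested.
 */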
1148 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1149 {
1150 	struct nvme_tcp_icreq_pdu *icreq;
1151 	struct nvme_tcp_icresp_pdu *icresp;
1152 	struct msghdr msg = {};
1153 	struct kvec iov;
1154 	bool ctrl_hdgst, ctrl_ddgst;
1155 	int ret;
1156 
1157 	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1158 	if (!icreq)
1159 		return -ENOMEM;
1160 
1161 	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1162 	if (!icresp) {
1163 		ret = -ENOMEM;
1164 		goto free_icreq;
1165 	}
1166 
1167 	icreq->hdr.type = nvme_tcp_icreq;
1168 	icreq->hdr.hlen = sizeof(*icreq);
1169 	icreq->hdr.pdo = 0;
1170 	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1171 	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1172 	icreq->maxr2t = 0; /* single inflight r2t supported */
1173 	icreq->hpda = 0; /* no alignment constraint */
1174 	if (queue->hdr_digest)
1175 		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1176 	if (queue->data_digest)
1177 		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1178 
1179 	iov.iov_base = icreq;
1180 	iov.iov_len = sizeof(*icreq);
1181 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1182 	if (ret < 0)
1183 		goto free_icresp;
1184 
1185 	memset(&msg, 0, sizeof(msg));
1186 	iov.iov_base = icresp;
1187 	iov.iov_len = sizeof(*icresp);
1188 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1189 			iov.iov_len, msg.msg_flags);
1190 	if (ret < 0)
1191 		goto free_icresp;
1192 
1193 	ret = -EINVAL;
1194 	if (icresp->hdr.type != nvme_tcp_icresp) {
1195 		pr_err("queue %d: bad type returned %d\n",
1196 			nvme_tcp_queue_id(queue), icresp->hdr.type);
1197 		goto free_icresp;
1198 	}
1199 
1200 	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1201 		pr_err("queue %d: bad pdu length returned %d\n",
1202 			nvme_tcp_queue_id(queue), icresp->hdr.plen);
1203 		goto free_icresp;
1204 	}
1205 
1206 	if (icresp->pfv != NVME_TCP_PFV_1_0) {
1207 		pr_err("queue %d: bad pfv returned %d\n",
1208 			nvme_tcp_queue_id(queue), icresp->pfv);
1209 		goto free_icresp;
1210 	}
1211 
1212 	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1213 	if ((queue->data_digest && !ctrl_ddgst) ||
1214 	    (!queue->data_digest && ctrl_ddgst)) {
1215 		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1216 			nvme_tcp_queue_id(queue),
1217 			queue->data_digest ? "enabled" : "disabled",
1218 			ctrl_ddgst ? "enabled" : "disabled");
1219 		goto free_icresp;
1220 	}
1221 
1222 	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1223 	if ((queue->hdr_digest && !ctrl_hdgst) ||
1224 	    (!queue->hdr_digest && ctrl_hdgst)) {
1225 		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1226 			nvme_tcp_queue_id(queue),
1227 			queue->hdr_digest ? "enabled" : "disabled",
1228 			ctrl_hdgst ? "enabled" : "disabled");
1229 		goto free_icresp;
1230 	}
1231 
1232 	if (icresp->cpda != 0) {
1233 		pr_err("queue %d: unsupported cpda returned %d\n",
1234 			nvme_tcp_queue_id(queue), icresp->cpda);
1235 		goto free_icresp;
1236 	}
1237 
1238 	ret = 0;
1239 free_icresp:
1240 	kfree(icresp);
1241 free_icreq:
1242 	kfree(icreq);
1243 	return ret;
1244 }
1245 
1246 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1247 		int qid, size_t queue_size)
1248 {
1249 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1250 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1251 	struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1252 	int ret, opt, rcv_pdu_size, n;
1253 
1254 	queue->ctrl = ctrl;
1255 	INIT_LIST_HEAD(&queue->send_list);
1256 	spin_lock_init(&queue->lock);
1257 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1258 	queue->queue_size = queue_size;
1259 
1260 	if (qid > 0)
1261 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1262 	else
1263 		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1264 						NVME_TCP_ADMIN_CCSZ;
1265 
1266 	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1267 			IPPROTO_TCP, &queue->sock);
1268 	if (ret) {
1269 		dev_err(nctrl->device,
1270 			"failed to create socket: %d\n", ret);
1271 		return ret;
1272 	}
1273 
1274 	/* Single SYN retry */
1275 	opt = 1;
1276 	ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1277 			(char *)&opt, sizeof(opt));
1278 	if (ret) {
1279 		dev_err(nctrl->device,
1280 			"failed to set TCP_SYNCNT sock opt %d\n", ret);
1281 		goto err_sock;
1282 	}
1283 
1284 	/* Set TCP no delay */
1285 	opt = 1;
1286 	ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1287 			TCP_NODELAY, (char *)&opt, sizeof(opt));
1288 	if (ret) {
1289 		dev_err(nctrl->device,
1290 			"failed to set TCP_NODELAY sock opt %d\n", ret);
1291 		goto err_sock;
1292 	}
1293 
1294 	/*
1295 	 * Cleanup whatever is sitting in the TCP transmit queue on socket
1296 	 * close. This is done to prevent stale data from being sent should
1297 	 * the network connection be restored before TCP times out.
1298 	 */
1299 	ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1300 			(char *)&sol, sizeof(sol));
1301 	if (ret) {
1302 		dev_err(nctrl->device,
1303 			"failed to set SO_LINGER sock opt %d\n", ret);
1304 		goto err_sock;
1305 	}
1306 
1307 	/* Set socket type of service */
1308 	if (nctrl->opts->tos >= 0) {
1309 		opt = nctrl->opts->tos;
1310 		ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
1311 				(char *)&opt, sizeof(opt));
1312 		if (ret) {
1313 			dev_err(nctrl->device,
1314 				"failed to set IP_TOS sock opt %d\n", ret);
1315 			goto err_sock;
1316 		}
1317 	}
1318 
1319 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
1320 	if (!qid)
1321 		n = 0;
1322 	else
1323 		n = (qid - 1) % num_online_cpus();
1324 	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1325 	queue->request = NULL;
1326 	queue->data_remaining = 0;
1327 	queue->ddgst_remaining = 0;
1328 	queue->pdu_remaining = 0;
1329 	queue->pdu_offset = 0;
1330 	sk_set_memalloc(queue->sock->sk);
1331 
1332 	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1333 		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1334 			sizeof(ctrl->src_addr));
1335 		if (ret) {
1336 			dev_err(nctrl->device,
1337 				"failed to bind queue %d socket %d\n",
1338 				qid, ret);
1339 			goto err_sock;
1340 		}
1341 	}
1342 
1343 	queue->hdr_digest = nctrl->opts->hdr_digest;
1344 	queue->data_digest = nctrl->opts->data_digest;
1345 	if (queue->hdr_digest || queue->data_digest) {
1346 		ret = nvme_tcp_alloc_crypto(queue);
1347 		if (ret) {
1348 			dev_err(nctrl->device,
1349 				"failed to allocate queue %d crypto\n", qid);
1350 			goto err_sock;
1351 		}
1352 	}
1353 
1354 	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1355 			nvme_tcp_hdgst_len(queue);
1356 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1357 	if (!queue->pdu) {
1358 		ret = -ENOMEM;
1359 		goto err_crypto;
1360 	}
1361 
1362 	dev_dbg(nctrl->device, "connecting queue %d\n",
1363 			nvme_tcp_queue_id(queue));
1364 
1365 	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1366 		sizeof(ctrl->addr), 0);
1367 	if (ret) {
1368 		dev_err(nctrl->device,
1369 			"failed to connect socket: %d\n", ret);
1370 		goto err_rcv_pdu;
1371 	}
1372 
1373 	ret = nvme_tcp_init_connection(queue);
1374 	if (ret)
1375 		goto err_init_connect;
1376 
1377 	queue->rd_enabled = true;
1378 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1379 	nvme_tcp_init_recv_ctx(queue);
1380 
1381 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
1382 	queue->sock->sk->sk_user_data = queue;
1383 	queue->state_change = queue->sock->sk->sk_state_change;
1384 	queue->data_ready = queue->sock->sk->sk_data_ready;
1385 	queue->write_space = queue->sock->sk->sk_write_space;
1386 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1387 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1388 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1389 	queue->sock->sk->sk_ll_usec = 1;
1390 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1391 
1392 	return 0;
1393 
1394 err_init_connect:
1395 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1396 err_rcv_pdu:
1397 	kfree(queue->pdu);
1398 err_crypto:
1399 	if (queue->hdr_digest || queue->data_digest)
1400 		nvme_tcp_free_crypto(queue);
1401 err_sock:
1402 	sock_release(queue->sock);
1403 	queue->sock = NULL;
1404 	return ret;
1405 }
1406 
1407 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1408 {
1409 	struct socket *sock = queue->sock;
1410 
1411 	write_lock_bh(&sock->sk->sk_callback_lock);
1412 	sock->sk->sk_user_data  = NULL;
1413 	sock->sk->sk_data_ready = queue->data_ready;
1414 	sock->sk->sk_state_change = queue->state_change;
1415 	sock->sk->sk_write_space  = queue->write_space;
1416 	write_unlock_bh(&sock->sk->sk_callback_lock);
1417 }
1418 
1419 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1420 {
1421 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1422 	nvme_tcp_restore_sock_calls(queue);
1423 	cancel_work_sync(&queue->io_work);
1424 }
1425 
1426 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1427 {
1428 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1429 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1430 
1431 	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1432 		return;
1433 
1434 	__nvme_tcp_stop_queue(queue);
1435 }
1436 
1437 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1438 {
1439 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1440 	int ret;
1441 
1442 	if (idx)
1443 		ret = nvmf_connect_io_queue(nctrl, idx, false);
1444 	else
1445 		ret = nvmf_connect_admin_queue(nctrl);
1446 
1447 	if (!ret) {
1448 		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1449 	} else {
1450 		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1451 			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
1452 		dev_err(nctrl->device,
1453 			"failed to connect queue: %d ret=%d\n", idx, ret);
1454 	}
1455 	return ret;
1456 }
1457 
1458 static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1459 		bool admin)
1460 {
1461 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1462 	struct blk_mq_tag_set *set;
1463 	int ret;
1464 
1465 	if (admin) {
1466 		set = &ctrl->admin_tag_set;
1467 		memset(set, 0, sizeof(*set));
1468 		set->ops = &nvme_tcp_admin_mq_ops;
1469 		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1470 		set->reserved_tags = 2; /* connect + keep-alive */
1471 		set->numa_node = NUMA_NO_NODE;
1472 		set->cmd_size = sizeof(struct nvme_tcp_request);
1473 		set->driver_data = ctrl;
1474 		set->nr_hw_queues = 1;
1475 		set->timeout = ADMIN_TIMEOUT;
1476 	} else {
1477 		set = &ctrl->tag_set;
1478 		memset(set, 0, sizeof(*set));
1479 		set->ops = &nvme_tcp_mq_ops;
1480 		set->queue_depth = nctrl->sqsize + 1;
1481 		set->reserved_tags = 1; /* fabric connect */
1482 		set->numa_node = NUMA_NO_NODE;
1483 		set->flags = BLK_MQ_F_SHOULD_MERGE;
1484 		set->cmd_size = sizeof(struct nvme_tcp_request);
1485 		set->driver_data = ctrl;
1486 		set->nr_hw_queues = nctrl->queue_count - 1;
1487 		set->timeout = NVME_IO_TIMEOUT;
1488 		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1489 	}
1490 
1491 	ret = blk_mq_alloc_tag_set(set);
1492 	if (ret)
1493 		return ERR_PTR(ret);
1494 
1495 	return set;
1496 }
1497 
1498 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1499 {
1500 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1501 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1502 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1503 	}
1504 
1505 	nvme_tcp_free_queue(ctrl, 0);
1506 }
1507 
1508 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1509 {
1510 	int i;
1511 
1512 	for (i = 1; i < ctrl->queue_count; i++)
1513 		nvme_tcp_free_queue(ctrl, i);
1514 }
1515 
1516 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1517 {
1518 	int i;
1519 
1520 	for (i = 1; i < ctrl->queue_count; i++)
1521 		nvme_tcp_stop_queue(ctrl, i);
1522 }
1523 
1524 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1525 {
1526 	int i, ret = 0;
1527 
1528 	for (i = 1; i < ctrl->queue_count; i++) {
1529 		ret = nvme_tcp_start_queue(ctrl, i);
1530 		if (ret)
1531 			goto out_stop_queues;
1532 	}
1533 
1534 	return 0;
1535 
1536 out_stop_queues:
1537 	for (i--; i >= 1; i--)
1538 		nvme_tcp_stop_queue(ctrl, i);
1539 	return ret;
1540 }
1541 
1542 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1543 {
1544 	int ret;
1545 
1546 	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1547 	if (ret)
1548 		return ret;
1549 
1550 	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1551 	if (ret)
1552 		goto out_free_queue;
1553 
1554 	return 0;
1555 
1556 out_free_queue:
1557 	nvme_tcp_free_queue(ctrl, 0);
1558 	return ret;
1559 }
1560 
1561 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1562 {
1563 	int i, ret;
1564 
1565 	for (i = 1; i < ctrl->queue_count; i++) {
1566 		ret = nvme_tcp_alloc_queue(ctrl, i,
1567 				ctrl->sqsize + 1);
1568 		if (ret)
1569 			goto out_free_queues;
1570 	}
1571 
1572 	return 0;
1573 
1574 out_free_queues:
1575 	for (i--; i >= 1; i--)
1576 		nvme_tcp_free_queue(ctrl, i);
1577 
1578 	return ret;
1579 }
1580 
1581 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1582 {
1583 	unsigned int nr_io_queues;
1584 
1585 	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1586 	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1587 	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1588 
1589 	return nr_io_queues;
1590 }
1591 
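/*
 * Distribute the granted queues among the HCTX types.  Illustrative
 * example (hypothetical option values): nr_io_queues=4, nr_write_queues=2,
 * nr_poll_queues=1 with 7 queues granted yields HCTX_TYPE_READ=4,
 * HCTX_TYPE_DEFAULT=2 and HCTX_TYPE_POLL=1.
 */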
1592 static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1593 		unsigned int nr_io_queues)
1594 {
1595 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1596 	struct nvmf_ctrl_options *opts = nctrl->opts;
1597 
1598 	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1599 		/*
1600 		 * separate read/write queues
1601 		 * hand out dedicated default queues only after we have
1602 		 * sufficient read queues.
1603 		 */
1604 		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1605 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1606 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1607 			min(opts->nr_write_queues, nr_io_queues);
1608 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1609 	} else {
1610 		/*
1611 		 * shared read/write queues
1612 		 * either no write queues were requested, or we don't have
1613 		 * sufficient queue count to have dedicated default queues.
1614 		 */
1615 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1616 			min(opts->nr_io_queues, nr_io_queues);
1617 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1618 	}
1619 
1620 	if (opts->nr_poll_queues && nr_io_queues) {
1621 		/* map dedicated poll queues only if we have queues left */
1622 		ctrl->io_queues[HCTX_TYPE_POLL] =
1623 			min(opts->nr_poll_queues, nr_io_queues);
1624 	}
1625 }
1626 
1627 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1628 {
1629 	unsigned int nr_io_queues;
1630 	int ret;
1631 
1632 	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1633 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1634 	if (ret)
1635 		return ret;
1636 
1637 	ctrl->queue_count = nr_io_queues + 1;
1638 	if (ctrl->queue_count < 2)
1639 		return 0;
1640 
1641 	dev_info(ctrl->device,
1642 		"creating %d I/O queues.\n", nr_io_queues);
1643 
1644 	nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1645 
1646 	return __nvme_tcp_alloc_io_queues(ctrl);
1647 }
1648 
1649 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1650 {
1651 	nvme_tcp_stop_io_queues(ctrl);
1652 	if (remove) {
1653 		blk_cleanup_queue(ctrl->connect_q);
1654 		blk_mq_free_tag_set(ctrl->tagset);
1655 	}
1656 	nvme_tcp_free_io_queues(ctrl);
1657 }
1658 
1659 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1660 {
1661 	int ret;
1662 
1663 	ret = nvme_tcp_alloc_io_queues(ctrl);
1664 	if (ret)
1665 		return ret;
1666 
1667 	if (new) {
1668 		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1669 		if (IS_ERR(ctrl->tagset)) {
1670 			ret = PTR_ERR(ctrl->tagset);
1671 			goto out_free_io_queues;
1672 		}
1673 
1674 		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1675 		if (IS_ERR(ctrl->connect_q)) {
1676 			ret = PTR_ERR(ctrl->connect_q);
1677 			goto out_free_tag_set;
1678 		}
1679 	} else {
1680 		blk_mq_update_nr_hw_queues(ctrl->tagset,
1681 			ctrl->queue_count - 1);
1682 	}
1683 
1684 	ret = nvme_tcp_start_io_queues(ctrl);
1685 	if (ret)
1686 		goto out_cleanup_connect_q;
1687 
1688 	return 0;
1689 
1690 out_cleanup_connect_q:
1691 	if (new)
1692 		blk_cleanup_queue(ctrl->connect_q);
1693 out_free_tag_set:
1694 	if (new)
1695 		blk_mq_free_tag_set(ctrl->tagset);
1696 out_free_io_queues:
1697 	nvme_tcp_free_io_queues(ctrl);
1698 	return ret;
1699 }
1700 
1701 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1702 {
1703 	nvme_tcp_stop_queue(ctrl, 0);
1704 	if (remove) {
1705 		blk_cleanup_queue(ctrl->admin_q);
1706 		blk_cleanup_queue(ctrl->fabrics_q);
1707 		blk_mq_free_tag_set(ctrl->admin_tagset);
1708 	}
1709 	nvme_tcp_free_admin_queue(ctrl);
1710 }
1711 
1712 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1713 {
1714 	int error;
1715 
1716 	error = nvme_tcp_alloc_admin_queue(ctrl);
1717 	if (error)
1718 		return error;
1719 
1720 	if (new) {
1721 		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1722 		if (IS_ERR(ctrl->admin_tagset)) {
1723 			error = PTR_ERR(ctrl->admin_tagset);
1724 			goto out_free_queue;
1725 		}
1726 
1727 		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1728 		if (IS_ERR(ctrl->fabrics_q)) {
1729 			error = PTR_ERR(ctrl->fabrics_q);
1730 			goto out_free_tagset;
1731 		}
1732 
1733 		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1734 		if (IS_ERR(ctrl->admin_q)) {
1735 			error = PTR_ERR(ctrl->admin_q);
1736 			goto out_cleanup_fabrics_q;
1737 		}
1738 	}
1739 
1740 	error = nvme_tcp_start_queue(ctrl, 0);
1741 	if (error)
1742 		goto out_cleanup_queue;
1743 
1744 	error = nvme_enable_ctrl(ctrl);
1745 	if (error)
1746 		goto out_stop_queue;
1747 
1748 	blk_mq_unquiesce_queue(ctrl->admin_q);
1749 
1750 	error = nvme_init_identify(ctrl);
1751 	if (error)
1752 		goto out_stop_queue;
1753 
1754 	return 0;
1755 
1756 out_stop_queue:
1757 	nvme_tcp_stop_queue(ctrl, 0);
1758 out_cleanup_queue:
1759 	if (new)
1760 		blk_cleanup_queue(ctrl->admin_q);
1761 out_cleanup_fabrics_q:
1762 	if (new)
1763 		blk_cleanup_queue(ctrl->fabrics_q);
1764 out_free_tagset:
1765 	if (new)
1766 		blk_mq_free_tag_set(ctrl->admin_tagset);
1767 out_free_queue:
1768 	nvme_tcp_free_admin_queue(ctrl);
1769 	return error;
1770 }
1771 
1772 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1773 		bool remove)
1774 {
1775 	blk_mq_quiesce_queue(ctrl->admin_q);
1776 	nvme_tcp_stop_queue(ctrl, 0);
1777 	if (ctrl->admin_tagset) {
1778 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1779 			nvme_cancel_request, ctrl);
1780 		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1781 	}
1782 	if (remove)
1783 		blk_mq_unquiesce_queue(ctrl->admin_q);
1784 	nvme_tcp_destroy_admin_queue(ctrl, remove);
1785 }
1786 
1787 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1788 		bool remove)
1789 {
1790 	if (ctrl->queue_count <= 1)
1791 		return;
1792 	nvme_stop_queues(ctrl);
1793 	nvme_tcp_stop_io_queues(ctrl);
1794 	if (ctrl->tagset) {
1795 		blk_mq_tagset_busy_iter(ctrl->tagset,
1796 			nvme_cancel_request, ctrl);
1797 		blk_mq_tagset_wait_completed_request(ctrl->tagset);
1798 	}
1799 	if (remove)
1800 		nvme_start_queues(ctrl);
1801 	nvme_tcp_destroy_io_queues(ctrl, remove);
1802 }
1803 
1804 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1805 {
1806 	/* If we are resetting/deleting then do nothing */
1807 	if (ctrl->state != NVME_CTRL_CONNECTING) {
1808 		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1809 			ctrl->state == NVME_CTRL_LIVE);
1810 		return;
1811 	}
1812 
1813 	if (nvmf_should_reconnect(ctrl)) {
1814 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1815 			ctrl->opts->reconnect_delay);
1816 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1817 				ctrl->opts->reconnect_delay * HZ);
1818 	} else {
1819 		dev_info(ctrl->device, "Removing controller...\n");
1820 		nvme_delete_ctrl(ctrl);
1821 	}
1822 }
1823 
1824 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1825 {
1826 	struct nvmf_ctrl_options *opts = ctrl->opts;
1827 	int ret;
1828 
1829 	ret = nvme_tcp_configure_admin_queue(ctrl, new);
1830 	if (ret)
1831 		return ret;
1832 
1833 	if (ctrl->icdoff) {
1834 		dev_err(ctrl->device, "icdoff is not supported!\n");
1835 		goto destroy_admin;
1836 	}
1837 
1838 	if (opts->queue_size > ctrl->sqsize + 1)
1839 		dev_warn(ctrl->device,
1840 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
1841 			opts->queue_size, ctrl->sqsize + 1);
1842 
1843 	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1844 		dev_warn(ctrl->device,
1845 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
1846 			ctrl->sqsize + 1, ctrl->maxcmd);
1847 		ctrl->sqsize = ctrl->maxcmd - 1;
1848 	}
1849 
1850 	if (ctrl->queue_count > 1) {
1851 		ret = nvme_tcp_configure_io_queues(ctrl, new);
1852 		if (ret)
1853 			goto destroy_admin;
1854 	}
1855 
1856 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1857 		/* state change failure is ok if we're in DELETING state */
1858 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1859 		ret = -EINVAL;
1860 		goto destroy_io;
1861 	}
1862 
1863 	nvme_start_ctrl(ctrl);
1864 	return 0;
1865 
1866 destroy_io:
1867 	if (ctrl->queue_count > 1)
1868 		nvme_tcp_destroy_io_queues(ctrl, new);
1869 destroy_admin:
1870 	nvme_tcp_stop_queue(ctrl, 0);
1871 	nvme_tcp_destroy_admin_queue(ctrl, new);
1872 	return ret;
1873 }
1874 
1875 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1876 {
1877 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1878 			struct nvme_tcp_ctrl, connect_work);
1879 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1880 
1881 	++ctrl->nr_reconnects;
1882 
1883 	if (nvme_tcp_setup_ctrl(ctrl, false))
1884 		goto requeue;
1885 
1886 	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
1887 			ctrl->nr_reconnects);
1888 
1889 	ctrl->nr_reconnects = 0;
1890 
1891 	return;
1892 
1893 requeue:
1894 	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1895 			ctrl->nr_reconnects);
1896 	nvme_tcp_reconnect_or_remove(ctrl);
1897 }
1898 
1899 static void nvme_tcp_error_recovery_work(struct work_struct *work)
1900 {
1901 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1902 				struct nvme_tcp_ctrl, err_work);
1903 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1904 
1905 	nvme_stop_keep_alive(ctrl);
1906 	nvme_tcp_teardown_io_queues(ctrl, false);
1907 	/* unquiesce to fail fast pending requests */
1908 	/* unquiesce so that pending requests fail fast */
1909 	nvme_tcp_teardown_admin_queue(ctrl, false);
1910 	blk_mq_unquiesce_queue(ctrl->admin_q);
1911 
1912 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1913 		/* state change failure is ok if we're in DELETING state */
1914 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1915 		return;
1916 	}
1917 
1918 	nvme_tcp_reconnect_or_remove(ctrl);
1919 }
1920 
1921 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
1922 {
1923 	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1924 	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1925 
1926 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
1927 	blk_mq_quiesce_queue(ctrl->admin_q);
1928 	if (shutdown)
1929 		nvme_shutdown_ctrl(ctrl);
1930 	else
1931 		nvme_disable_ctrl(ctrl);
1932 	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
1933 }
1934 
1935 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
1936 {
1937 	nvme_tcp_teardown_ctrl(ctrl, true);
1938 }
1939 
1940 static void nvme_reset_ctrl_work(struct work_struct *work)
1941 {
1942 	struct nvme_ctrl *ctrl =
1943 		container_of(work, struct nvme_ctrl, reset_work);
1944 
1945 	nvme_stop_ctrl(ctrl);
1946 	nvme_tcp_teardown_ctrl(ctrl, false);
1947 
1948 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1949 		/* state change failure is ok if we're in DELETING state */
1950 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1951 		return;
1952 	}
1953 
1954 	if (nvme_tcp_setup_ctrl(ctrl, false))
1955 		goto out_fail;
1956 
1957 	return;
1958 
1959 out_fail:
1960 	++ctrl->nr_reconnects;
1961 	nvme_tcp_reconnect_or_remove(ctrl);
1962 }
1963 
1964 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
1965 {
1966 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1967 
1968 	if (list_empty(&ctrl->list))
1969 		goto free_ctrl;
1970 
1971 	mutex_lock(&nvme_tcp_ctrl_mutex);
1972 	list_del(&ctrl->list);
1973 	mutex_unlock(&nvme_tcp_ctrl_mutex);
1974 
1975 	nvmf_free_options(nctrl->opts);
1976 free_ctrl:
1977 	kfree(ctrl->queues);
1978 	kfree(ctrl);
1979 }
1980 
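/*
 * SGL helpers: fill in the single SGL descriptor carried in the command
 * capsule. Inline data uses an in-capsule offset descriptor, host data
 * uses a transport SGL carrying only the length, and the "null" variant
 * is used for commands that transfer no data.
 */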
1981 static void nvme_tcp_set_sg_null(struct nvme_command *c)
1982 {
1983 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1984 
1985 	sg->addr = 0;
1986 	sg->length = 0;
1987 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1988 			NVME_SGL_FMT_TRANSPORT_A;
1989 }
1990 
1991 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
1992 		struct nvme_command *c, u32 data_len)
1993 {
1994 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1995 
1996 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1997 	sg->length = cpu_to_le32(data_len);
1998 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1999 }
2000 
2001 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2002 		u32 data_len)
2003 {
2004 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2005 
2006 	sg->addr = 0;
2007 	sg->length = cpu_to_le32(data_len);
2008 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2009 			NVME_SGL_FMT_TRANSPORT_A;
2010 }
2011 
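/*
 * Build and queue the reserved AER command PDU on the admin queue. The
 * async request carries no data, so only the command PDU (plus an
 * optional header digest) is sent.
 */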
2012 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2013 {
2014 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2015 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
2016 	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2017 	struct nvme_command *cmd = &pdu->cmd;
2018 	u8 hdgst = nvme_tcp_hdgst_len(queue);
2019 
2020 	memset(pdu, 0, sizeof(*pdu));
2021 	pdu->hdr.type = nvme_tcp_cmd;
2022 	if (queue->hdr_digest)
2023 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
2024 	pdu->hdr.hlen = sizeof(*pdu);
2025 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2026 
2027 	cmd->common.opcode = nvme_admin_async_event;
2028 	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2029 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
2030 	nvme_tcp_set_sg_null(cmd);
2031 
2032 	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2033 	ctrl->async_req.offset = 0;
2034 	ctrl->async_req.curr_bio = NULL;
2035 	ctrl->async_req.data_len = 0;
2036 
2037 	nvme_tcp_queue_request(&ctrl->async_req);
2038 }
2039 
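/*
 * Request timeout handler: if the controller is not LIVE, flush error
 * recovery and tear down the queues, which completes all outstanding
 * requests, and return BLK_EH_DONE; otherwise start error recovery and
 * rearm the request timer.
 */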
2040 static enum blk_eh_timer_return
2041 nvme_tcp_timeout(struct request *rq, bool reserved)
2042 {
2043 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2044 	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2045 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2046 
2047 	dev_warn(ctrl->ctrl.device,
2048 		"queue %d: timeout request %#x type %d\n",
2049 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2050 
2051 	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
2052 		/*
2053 		 * Teardown immediately if controller times out while starting
2054 		 * or if we already started error recovery. All outstanding
2055 		 * requests are completed on shutdown, so we return BLK_EH_DONE.
2056 		 */
2057 		flush_work(&ctrl->err_work);
2058 		nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2059 		nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
2060 		return BLK_EH_DONE;
2061 	}
2062 
2063 	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
2064 	nvme_tcp_error_recovery(&ctrl->ctrl);
2065 
2066 	return BLK_EH_RESET_TIMER;
2067 }
2068 
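/*
 * Select the SGL type for a request: writes small enough to fit in the
 * capsule are sent inline, everything else is described as host data and
 * transferred in separate data PDUs.
 */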
2069 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2070 			struct request *rq)
2071 {
2072 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2073 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2074 	struct nvme_command *c = &pdu->cmd;
2075 
2076 	c->common.flags |= NVME_CMD_SGL_METABUF;
2077 
2078 	if (rq_data_dir(rq) == WRITE && req->data_len &&
2079 	    req->data_len <= nvme_tcp_inline_data_size(queue))
2080 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
2081 	else
2082 		nvme_tcp_set_sg_host_data(c, req->data_len);
2083 
2084 	return 0;
2085 }
2086 
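/*
 * Initialize the per-request send state and fill in the command PDU
 * header: type, digest flags, header/payload lengths, and the PDU data
 * offset for inline writes.
 */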
2087 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2088 		struct request *rq)
2089 {
2090 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2091 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2092 	struct nvme_tcp_queue *queue = req->queue;
2093 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2094 	blk_status_t ret;
2095 
2096 	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2097 	if (ret)
2098 		return ret;
2099 
2100 	req->state = NVME_TCP_SEND_CMD_PDU;
2101 	req->offset = 0;
2102 	req->data_sent = 0;
2103 	req->pdu_len = 0;
2104 	req->pdu_sent = 0;
2105 	req->data_len = blk_rq_payload_bytes(rq);
2106 	req->curr_bio = rq->bio;
2107 
2108 	if (rq_data_dir(rq) == WRITE &&
2109 	    req->data_len <= nvme_tcp_inline_data_size(queue))
2110 		req->pdu_len = req->data_len;
2111 	else if (req->curr_bio)
2112 		nvme_tcp_init_iter(req, READ);
2113 
2114 	pdu->hdr.type = nvme_tcp_cmd;
2115 	pdu->hdr.flags = 0;
2116 	if (queue->hdr_digest)
2117 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
2118 	if (queue->data_digest && req->pdu_len) {
2119 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
2120 		ddgst = nvme_tcp_ddgst_len(queue);
2121 	}
2122 	pdu->hdr.hlen = sizeof(*pdu);
2123 	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2124 	pdu->hdr.plen =
2125 		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2126 
2127 	ret = nvme_tcp_map_data(queue, rq);
2128 	if (unlikely(ret)) {
2129 		dev_err(queue->ctrl->ctrl.device,
2130 			"Failed to map data (%d)\n", ret);
2131 		return ret;
2132 	}
2133 
2134 	return 0;
2135 }
2136 
2137 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2138 		const struct blk_mq_queue_data *bd)
2139 {
2140 	struct nvme_ns *ns = hctx->queue->queuedata;
2141 	struct nvme_tcp_queue *queue = hctx->driver_data;
2142 	struct request *rq = bd->rq;
2143 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2144 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2145 	blk_status_t ret;
2146 
2147 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2148 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2149 
2150 	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2151 	if (unlikely(ret))
2152 		return ret;
2153 
2154 	blk_mq_start_request(rq);
2155 
2156 	nvme_tcp_queue_request(req);
2157 
2158 	return BLK_STS_OK;
2159 }
2160 
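/*
 * Map blk-mq hardware queues onto the controller's I/O queues. When
 * dedicated write queues were requested, reads get their own range
 * following the default (write) queues; otherwise both maps share the
 * same queues. Poll queues, if any, are mapped last.
 */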
2161 static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2162 {
2163 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
2164 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2165 
2166 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2167 		/* separate read/write queues */
2168 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
2169 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2170 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2171 		set->map[HCTX_TYPE_READ].nr_queues =
2172 			ctrl->io_queues[HCTX_TYPE_READ];
2173 		set->map[HCTX_TYPE_READ].queue_offset =
2174 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2175 	} else {
2176 		/* shared read/write queues */
2177 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
2178 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2179 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2180 		set->map[HCTX_TYPE_READ].nr_queues =
2181 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2182 		set->map[HCTX_TYPE_READ].queue_offset = 0;
2183 	}
2184 	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2185 	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2186 
2187 	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2188 		/* map dedicated poll queues only if we have queues left */
2189 		set->map[HCTX_TYPE_POLL].nr_queues =
2190 				ctrl->io_queues[HCTX_TYPE_POLL];
2191 		set->map[HCTX_TYPE_POLL].queue_offset =
2192 			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2193 			ctrl->io_queues[HCTX_TYPE_READ];
2194 		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2195 	}
2196 
2197 	dev_info(ctrl->ctrl.device,
2198 		"mapped %d/%d/%d default/read/poll queues.\n",
2199 		ctrl->io_queues[HCTX_TYPE_DEFAULT],
2200 		ctrl->io_queues[HCTX_TYPE_READ],
2201 		ctrl->io_queues[HCTX_TYPE_POLL]);
2202 
2203 	return 0;
2204 }
2205 
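/*
 * blk-mq poll callback: busy-poll the socket if nothing has been
 * received yet, then reap completions and report how many CQEs were
 * processed on this queue.
 */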
2206 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2207 {
2208 	struct nvme_tcp_queue *queue = hctx->driver_data;
2209 	struct sock *sk = queue->sock->sk;
2210 
2211 	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
2212 		sk_busy_loop(sk, true);
2213 	nvme_tcp_try_recv(queue);
2214 	return queue->nr_cqe;
2215 }
2216 
2217 static struct blk_mq_ops nvme_tcp_mq_ops = {
2218 	.queue_rq	= nvme_tcp_queue_rq,
2219 	.complete	= nvme_complete_rq,
2220 	.init_request	= nvme_tcp_init_request,
2221 	.exit_request	= nvme_tcp_exit_request,
2222 	.init_hctx	= nvme_tcp_init_hctx,
2223 	.timeout	= nvme_tcp_timeout,
2224 	.map_queues	= nvme_tcp_map_queues,
2225 	.poll		= nvme_tcp_poll,
2226 };
2227 
2228 static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2229 	.queue_rq	= nvme_tcp_queue_rq,
2230 	.complete	= nvme_complete_rq,
2231 	.init_request	= nvme_tcp_init_request,
2232 	.exit_request	= nvme_tcp_exit_request,
2233 	.init_hctx	= nvme_tcp_init_admin_hctx,
2234 	.timeout	= nvme_tcp_timeout,
2235 };
2236 
2237 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2238 	.name			= "tcp",
2239 	.module			= THIS_MODULE,
2240 	.flags			= NVME_F_FABRICS,
2241 	.reg_read32		= nvmf_reg_read32,
2242 	.reg_read64		= nvmf_reg_read64,
2243 	.reg_write32		= nvmf_reg_write32,
2244 	.free_ctrl		= nvme_tcp_free_ctrl,
2245 	.submit_async_event	= nvme_tcp_submit_async_event,
2246 	.delete_ctrl		= nvme_tcp_delete_ctrl,
2247 	.get_address		= nvmf_get_address,
2248 };
2249 
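/*
 * Duplicate-connect check: scan the global controller list for an
 * existing controller whose address options match the new request.
 */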
2250 static bool
2251 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2252 {
2253 	struct nvme_tcp_ctrl *ctrl;
2254 	bool found = false;
2255 
2256 	mutex_lock(&nvme_tcp_ctrl_mutex);
2257 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2258 		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2259 		if (found)
2260 			break;
2261 	}
2262 	mutex_unlock(&nvme_tcp_ctrl_mutex);
2263 
2264 	return found;
2265 }
2266 
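/*
 * Transport create_ctrl entry point: allocate and initialize the TCP
 * controller, resolve the target (and optional host) address, register
 * the controller with the core, and perform the initial connect via
 * nvme_tcp_setup_ctrl().
 */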
2267 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2268 		struct nvmf_ctrl_options *opts)
2269 {
2270 	struct nvme_tcp_ctrl *ctrl;
2271 	int ret;
2272 
2273 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2274 	if (!ctrl)
2275 		return ERR_PTR(-ENOMEM);
2276 
2277 	INIT_LIST_HEAD(&ctrl->list);
2278 	ctrl->ctrl.opts = opts;
2279 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2280 				opts->nr_poll_queues + 1;
2281 	ctrl->ctrl.sqsize = opts->queue_size - 1;
2282 	ctrl->ctrl.kato = opts->kato;
2283 
2284 	INIT_DELAYED_WORK(&ctrl->connect_work,
2285 			nvme_tcp_reconnect_ctrl_work);
2286 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2287 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2288 
2289 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2290 		opts->trsvcid =
2291 			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2292 		if (!opts->trsvcid) {
2293 			ret = -ENOMEM;
2294 			goto out_free_ctrl;
2295 		}
2296 		opts->mask |= NVMF_OPT_TRSVCID;
2297 	}
2298 
2299 	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2300 			opts->traddr, opts->trsvcid, &ctrl->addr);
2301 	if (ret) {
2302 		pr_err("malformed address passed: %s:%s\n",
2303 			opts->traddr, opts->trsvcid);
2304 		goto out_free_ctrl;
2305 	}
2306 
2307 	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2308 		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2309 			opts->host_traddr, NULL, &ctrl->src_addr);
2310 		if (ret) {
2311 			pr_err("malformed src address passed: %s\n",
2312 			       opts->host_traddr);
2313 			goto out_free_ctrl;
2314 		}
2315 	}
2316 
2317 	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2318 		ret = -EALREADY;
2319 		goto out_free_ctrl;
2320 	}
2321 
2322 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2323 				GFP_KERNEL);
2324 	if (!ctrl->queues) {
2325 		ret = -ENOMEM;
2326 		goto out_free_ctrl;
2327 	}
2328 
2329 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2330 	if (ret)
2331 		goto out_kfree_queues;
2332 
2333 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2334 		WARN_ON_ONCE(1);
2335 		ret = -EINTR;
2336 		goto out_uninit_ctrl;
2337 	}
2338 
2339 	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2340 	if (ret)
2341 		goto out_uninit_ctrl;
2342 
2343 	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2344 		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2345 
2346 	nvme_get_ctrl(&ctrl->ctrl);
2347 
2348 	mutex_lock(&nvme_tcp_ctrl_mutex);
2349 	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2350 	mutex_unlock(&nvme_tcp_ctrl_mutex);
2351 
2352 	return &ctrl->ctrl;
2353 
2354 out_uninit_ctrl:
2355 	nvme_uninit_ctrl(&ctrl->ctrl);
2356 	nvme_put_ctrl(&ctrl->ctrl);
2357 	if (ret > 0)
2358 		ret = -EIO;
2359 	return ERR_PTR(ret);
2360 out_kfree_queues:
2361 	kfree(ctrl->queues);
2362 out_free_ctrl:
2363 	kfree(ctrl);
2364 	return ERR_PTR(ret);
2365 }
2366 
2367 static struct nvmf_transport_ops nvme_tcp_transport = {
2368 	.name		= "tcp",
2369 	.module		= THIS_MODULE,
2370 	.required_opts	= NVMF_OPT_TRADDR,
2371 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2372 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2373 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2374 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2375 			  NVMF_OPT_TOS,
2376 	.create_ctrl	= nvme_tcp_create_ctrl,
2377 };
2378 
2379 static int __init nvme_tcp_init_module(void)
2380 {
2381 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2382 			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2383 	if (!nvme_tcp_wq)
2384 		return -ENOMEM;
2385 
2386 	nvmf_register_transport(&nvme_tcp_transport);
2387 	return 0;
2388 }
2389 
2390 static void __exit nvme_tcp_cleanup_module(void)
2391 {
2392 	struct nvme_tcp_ctrl *ctrl;
2393 
2394 	nvmf_unregister_transport(&nvme_tcp_transport);
2395 
2396 	mutex_lock(&nvme_tcp_ctrl_mutex);
2397 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2398 		nvme_delete_ctrl(&ctrl->ctrl);
2399 	mutex_unlock(&nvme_tcp_ctrl_mutex);
2400 	flush_workqueue(nvme_delete_wq);
2401 
2402 	destroy_workqueue(nvme_tcp_wq);
2403 }
2404 
2405 module_init(nvme_tcp_init_module);
2406 module_exit(nvme_tcp_cleanup_module);
2407 
2408 MODULE_LICENSE("GPL v2");
2409