xref: /openbmc/linux/drivers/nvme/host/tcp.c (revision 462b8b2d)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * Any non-zero value is sufficient to indicate general consideration of
 * possible optimizations.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

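/*
 * Per-request send state machine: the command PDU always goes first; a
 * write then sends an H2C data PDU (when answering an R2T), the data
 * payload, and finally the data digest when data digests are enabled.
 */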
enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	__le16			status;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

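/*
 * The async (AEN) request is embedded in the controller structure and is
 * not backed by a blk-mq allocated struct request.
 */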
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

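/*
 * Set up req->iter as a bvec iterator over either the request's special
 * payload or the current bio, counting multi-page bvec segments so the
 * iterator spans the whole bio.
 */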
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

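/*
 * There is more to send if anything is queued on either list, or if the
 * direct-send path has signalled that more requests are on their way.
 */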
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list, we can try to send directly;
	 * otherwise queue io_work.  Also, only do that if we are on the same
	 * cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

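/*
 * Splice the lockless req_list onto send_list.  llist_del_all returns the
 * entries newest-first, so re-adding each one at the head of send_list
 * restores FIFO submission order.
 */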
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

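/*
 * Compute the header digest over @len bytes of the PDU and store the
 * result immediately after them (at pdu + len).
 */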
static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

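/*
 * Verify a received header digest: save the digest that came off the wire,
 * recompute it in place over the received header (overwriting the trailing
 * digest bytes), and compare the two.
 */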
static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu;
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	pdu = req->pdu;
	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &pdu->cmd;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

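/* Reset the receive state machine to expect a fresh PDU header. */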
static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad cqe.command_id %#x on queue %d\n",
			cqe->command_id, nvme_tcp_queue_id(queue));
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	req = blk_mq_rq_to_pdu(rq);
	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
		req->status = cqe->status;

	if (!nvme_try_complete_req(rq, req->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad c2hdata.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

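/*
 * Build the host-to-controller data PDU that answers an R2T: reuse the
 * request's PDU buffer, echo the controller's transfer tag, and cover
 * exactly the byte range the controller asked for.
 */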
static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;
	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = nvme_cid(rq);
	data->data_offset = pdu->r2t_offset;
	data->data_length = cpu_to_le32(req->pdu_len);
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	u32 r2t_length = le32_to_cpu(pdu->r2t_length);

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	if (unlikely(!r2t_length)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, r2t_length);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + r2t_length > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, r2t_length, req->data_len, req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
		return -EPROTO;
	}

	nvme_tcp_setup_h2c_data_pdu(req, pdu);
	nvme_tcp_queue_request(req, false, true);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

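/*
 * Complete a request with a raw NVMe status code; the code is shifted into
 * bits 15:1 of the wire-format completion status field, whose bit 0 is the
 * phase tag position.
 */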
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
			      unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq,
						le16_to_cpu(req->status));
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);

		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

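/*
 * ->read_sock() callback: consume the skb according to the current receive
 * state (PDU header, data, or data digest), looping until the segment is
 * drained or an error forces error recovery.
 */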
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}

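/*
 * Send the data payload page by page, using zero-copy kernel_sendpage when
 * sendpage_ok() permits it and sock_no_sendpage otherwise.  Returns 1 once
 * the last payload byte of the PDU went out, or the (<= 0) sendpage result
 * on backpressure or error.
 */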
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int req_data_len = req->data_len;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/*
		 * update the request iterator except for the last payload send
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
		if (req_data_sent + ret < req_data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	size_t offset = req->offset;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

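/*
 * Drive the current request through its send states.  Returns 1 when
 * progress was made, 0 when there is nothing (more) to send, and a
 * negative error after failing the request on a fatal send error
 * (-EAGAIN is mapped to 0).
 */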
10723f2304f8SSagi Grimberg static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
10733f2304f8SSagi Grimberg {
10743f2304f8SSagi Grimberg 	struct nvme_tcp_request *req;
10753f2304f8SSagi Grimberg 	int ret = 1;
10763f2304f8SSagi Grimberg 
10773f2304f8SSagi Grimberg 	if (!queue->request) {
10783f2304f8SSagi Grimberg 		queue->request = nvme_tcp_fetch_request(queue);
10793f2304f8SSagi Grimberg 		if (!queue->request)
10803f2304f8SSagi Grimberg 			return 0;
10813f2304f8SSagi Grimberg 	}
10823f2304f8SSagi Grimberg 	req = queue->request;
10833f2304f8SSagi Grimberg 
10843f2304f8SSagi Grimberg 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
10853f2304f8SSagi Grimberg 		ret = nvme_tcp_try_send_cmd_pdu(req);
10863f2304f8SSagi Grimberg 		if (ret <= 0)
10873f2304f8SSagi Grimberg 			goto done;
10883f2304f8SSagi Grimberg 		if (!nvme_tcp_has_inline_data(req))
10893f2304f8SSagi Grimberg 			return ret;
10903f2304f8SSagi Grimberg 	}
10913f2304f8SSagi Grimberg 
10923f2304f8SSagi Grimberg 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
10933f2304f8SSagi Grimberg 		ret = nvme_tcp_try_send_data_pdu(req);
10943f2304f8SSagi Grimberg 		if (ret <= 0)
10953f2304f8SSagi Grimberg 			goto done;
10963f2304f8SSagi Grimberg 	}
10973f2304f8SSagi Grimberg 
10983f2304f8SSagi Grimberg 	if (req->state == NVME_TCP_SEND_DATA) {
10993f2304f8SSagi Grimberg 		ret = nvme_tcp_try_send_data(req);
11003f2304f8SSagi Grimberg 		if (ret <= 0)
11013f2304f8SSagi Grimberg 			goto done;
11023f2304f8SSagi Grimberg 	}
11033f2304f8SSagi Grimberg 
11043f2304f8SSagi Grimberg 	if (req->state == NVME_TCP_SEND_DDGST)
11053f2304f8SSagi Grimberg 		ret = nvme_tcp_try_send_ddgst(req);
11063f2304f8SSagi Grimberg done:
11075ff4e112SSagi Grimberg 	if (ret == -EAGAIN) {
11083f2304f8SSagi Grimberg 		ret = 0;
11095ff4e112SSagi Grimberg 	} else if (ret < 0) {
11105ff4e112SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
11115ff4e112SSagi Grimberg 			"failed to send request %d\n", ret);
11125ff4e112SSagi Grimberg 		if (ret != -EPIPE && ret != -ECONNRESET)
11135ff4e112SSagi Grimberg 			nvme_tcp_fail_request(queue->request);
11145ff4e112SSagi Grimberg 		nvme_tcp_done_send_req(queue);
11155ff4e112SSagi Grimberg 	}
11163f2304f8SSagi Grimberg 	return ret;
11173f2304f8SSagi Grimberg }
11183f2304f8SSagi Grimberg 
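/*
 * Drain whatever has accumulated on the socket via ->read_sock(),
 * feeding each skb to nvme_tcp_recv_skb() under the socket lock.
 * queue->nr_cqe counts the completions seen in this pass (used by the
 * polling path); the return value is the number of bytes consumed or
 * a negative error.
 */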
11193f2304f8SSagi Grimberg static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
11203f2304f8SSagi Grimberg {
112110407ec9SPotnuri Bharat Teja 	struct socket *sock = queue->sock;
112210407ec9SPotnuri Bharat Teja 	struct sock *sk = sock->sk;
11233f2304f8SSagi Grimberg 	read_descriptor_t rd_desc;
11243f2304f8SSagi Grimberg 	int consumed;
11253f2304f8SSagi Grimberg 
11263f2304f8SSagi Grimberg 	rd_desc.arg.data = queue;
11273f2304f8SSagi Grimberg 	rd_desc.count = 1;
11283f2304f8SSagi Grimberg 	lock_sock(sk);
11291a9460ceSSagi Grimberg 	queue->nr_cqe = 0;
113010407ec9SPotnuri Bharat Teja 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
11313f2304f8SSagi Grimberg 	release_sock(sk);
11323f2304f8SSagi Grimberg 	return consumed;
11333f2304f8SSagi Grimberg }
11343f2304f8SSagi Grimberg 
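/*
 * Queue work context: alternate between sending and receiving for a
 * budget of roughly one millisecond. Sends are only attempted if
 * send_mutex can be taken without blocking, since the submission path
 * may be sending directly on the socket. When the budget expires with
 * work still pending, the work re-queues itself on the same io_cpu to
 * preserve cache locality.
 */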
11353f2304f8SSagi Grimberg static void nvme_tcp_io_work(struct work_struct *w)
11363f2304f8SSagi Grimberg {
11373f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue =
11383f2304f8SSagi Grimberg 		container_of(w, struct nvme_tcp_queue, io_work);
1139ddef2957SWunderlich, Mark 	unsigned long deadline = jiffies + msecs_to_jiffies(1);
11403f2304f8SSagi Grimberg 
11413f2304f8SSagi Grimberg 	do {
11423f2304f8SSagi Grimberg 		bool pending = false;
11433f2304f8SSagi Grimberg 		int result;
11443f2304f8SSagi Grimberg 
1145db5ad6b7SSagi Grimberg 		if (mutex_trylock(&queue->send_mutex)) {
11463f2304f8SSagi Grimberg 			result = nvme_tcp_try_send(queue);
1147db5ad6b7SSagi Grimberg 			mutex_unlock(&queue->send_mutex);
11485ff4e112SSagi Grimberg 			if (result > 0)
11493f2304f8SSagi Grimberg 				pending = true;
11505ff4e112SSagi Grimberg 			else if (unlikely(result < 0))
11515ff4e112SSagi Grimberg 				break;
115270f437fbSKeith Busch 		}
11533f2304f8SSagi Grimberg 
11543f2304f8SSagi Grimberg 		result = nvme_tcp_try_recv(queue);
11553f2304f8SSagi Grimberg 		if (result > 0)
11563f2304f8SSagi Grimberg 			pending = true;
1157761ad26cSSagi Grimberg 		else if (unlikely(result < 0))
115839d06079SSagi Grimberg 			return;
11593f2304f8SSagi Grimberg 
11603f2304f8SSagi Grimberg 		if (!pending)
11613f2304f8SSagi Grimberg 			return;
11623f2304f8SSagi Grimberg 
1163ddef2957SWunderlich, Mark 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
11643f2304f8SSagi Grimberg 
11653f2304f8SSagi Grimberg 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
11663f2304f8SSagi Grimberg }
11673f2304f8SSagi Grimberg 
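/*
 * Header and data digests are both CRC32C, computed via one ahash
 * transform shared by a send-side and a receive-side request; the tfm
 * is recovered from the receive request when freeing.
 */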
11683f2304f8SSagi Grimberg static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
11693f2304f8SSagi Grimberg {
11703f2304f8SSagi Grimberg 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
11713f2304f8SSagi Grimberg 
11723f2304f8SSagi Grimberg 	ahash_request_free(queue->rcv_hash);
11733f2304f8SSagi Grimberg 	ahash_request_free(queue->snd_hash);
11743f2304f8SSagi Grimberg 	crypto_free_ahash(tfm);
11753f2304f8SSagi Grimberg }
11763f2304f8SSagi Grimberg 
11773f2304f8SSagi Grimberg static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
11783f2304f8SSagi Grimberg {
11793f2304f8SSagi Grimberg 	struct crypto_ahash *tfm;
11803f2304f8SSagi Grimberg 
11813f2304f8SSagi Grimberg 	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
11823f2304f8SSagi Grimberg 	if (IS_ERR(tfm))
11833f2304f8SSagi Grimberg 		return PTR_ERR(tfm);
11843f2304f8SSagi Grimberg 
11853f2304f8SSagi Grimberg 	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
11863f2304f8SSagi Grimberg 	if (!queue->snd_hash)
11873f2304f8SSagi Grimberg 		goto free_tfm;
11883f2304f8SSagi Grimberg 	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
11893f2304f8SSagi Grimberg 
11903f2304f8SSagi Grimberg 	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
11913f2304f8SSagi Grimberg 	if (!queue->rcv_hash)
11923f2304f8SSagi Grimberg 		goto free_snd_hash;
11933f2304f8SSagi Grimberg 	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
11943f2304f8SSagi Grimberg 
11953f2304f8SSagi Grimberg 	return 0;
11963f2304f8SSagi Grimberg free_snd_hash:
11973f2304f8SSagi Grimberg 	ahash_request_free(queue->snd_hash);
11983f2304f8SSagi Grimberg free_tfm:
11993f2304f8SSagi Grimberg 	crypto_free_ahash(tfm);
12003f2304f8SSagi Grimberg 	return -ENOMEM;
12013f2304f8SSagi Grimberg }
12023f2304f8SSagi Grimberg 
12033f2304f8SSagi Grimberg static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
12043f2304f8SSagi Grimberg {
12053f2304f8SSagi Grimberg 	struct nvme_tcp_request *async = &ctrl->async_req;
12063f2304f8SSagi Grimberg 
12073f2304f8SSagi Grimberg 	page_frag_free(async->pdu);
12083f2304f8SSagi Grimberg }
12093f2304f8SSagi Grimberg 
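/*
 * The async event (AER) command carries no data, so its slot only
 * needs a command PDU plus header digest space, carved out of the
 * admin queue's page-frag cache.
 */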
12103f2304f8SSagi Grimberg static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
12113f2304f8SSagi Grimberg {
12123f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
12133f2304f8SSagi Grimberg 	struct nvme_tcp_request *async = &ctrl->async_req;
12143f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
12153f2304f8SSagi Grimberg 
12163f2304f8SSagi Grimberg 	async->pdu = page_frag_alloc(&queue->pf_cache,
12173f2304f8SSagi Grimberg 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
12183f2304f8SSagi Grimberg 		GFP_KERNEL | __GFP_ZERO);
12193f2304f8SSagi Grimberg 	if (!async->pdu)
12203f2304f8SSagi Grimberg 		return -ENOMEM;
12213f2304f8SSagi Grimberg 
12223f2304f8SSagi Grimberg 	async->queue = &ctrl->queues[0];
12233f2304f8SSagi Grimberg 	return 0;
12243f2304f8SSagi Grimberg }
12253f2304f8SSagi Grimberg 
12263f2304f8SSagi Grimberg static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
12273f2304f8SSagi Grimberg {
1228a5053c92SMaurizio Lombardi 	struct page *page;
12293f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
12303f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
12313f2304f8SSagi Grimberg 
12323f2304f8SSagi Grimberg 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
12333f2304f8SSagi Grimberg 		return;
12343f2304f8SSagi Grimberg 
12353f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest)
12363f2304f8SSagi Grimberg 		nvme_tcp_free_crypto(queue);
12373f2304f8SSagi Grimberg 
1238a5053c92SMaurizio Lombardi 	if (queue->pf_cache.va) {
1239a5053c92SMaurizio Lombardi 		page = virt_to_head_page(queue->pf_cache.va);
1240a5053c92SMaurizio Lombardi 		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1241a5053c92SMaurizio Lombardi 		queue->pf_cache.va = NULL;
1242a5053c92SMaurizio Lombardi 	}
12433f2304f8SSagi Grimberg 	sock_release(queue->sock);
12443f2304f8SSagi Grimberg 	kfree(queue->pdu);
1245d48f92cdSKeith Busch 	mutex_destroy(&queue->send_mutex);
12469ebbfe49SChao Leng 	mutex_destroy(&queue->queue_lock);
12473f2304f8SSagi Grimberg }
12483f2304f8SSagi Grimberg 
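/*
 * NVMe/TCP initialization handshake: send an ICReq advertising
 * PFV 1.0, a single in-flight R2T (maxr2t is 0's based), no PDU
 * alignment, and the requested digests, then validate that the ICResp
 * echoes a compatible configuration. A wrong PDU type or length, PFV
 * mismatch, digest disagreement, or a non-zero CPDA fails the queue
 * with -EINVAL.
 */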
12493f2304f8SSagi Grimberg static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
12503f2304f8SSagi Grimberg {
12513f2304f8SSagi Grimberg 	struct nvme_tcp_icreq_pdu *icreq;
12523f2304f8SSagi Grimberg 	struct nvme_tcp_icresp_pdu *icresp;
12533f2304f8SSagi Grimberg 	struct msghdr msg = {};
12543f2304f8SSagi Grimberg 	struct kvec iov;
12553f2304f8SSagi Grimberg 	bool ctrl_hdgst, ctrl_ddgst;
12563f2304f8SSagi Grimberg 	int ret;
12573f2304f8SSagi Grimberg 
12583f2304f8SSagi Grimberg 	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
12593f2304f8SSagi Grimberg 	if (!icreq)
12603f2304f8SSagi Grimberg 		return -ENOMEM;
12613f2304f8SSagi Grimberg 
12623f2304f8SSagi Grimberg 	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
12633f2304f8SSagi Grimberg 	if (!icresp) {
12643f2304f8SSagi Grimberg 		ret = -ENOMEM;
12653f2304f8SSagi Grimberg 		goto free_icreq;
12663f2304f8SSagi Grimberg 	}
12673f2304f8SSagi Grimberg 
12683f2304f8SSagi Grimberg 	icreq->hdr.type = nvme_tcp_icreq;
12693f2304f8SSagi Grimberg 	icreq->hdr.hlen = sizeof(*icreq);
12703f2304f8SSagi Grimberg 	icreq->hdr.pdo = 0;
12713f2304f8SSagi Grimberg 	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
12723f2304f8SSagi Grimberg 	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
12733f2304f8SSagi Grimberg 	icreq->maxr2t = 0; /* single inflight r2t supported */
12743f2304f8SSagi Grimberg 	icreq->hpda = 0; /* no alignment constraint */
12753f2304f8SSagi Grimberg 	if (queue->hdr_digest)
12763f2304f8SSagi Grimberg 		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
12773f2304f8SSagi Grimberg 	if (queue->data_digest)
12783f2304f8SSagi Grimberg 		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
12793f2304f8SSagi Grimberg 
12803f2304f8SSagi Grimberg 	iov.iov_base = icreq;
12813f2304f8SSagi Grimberg 	iov.iov_len = sizeof(*icreq);
12823f2304f8SSagi Grimberg 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
12833f2304f8SSagi Grimberg 	if (ret < 0)
12843f2304f8SSagi Grimberg 		goto free_icresp;
12853f2304f8SSagi Grimberg 
12863f2304f8SSagi Grimberg 	memset(&msg, 0, sizeof(msg));
12873f2304f8SSagi Grimberg 	iov.iov_base = icresp;
12883f2304f8SSagi Grimberg 	iov.iov_len = sizeof(*icresp);
12893f2304f8SSagi Grimberg 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
12903f2304f8SSagi Grimberg 			iov.iov_len, msg.msg_flags);
12913f2304f8SSagi Grimberg 	if (ret < 0)
12923f2304f8SSagi Grimberg 		goto free_icresp;
12933f2304f8SSagi Grimberg 
12943f2304f8SSagi Grimberg 	ret = -EINVAL;
12953f2304f8SSagi Grimberg 	if (icresp->hdr.type != nvme_tcp_icresp) {
12963f2304f8SSagi Grimberg 		pr_err("queue %d: bad type returned %d\n",
12973f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->hdr.type);
12983f2304f8SSagi Grimberg 		goto free_icresp;
12993f2304f8SSagi Grimberg 	}
13003f2304f8SSagi Grimberg 
13013f2304f8SSagi Grimberg 	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
13023f2304f8SSagi Grimberg 		pr_err("queue %d: bad pdu length returned %d\n",
13033f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->hdr.plen);
13043f2304f8SSagi Grimberg 		goto free_icresp;
13053f2304f8SSagi Grimberg 	}
13063f2304f8SSagi Grimberg 
13073f2304f8SSagi Grimberg 	if (icresp->pfv != NVME_TCP_PFV_1_0) {
13083f2304f8SSagi Grimberg 		pr_err("queue %d: bad pfv returned %d\n",
13093f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->pfv);
13103f2304f8SSagi Grimberg 		goto free_icresp;
13113f2304f8SSagi Grimberg 	}
13123f2304f8SSagi Grimberg 
13133f2304f8SSagi Grimberg 	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
13143f2304f8SSagi Grimberg 	if ((queue->data_digest && !ctrl_ddgst) ||
13153f2304f8SSagi Grimberg 	    (!queue->data_digest && ctrl_ddgst)) {
13163f2304f8SSagi Grimberg 		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
13173f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue),
13183f2304f8SSagi Grimberg 			queue->data_digest ? "enabled" : "disabled",
13193f2304f8SSagi Grimberg 			ctrl_ddgst ? "enabled" : "disabled");
13203f2304f8SSagi Grimberg 		goto free_icresp;
13213f2304f8SSagi Grimberg 	}
13223f2304f8SSagi Grimberg 
13233f2304f8SSagi Grimberg 	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
13243f2304f8SSagi Grimberg 	if ((queue->hdr_digest && !ctrl_hdgst) ||
13253f2304f8SSagi Grimberg 	    (!queue->hdr_digest && ctrl_hdgst)) {
13263f2304f8SSagi Grimberg 		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
13273f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue),
13283f2304f8SSagi Grimberg 			queue->hdr_digest ? "enabled" : "disabled",
13293f2304f8SSagi Grimberg 			ctrl_hdgst ? "enabled" : "disabled");
13303f2304f8SSagi Grimberg 		goto free_icresp;
13313f2304f8SSagi Grimberg 	}
13323f2304f8SSagi Grimberg 
13333f2304f8SSagi Grimberg 	if (icresp->cpda != 0) {
13343f2304f8SSagi Grimberg 		pr_err("queue %d: unsupported cpda returned %d\n",
13353f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->cpda);
13363f2304f8SSagi Grimberg 		goto free_icresp;
13373f2304f8SSagi Grimberg 	}
13383f2304f8SSagi Grimberg 
13393f2304f8SSagi Grimberg 	ret = 0;
13403f2304f8SSagi Grimberg free_icresp:
13413f2304f8SSagi Grimberg 	kfree(icresp);
13423f2304f8SSagi Grimberg free_icreq:
13433f2304f8SSagi Grimberg 	kfree(icreq);
13443f2304f8SSagi Grimberg 	return ret;
13453f2304f8SSagi Grimberg }
13463f2304f8SSagi Grimberg 
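/*
 * Queue ids are laid out contiguously: qid 0 is the admin queue,
 * followed by the default (write), read, and poll I/O queue ranges.
 * Each classifier below checks that the qid lies past every earlier
 * range. For example, with 4 default, 2 read and 2 poll queues, qids
 * 1-4 are default, 5-6 read, and 7-8 poll.
 */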
134740510a63SSagi Grimberg static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
134840510a63SSagi Grimberg {
134940510a63SSagi Grimberg 	return nvme_tcp_queue_id(queue) == 0;
135040510a63SSagi Grimberg }
135140510a63SSagi Grimberg 
135240510a63SSagi Grimberg static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
135340510a63SSagi Grimberg {
135440510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
135540510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
135640510a63SSagi Grimberg 
135740510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
135840510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
135940510a63SSagi Grimberg }
136040510a63SSagi Grimberg 
136140510a63SSagi Grimberg static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
136240510a63SSagi Grimberg {
136340510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
136440510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
136540510a63SSagi Grimberg 
136640510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
136740510a63SSagi Grimberg 		!nvme_tcp_default_queue(queue) &&
136840510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
136940510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_READ];
137040510a63SSagi Grimberg }
137140510a63SSagi Grimberg 
137240510a63SSagi Grimberg static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
137340510a63SSagi Grimberg {
137440510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
137540510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
137640510a63SSagi Grimberg 
137740510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
137840510a63SSagi Grimberg 		!nvme_tcp_default_queue(queue) &&
137940510a63SSagi Grimberg 		!nvme_tcp_read_queue(queue) &&
138040510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
138140510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_READ] +
138240510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_POLL];
138340510a63SSagi Grimberg }
138440510a63SSagi Grimberg 
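/*
 * Compute n, the queue's 0-based index within its class, and pin
 * io_work to the n-th online CPU (wrapping around), so queues of each
 * class fan out over the online CPUs starting from the first one.
 */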
138540510a63SSagi Grimberg static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
138640510a63SSagi Grimberg {
138740510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
138840510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
138940510a63SSagi Grimberg 	int n = 0;
139040510a63SSagi Grimberg 
139140510a63SSagi Grimberg 	if (nvme_tcp_default_queue(queue))
139240510a63SSagi Grimberg 		n = qid - 1;
139340510a63SSagi Grimberg 	else if (nvme_tcp_read_queue(queue))
139440510a63SSagi Grimberg 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
139540510a63SSagi Grimberg 	else if (nvme_tcp_poll_queue(queue))
139640510a63SSagi Grimberg 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
139740510a63SSagi Grimberg 				ctrl->io_queues[HCTX_TYPE_READ] - 1;
139840510a63SSagi Grimberg 	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
139940510a63SSagi Grimberg }
140040510a63SSagi Grimberg 
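/*
 * Bring up a single queue: create and tune the TCP socket (one SYN
 * retry, TCP_NODELAY, no lingering on close, optional priority and
 * TOS), optionally bind to a source address and/or an interface, set
 * up the digests and the receive PDU buffer, connect, and run the
 * ICReq/ICResp handshake before installing the nvme_tcp socket
 * callbacks.
 */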
14013f2304f8SSagi Grimberg static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
14023f2304f8SSagi Grimberg 		int qid, size_t queue_size)
14033f2304f8SSagi Grimberg {
14043f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
14053f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
14066ebf71baSChristoph Hellwig 	int ret, rcv_pdu_size;
14073f2304f8SSagi Grimberg 
14089ebbfe49SChao Leng 	mutex_init(&queue->queue_lock);
14093f2304f8SSagi Grimberg 	queue->ctrl = ctrl;
141015ec928aSSagi Grimberg 	init_llist_head(&queue->req_list);
14113f2304f8SSagi Grimberg 	INIT_LIST_HEAD(&queue->send_list);
1412db5ad6b7SSagi Grimberg 	mutex_init(&queue->send_mutex);
14133f2304f8SSagi Grimberg 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
14143f2304f8SSagi Grimberg 	queue->queue_size = queue_size;
14153f2304f8SSagi Grimberg 
14163f2304f8SSagi Grimberg 	if (qid > 0)
14179924b030SIsrael Rukshin 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
14183f2304f8SSagi Grimberg 	else
14193f2304f8SSagi Grimberg 		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
14203f2304f8SSagi Grimberg 						NVME_TCP_ADMIN_CCSZ;
14213f2304f8SSagi Grimberg 
14223f2304f8SSagi Grimberg 	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
14233f2304f8SSagi Grimberg 			IPPROTO_TCP, &queue->sock);
14243f2304f8SSagi Grimberg 	if (ret) {
14259924b030SIsrael Rukshin 		dev_err(nctrl->device,
14263f2304f8SSagi Grimberg 			"failed to create socket: %d\n", ret);
14279ebbfe49SChao Leng 		goto err_destroy_mutex;
14283f2304f8SSagi Grimberg 	}
14293f2304f8SSagi Grimberg 
14303f2304f8SSagi Grimberg 	/* Single syn retry */
1431557eadfcSChristoph Hellwig 	tcp_sock_set_syncnt(queue->sock->sk, 1);
14323f2304f8SSagi Grimberg 
14333f2304f8SSagi Grimberg 	/* Set TCP no delay */
143412abc5eeSChristoph Hellwig 	tcp_sock_set_nodelay(queue->sock->sk);
14353f2304f8SSagi Grimberg 
14363f2304f8SSagi Grimberg 	/*
14373f2304f8SSagi Grimberg 	 * Cleanup whatever is sitting in the TCP transmit queue on socket
14383f2304f8SSagi Grimberg 	 * close. This is done to prevent stale data from being sent should
14393f2304f8SSagi Grimberg 	 * the network connection be restored before TCP times out.
14403f2304f8SSagi Grimberg 	 */
1441c433594cSChristoph Hellwig 	sock_no_linger(queue->sock->sk);
14423f2304f8SSagi Grimberg 
14436e434967SChristoph Hellwig 	if (so_priority > 0)
14446e434967SChristoph Hellwig 		sock_set_priority(queue->sock->sk, so_priority);
14459912ade3SWunderlich, Mark 
1446bb13985dSIsrael Rukshin 	/* Set socket type of service */
14476ebf71baSChristoph Hellwig 	if (nctrl->opts->tos >= 0)
14486ebf71baSChristoph Hellwig 		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1449bb13985dSIsrael Rukshin 
1450adc99fd3SSagi Grimberg 	/* Set a 10 second timeout for the icresp recvmsg */
1451adc99fd3SSagi Grimberg 	queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1452adc99fd3SSagi Grimberg 
14533f2304f8SSagi Grimberg 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
145440510a63SSagi Grimberg 	nvme_tcp_set_queue_io_cpu(queue);
14553f2304f8SSagi Grimberg 	queue->request = NULL;
14563f2304f8SSagi Grimberg 	queue->data_remaining = 0;
14573f2304f8SSagi Grimberg 	queue->ddgst_remaining = 0;
14583f2304f8SSagi Grimberg 	queue->pdu_remaining = 0;
14593f2304f8SSagi Grimberg 	queue->pdu_offset = 0;
14603f2304f8SSagi Grimberg 	sk_set_memalloc(queue->sock->sk);
14613f2304f8SSagi Grimberg 
14629924b030SIsrael Rukshin 	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
14633f2304f8SSagi Grimberg 		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
14643f2304f8SSagi Grimberg 			sizeof(ctrl->src_addr));
14653f2304f8SSagi Grimberg 		if (ret) {
14669924b030SIsrael Rukshin 			dev_err(nctrl->device,
14673f2304f8SSagi Grimberg 				"failed to bind queue %d socket %d\n",
14683f2304f8SSagi Grimberg 				qid, ret);
14693f2304f8SSagi Grimberg 			goto err_sock;
14703f2304f8SSagi Grimberg 		}
14713f2304f8SSagi Grimberg 	}
14723f2304f8SSagi Grimberg 
14733ede8f72SMartin Belanger 	if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
14743ede8f72SMartin Belanger 		char *iface = nctrl->opts->host_iface;
14753ede8f72SMartin Belanger 		sockptr_t optval = KERNEL_SOCKPTR(iface);
14763ede8f72SMartin Belanger 
14773ede8f72SMartin Belanger 		ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
14783ede8f72SMartin Belanger 				      optval, strlen(iface));
14793ede8f72SMartin Belanger 		if (ret) {
14803ede8f72SMartin Belanger 			dev_err(nctrl->device,
14813ede8f72SMartin Belanger 			  "failed to bind to interface %s queue %d err %d\n",
14823ede8f72SMartin Belanger 			  iface, qid, ret);
14833ede8f72SMartin Belanger 			goto err_sock;
14843ede8f72SMartin Belanger 		}
14853ede8f72SMartin Belanger 	}
14863ede8f72SMartin Belanger 
14873f2304f8SSagi Grimberg 	queue->hdr_digest = nctrl->opts->hdr_digest;
14883f2304f8SSagi Grimberg 	queue->data_digest = nctrl->opts->data_digest;
14893f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest) {
14903f2304f8SSagi Grimberg 		ret = nvme_tcp_alloc_crypto(queue);
14913f2304f8SSagi Grimberg 		if (ret) {
14929924b030SIsrael Rukshin 			dev_err(nctrl->device,
14933f2304f8SSagi Grimberg 				"failed to allocate queue %d crypto\n", qid);
14943f2304f8SSagi Grimberg 			goto err_sock;
14953f2304f8SSagi Grimberg 		}
14963f2304f8SSagi Grimberg 	}
14973f2304f8SSagi Grimberg 
14983f2304f8SSagi Grimberg 	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
14993f2304f8SSagi Grimberg 			nvme_tcp_hdgst_len(queue);
15003f2304f8SSagi Grimberg 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
15013f2304f8SSagi Grimberg 	if (!queue->pdu) {
15023f2304f8SSagi Grimberg 		ret = -ENOMEM;
15033f2304f8SSagi Grimberg 		goto err_crypto;
15043f2304f8SSagi Grimberg 	}
15053f2304f8SSagi Grimberg 
15069924b030SIsrael Rukshin 	dev_dbg(nctrl->device, "connecting queue %d\n",
15073f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue));
15083f2304f8SSagi Grimberg 
15093f2304f8SSagi Grimberg 	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
15103f2304f8SSagi Grimberg 		sizeof(ctrl->addr), 0);
15113f2304f8SSagi Grimberg 	if (ret) {
15129924b030SIsrael Rukshin 		dev_err(nctrl->device,
15133f2304f8SSagi Grimberg 			"failed to connect socket: %d\n", ret);
15143f2304f8SSagi Grimberg 		goto err_rcv_pdu;
15153f2304f8SSagi Grimberg 	}
15163f2304f8SSagi Grimberg 
15173f2304f8SSagi Grimberg 	ret = nvme_tcp_init_connection(queue);
15183f2304f8SSagi Grimberg 	if (ret)
15193f2304f8SSagi Grimberg 		goto err_init_connect;
15203f2304f8SSagi Grimberg 
15213f2304f8SSagi Grimberg 	queue->rd_enabled = true;
15223f2304f8SSagi Grimberg 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
15233f2304f8SSagi Grimberg 	nvme_tcp_init_recv_ctx(queue);
15243f2304f8SSagi Grimberg 
15253f2304f8SSagi Grimberg 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
15263f2304f8SSagi Grimberg 	queue->sock->sk->sk_user_data = queue;
15273f2304f8SSagi Grimberg 	queue->state_change = queue->sock->sk->sk_state_change;
15283f2304f8SSagi Grimberg 	queue->data_ready = queue->sock->sk->sk_data_ready;
15293f2304f8SSagi Grimberg 	queue->write_space = queue->sock->sk->sk_write_space;
15303f2304f8SSagi Grimberg 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
15313f2304f8SSagi Grimberg 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
15323f2304f8SSagi Grimberg 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1533ac1c4e18SSebastian Andrzej Siewior #ifdef CONFIG_NET_RX_BUSY_POLL
15341a9460ceSSagi Grimberg 	queue->sock->sk->sk_ll_usec = 1;
1535ac1c4e18SSebastian Andrzej Siewior #endif
15363f2304f8SSagi Grimberg 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
15373f2304f8SSagi Grimberg 
15383f2304f8SSagi Grimberg 	return 0;
15393f2304f8SSagi Grimberg 
15403f2304f8SSagi Grimberg err_init_connect:
15413f2304f8SSagi Grimberg 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
15423f2304f8SSagi Grimberg err_rcv_pdu:
15433f2304f8SSagi Grimberg 	kfree(queue->pdu);
15443f2304f8SSagi Grimberg err_crypto:
15453f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest)
15463f2304f8SSagi Grimberg 		nvme_tcp_free_crypto(queue);
15473f2304f8SSagi Grimberg err_sock:
15483f2304f8SSagi Grimberg 	sock_release(queue->sock);
15493f2304f8SSagi Grimberg 	queue->sock = NULL;
15509ebbfe49SChao Leng err_destroy_mutex:
1551d48f92cdSKeith Busch 	mutex_destroy(&queue->send_mutex);
15529ebbfe49SChao Leng 	mutex_destroy(&queue->queue_lock);
15533f2304f8SSagi Grimberg 	return ret;
15543f2304f8SSagi Grimberg }
15553f2304f8SSagi Grimberg 
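/*
 * Queue stop path: shut the socket down both ways, restore the
 * original sk callbacks so no further events reach the queue, and
 * flush io_work. queue_lock serializes stop against restart, and the
 * NVME_TCP_Q_LIVE test-and-clear makes nvme_tcp_stop_queue()
 * idempotent.
 */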
15563f2304f8SSagi Grimberg static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
15573f2304f8SSagi Grimberg {
15583f2304f8SSagi Grimberg 	struct socket *sock = queue->sock;
15593f2304f8SSagi Grimberg 
15603f2304f8SSagi Grimberg 	write_lock_bh(&sock->sk->sk_callback_lock);
15613f2304f8SSagi Grimberg 	sock->sk->sk_user_data  = NULL;
15623f2304f8SSagi Grimberg 	sock->sk->sk_data_ready = queue->data_ready;
15633f2304f8SSagi Grimberg 	sock->sk->sk_state_change = queue->state_change;
15643f2304f8SSagi Grimberg 	sock->sk->sk_write_space  = queue->write_space;
15653f2304f8SSagi Grimberg 	write_unlock_bh(&sock->sk->sk_callback_lock);
15663f2304f8SSagi Grimberg }
15673f2304f8SSagi Grimberg 
15683f2304f8SSagi Grimberg static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
15693f2304f8SSagi Grimberg {
15703f2304f8SSagi Grimberg 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
15713f2304f8SSagi Grimberg 	nvme_tcp_restore_sock_calls(queue);
15723f2304f8SSagi Grimberg 	cancel_work_sync(&queue->io_work);
15733f2304f8SSagi Grimberg }
15743f2304f8SSagi Grimberg 
15753f2304f8SSagi Grimberg static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
15763f2304f8SSagi Grimberg {
15773f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
15783f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
15793f2304f8SSagi Grimberg 
15809ebbfe49SChao Leng 	mutex_lock(&queue->queue_lock);
15819ebbfe49SChao Leng 	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
15823f2304f8SSagi Grimberg 		__nvme_tcp_stop_queue(queue);
15839ebbfe49SChao Leng 	mutex_unlock(&queue->queue_lock);
15843f2304f8SSagi Grimberg }
15853f2304f8SSagi Grimberg 
15863f2304f8SSagi Grimberg static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
15873f2304f8SSagi Grimberg {
15883f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
15893f2304f8SSagi Grimberg 	int ret;
15903f2304f8SSagi Grimberg 
15913f2304f8SSagi Grimberg 	if (idx)
1592be42a33bSKeith Busch 		ret = nvmf_connect_io_queue(nctrl, idx);
15933f2304f8SSagi Grimberg 	else
15943f2304f8SSagi Grimberg 		ret = nvmf_connect_admin_queue(nctrl);
15953f2304f8SSagi Grimberg 
15963f2304f8SSagi Grimberg 	if (!ret) {
15973f2304f8SSagi Grimberg 		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
15983f2304f8SSagi Grimberg 	} else {
1599f34e2589SSagi Grimberg 		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
16003f2304f8SSagi Grimberg 			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
16013f2304f8SSagi Grimberg 		dev_err(nctrl->device,
16023f2304f8SSagi Grimberg 			"failed to connect queue: %d ret=%d\n", idx, ret);
16033f2304f8SSagi Grimberg 	}
16043f2304f8SSagi Grimberg 	return ret;
16053f2304f8SSagi Grimberg }
16063f2304f8SSagi Grimberg 
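/*
 * The admin and I/O tag sets differ only in their knobs: the admin
 * set is a single hw queue of NVME_AQ_MQ_TAG_DEPTH, while the I/O set
 * sizes itself from sqsize/queue_count and adds a poll map when poll
 * queues were requested. Both are BLK_MQ_F_BLOCKING since ->queue_rq()
 * may send on the socket directly and therefore sleep.
 */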
16073f2304f8SSagi Grimberg static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
16083f2304f8SSagi Grimberg 		bool admin)
16093f2304f8SSagi Grimberg {
16103f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
16113f2304f8SSagi Grimberg 	struct blk_mq_tag_set *set;
16123f2304f8SSagi Grimberg 	int ret;
16133f2304f8SSagi Grimberg 
16143f2304f8SSagi Grimberg 	if (admin) {
16153f2304f8SSagi Grimberg 		set = &ctrl->admin_tag_set;
16163f2304f8SSagi Grimberg 		memset(set, 0, sizeof(*set));
16173f2304f8SSagi Grimberg 		set->ops = &nvme_tcp_admin_mq_ops;
16183f2304f8SSagi Grimberg 		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1619ed01fee2SChristoph Hellwig 		set->reserved_tags = NVMF_RESERVED_TAGS;
1620610c8235SMax Gurtovoy 		set->numa_node = nctrl->numa_node;
1621db5ad6b7SSagi Grimberg 		set->flags = BLK_MQ_F_BLOCKING;
16223f2304f8SSagi Grimberg 		set->cmd_size = sizeof(struct nvme_tcp_request);
16233f2304f8SSagi Grimberg 		set->driver_data = ctrl;
16243f2304f8SSagi Grimberg 		set->nr_hw_queues = 1;
1625dc96f938SChaitanya Kulkarni 		set->timeout = NVME_ADMIN_TIMEOUT;
16263f2304f8SSagi Grimberg 	} else {
16273f2304f8SSagi Grimberg 		set = &ctrl->tag_set;
16283f2304f8SSagi Grimberg 		memset(set, 0, sizeof(*set));
16293f2304f8SSagi Grimberg 		set->ops = &nvme_tcp_mq_ops;
16303f2304f8SSagi Grimberg 		set->queue_depth = nctrl->sqsize + 1;
1631ed01fee2SChristoph Hellwig 		set->reserved_tags = NVMF_RESERVED_TAGS;
1632610c8235SMax Gurtovoy 		set->numa_node = nctrl->numa_node;
1633db5ad6b7SSagi Grimberg 		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
16343f2304f8SSagi Grimberg 		set->cmd_size = sizeof(struct nvme_tcp_request);
16353f2304f8SSagi Grimberg 		set->driver_data = ctrl;
16363f2304f8SSagi Grimberg 		set->nr_hw_queues = nctrl->queue_count - 1;
16373f2304f8SSagi Grimberg 		set->timeout = NVME_IO_TIMEOUT;
16381a9460ceSSagi Grimberg 		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
16393f2304f8SSagi Grimberg 	}
16403f2304f8SSagi Grimberg 
16413f2304f8SSagi Grimberg 	ret = blk_mq_alloc_tag_set(set);
16423f2304f8SSagi Grimberg 	if (ret)
16433f2304f8SSagi Grimberg 		return ERR_PTR(ret);
16443f2304f8SSagi Grimberg 
16453f2304f8SSagi Grimberg 	return set;
16463f2304f8SSagi Grimberg }
16473f2304f8SSagi Grimberg 
16483f2304f8SSagi Grimberg static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
16493f2304f8SSagi Grimberg {
16503f2304f8SSagi Grimberg 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1651ceb1e087SDavid Milburn 		cancel_work_sync(&ctrl->async_event_work);
16523f2304f8SSagi Grimberg 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
16533f2304f8SSagi Grimberg 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
16543f2304f8SSagi Grimberg 	}
16553f2304f8SSagi Grimberg 
16563f2304f8SSagi Grimberg 	nvme_tcp_free_queue(ctrl, 0);
16573f2304f8SSagi Grimberg }
16583f2304f8SSagi Grimberg 
16593f2304f8SSagi Grimberg static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
16603f2304f8SSagi Grimberg {
16613f2304f8SSagi Grimberg 	int i;
16623f2304f8SSagi Grimberg 
16633f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++)
16643f2304f8SSagi Grimberg 		nvme_tcp_free_queue(ctrl, i);
16653f2304f8SSagi Grimberg }
16663f2304f8SSagi Grimberg 
16673f2304f8SSagi Grimberg static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
16683f2304f8SSagi Grimberg {
16693f2304f8SSagi Grimberg 	int i;
16703f2304f8SSagi Grimberg 
16713f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++)
16723f2304f8SSagi Grimberg 		nvme_tcp_stop_queue(ctrl, i);
16733f2304f8SSagi Grimberg }
16743f2304f8SSagi Grimberg 
16753f2304f8SSagi Grimberg static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
16763f2304f8SSagi Grimberg {
1677*462b8b2dSChaitanya Kulkarni 	int i, ret;
16783f2304f8SSagi Grimberg 
16793f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++) {
16803f2304f8SSagi Grimberg 		ret = nvme_tcp_start_queue(ctrl, i);
16813f2304f8SSagi Grimberg 		if (ret)
16823f2304f8SSagi Grimberg 			goto out_stop_queues;
16833f2304f8SSagi Grimberg 	}
16843f2304f8SSagi Grimberg 
16853f2304f8SSagi Grimberg 	return 0;
16863f2304f8SSagi Grimberg 
16873f2304f8SSagi Grimberg out_stop_queues:
16883f2304f8SSagi Grimberg 	for (i--; i >= 1; i--)
16893f2304f8SSagi Grimberg 		nvme_tcp_stop_queue(ctrl, i);
16903f2304f8SSagi Grimberg 	return ret;
16913f2304f8SSagi Grimberg }
16923f2304f8SSagi Grimberg 
16933f2304f8SSagi Grimberg static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
16943f2304f8SSagi Grimberg {
16953f2304f8SSagi Grimberg 	int ret;
16963f2304f8SSagi Grimberg 
16973f2304f8SSagi Grimberg 	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
16983f2304f8SSagi Grimberg 	if (ret)
16993f2304f8SSagi Grimberg 		return ret;
17003f2304f8SSagi Grimberg 
17013f2304f8SSagi Grimberg 	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
17023f2304f8SSagi Grimberg 	if (ret)
17033f2304f8SSagi Grimberg 		goto out_free_queue;
17043f2304f8SSagi Grimberg 
17053f2304f8SSagi Grimberg 	return 0;
17063f2304f8SSagi Grimberg 
17073f2304f8SSagi Grimberg out_free_queue:
17083f2304f8SSagi Grimberg 	nvme_tcp_free_queue(ctrl, 0);
17093f2304f8SSagi Grimberg 	return ret;
17103f2304f8SSagi Grimberg }
17113f2304f8SSagi Grimberg 
1712efb973b1SSagi Grimberg static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
17133f2304f8SSagi Grimberg {
17143f2304f8SSagi Grimberg 	int i, ret;
17153f2304f8SSagi Grimberg 
17163f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++) {
17173f2304f8SSagi Grimberg 		ret = nvme_tcp_alloc_queue(ctrl, i,
17183f2304f8SSagi Grimberg 				ctrl->sqsize + 1);
17193f2304f8SSagi Grimberg 		if (ret)
17203f2304f8SSagi Grimberg 			goto out_free_queues;
17213f2304f8SSagi Grimberg 	}
17223f2304f8SSagi Grimberg 
17233f2304f8SSagi Grimberg 	return 0;
17243f2304f8SSagi Grimberg 
17253f2304f8SSagi Grimberg out_free_queues:
17263f2304f8SSagi Grimberg 	for (i--; i >= 1; i--)
17273f2304f8SSagi Grimberg 		nvme_tcp_free_queue(ctrl, i);
17283f2304f8SSagi Grimberg 
17293f2304f8SSagi Grimberg 	return ret;
17303f2304f8SSagi Grimberg }
17313f2304f8SSagi Grimberg 
17323f2304f8SSagi Grimberg static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
17333f2304f8SSagi Grimberg {
1734873946f4SSagi Grimberg 	unsigned int nr_io_queues;
1735873946f4SSagi Grimberg 
1736873946f4SSagi Grimberg 	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1737873946f4SSagi Grimberg 	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
17381a9460ceSSagi Grimberg 	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1739873946f4SSagi Grimberg 
1740873946f4SSagi Grimberg 	return nr_io_queues;
17413f2304f8SSagi Grimberg }
17423f2304f8SSagi Grimberg 
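/*
 * Distribute the granted queue count over the HCTX types. For
 * example, if the controller grants 8 queues and the user asked for
 * nr_io_queues=4, nr_write_queues=2 and nr_poll_queues=2, the split
 * below yields READ=4, DEFAULT=min(2, 8-4)=2 and POLL=min(2, 2)=2.
 */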
174364861993SSagi Grimberg static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
174464861993SSagi Grimberg 		unsigned int nr_io_queues)
174564861993SSagi Grimberg {
174664861993SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
174764861993SSagi Grimberg 	struct nvmf_ctrl_options *opts = nctrl->opts;
174864861993SSagi Grimberg 
174964861993SSagi Grimberg 	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
175064861993SSagi Grimberg 		/*
175164861993SSagi Grimberg 		 * Separate read/write queues: hand out dedicated
175264861993SSagi Grimberg 		 * default queues only after we have sufficient
175364861993SSagi Grimberg 		 * read queues.
175464861993SSagi Grimberg 		 */
175564861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
175664861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
175764861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
175864861993SSagi Grimberg 			min(opts->nr_write_queues, nr_io_queues);
175964861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
176064861993SSagi Grimberg 	} else {
176164861993SSagi Grimberg 		/*
176264861993SSagi Grimberg 		 * Shared read/write queues: either no write queues
176364861993SSagi Grimberg 		 * were requested, or we don't have a sufficient queue
176464861993SSagi Grimberg 		 * count for dedicated default queues.
176564861993SSagi Grimberg 		 */
176664861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
176764861993SSagi Grimberg 			min(opts->nr_io_queues, nr_io_queues);
176864861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
176964861993SSagi Grimberg 	}
17701a9460ceSSagi Grimberg 
17711a9460ceSSagi Grimberg 	if (opts->nr_poll_queues && nr_io_queues) {
17721a9460ceSSagi Grimberg 		/* map dedicated poll queues only if we have queues left */
17731a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_POLL] =
17741a9460ceSSagi Grimberg 			min(opts->nr_poll_queues, nr_io_queues);
17751a9460ceSSagi Grimberg 	}
177664861993SSagi Grimberg }
177764861993SSagi Grimberg 
1778efb973b1SSagi Grimberg static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
17793f2304f8SSagi Grimberg {
17803f2304f8SSagi Grimberg 	unsigned int nr_io_queues;
17813f2304f8SSagi Grimberg 	int ret;
17823f2304f8SSagi Grimberg 
17833f2304f8SSagi Grimberg 	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
17843f2304f8SSagi Grimberg 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
17853f2304f8SSagi Grimberg 	if (ret)
17863f2304f8SSagi Grimberg 		return ret;
17873f2304f8SSagi Grimberg 
1788664227fdSRuozhu Li 	if (nr_io_queues == 0) {
178972f57242SSagi Grimberg 		dev_err(ctrl->device,
179072f57242SSagi Grimberg 			"unable to set any I/O queues\n");
179172f57242SSagi Grimberg 		return -ENOMEM;
179272f57242SSagi Grimberg 	}
17933f2304f8SSagi Grimberg 
1794664227fdSRuozhu Li 	ctrl->queue_count = nr_io_queues + 1;
17953f2304f8SSagi Grimberg 	dev_info(ctrl->device,
17963f2304f8SSagi Grimberg 		"creating %d I/O queues.\n", nr_io_queues);
17973f2304f8SSagi Grimberg 
179864861993SSagi Grimberg 	nvme_tcp_set_io_queues(ctrl, nr_io_queues);
179964861993SSagi Grimberg 
1800efb973b1SSagi Grimberg 	return __nvme_tcp_alloc_io_queues(ctrl);
18013f2304f8SSagi Grimberg }
18023f2304f8SSagi Grimberg 
18033f2304f8SSagi Grimberg static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
18043f2304f8SSagi Grimberg {
18053f2304f8SSagi Grimberg 	nvme_tcp_stop_io_queues(ctrl);
18063f2304f8SSagi Grimberg 	if (remove) {
18073f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->connect_q);
18083f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->tagset);
18093f2304f8SSagi Grimberg 	}
18103f2304f8SSagi Grimberg 	nvme_tcp_free_io_queues(ctrl);
18113f2304f8SSagi Grimberg }
18123f2304f8SSagi Grimberg 
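/*
 * For a new controller, allocate the tag set and connect_q before
 * starting the queues. On reconnect the queues were frozen by the
 * teardown path, so restart them, wait (bounded by NVME_IO_TIMEOUT)
 * for the freeze to settle, update nr_hw_queues in case the
 * controller granted a different count, and unfreeze.
 */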
18133f2304f8SSagi Grimberg static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
18143f2304f8SSagi Grimberg {
18153f2304f8SSagi Grimberg 	int ret;
18163f2304f8SSagi Grimberg 
1817efb973b1SSagi Grimberg 	ret = nvme_tcp_alloc_io_queues(ctrl);
18183f2304f8SSagi Grimberg 	if (ret)
18193f2304f8SSagi Grimberg 		return ret;
18203f2304f8SSagi Grimberg 
18213f2304f8SSagi Grimberg 	if (new) {
18223f2304f8SSagi Grimberg 		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
18233f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->tagset)) {
18243f2304f8SSagi Grimberg 			ret = PTR_ERR(ctrl->tagset);
18253f2304f8SSagi Grimberg 			goto out_free_io_queues;
18263f2304f8SSagi Grimberg 		}
18273f2304f8SSagi Grimberg 
182872e8b5cdSChaitanya Kulkarni 		ret = nvme_ctrl_init_connect_q(ctrl);
182972e8b5cdSChaitanya Kulkarni 		if (ret)
18303f2304f8SSagi Grimberg 			goto out_free_tag_set;
18313f2304f8SSagi Grimberg 	}
18323f2304f8SSagi Grimberg 
18333f2304f8SSagi Grimberg 	ret = nvme_tcp_start_io_queues(ctrl);
18343f2304f8SSagi Grimberg 	if (ret)
18353f2304f8SSagi Grimberg 		goto out_cleanup_connect_q;
18363f2304f8SSagi Grimberg 
18372875b0aeSSagi Grimberg 	if (!new) {
18382875b0aeSSagi Grimberg 		nvme_start_queues(ctrl);
1839e5c01f4fSSagi Grimberg 		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1840e5c01f4fSSagi Grimberg 			/*
1841e5c01f4fSSagi Grimberg 			 * If we timed out waiting for freeze we are likely to
1842e5c01f4fSSagi Grimberg 			 * If we timed out waiting for freeze, we are likely to
1843e5c01f4fSSagi Grimberg 			 * to be safe.
1844e5c01f4fSSagi Grimberg 			 */
1845e5c01f4fSSagi Grimberg 			ret = -ENODEV;
1846e5c01f4fSSagi Grimberg 			goto out_wait_freeze_timed_out;
1847e5c01f4fSSagi Grimberg 		}
18482875b0aeSSagi Grimberg 		blk_mq_update_nr_hw_queues(ctrl->tagset,
18492875b0aeSSagi Grimberg 			ctrl->queue_count - 1);
18502875b0aeSSagi Grimberg 		nvme_unfreeze(ctrl);
18512875b0aeSSagi Grimberg 	}
18522875b0aeSSagi Grimberg 
18533f2304f8SSagi Grimberg 	return 0;
18543f2304f8SSagi Grimberg 
1855e5c01f4fSSagi Grimberg out_wait_freeze_timed_out:
1856e5c01f4fSSagi Grimberg 	nvme_stop_queues(ctrl);
185770a99574SChao Leng 	nvme_sync_io_queues(ctrl);
1858e5c01f4fSSagi Grimberg 	nvme_tcp_stop_io_queues(ctrl);
18593f2304f8SSagi Grimberg out_cleanup_connect_q:
186070a99574SChao Leng 	nvme_cancel_tagset(ctrl);
1861e85037a2SSagi Grimberg 	if (new)
18623f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->connect_q);
18633f2304f8SSagi Grimberg out_free_tag_set:
18643f2304f8SSagi Grimberg 	if (new)
18653f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->tagset);
18663f2304f8SSagi Grimberg out_free_io_queues:
18673f2304f8SSagi Grimberg 	nvme_tcp_free_io_queues(ctrl);
18683f2304f8SSagi Grimberg 	return ret;
18693f2304f8SSagi Grimberg }
18703f2304f8SSagi Grimberg 
18713f2304f8SSagi Grimberg static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
18723f2304f8SSagi Grimberg {
18733f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
18743f2304f8SSagi Grimberg 	if (remove) {
18753f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->admin_q);
1876e7832cb4SSagi Grimberg 		blk_cleanup_queue(ctrl->fabrics_q);
18773f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->admin_tagset);
18783f2304f8SSagi Grimberg 	}
18793f2304f8SSagi Grimberg 	nvme_tcp_free_admin_queue(ctrl);
18803f2304f8SSagi Grimberg }
18813f2304f8SSagi Grimberg 
18823f2304f8SSagi Grimberg static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
18833f2304f8SSagi Grimberg {
18843f2304f8SSagi Grimberg 	int error;
18853f2304f8SSagi Grimberg 
18863f2304f8SSagi Grimberg 	error = nvme_tcp_alloc_admin_queue(ctrl);
18873f2304f8SSagi Grimberg 	if (error)
18883f2304f8SSagi Grimberg 		return error;
18893f2304f8SSagi Grimberg 
18903f2304f8SSagi Grimberg 	if (new) {
18913f2304f8SSagi Grimberg 		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
18923f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->admin_tagset)) {
18933f2304f8SSagi Grimberg 			error = PTR_ERR(ctrl->admin_tagset);
18943f2304f8SSagi Grimberg 			goto out_free_queue;
18953f2304f8SSagi Grimberg 		}
18963f2304f8SSagi Grimberg 
1897e7832cb4SSagi Grimberg 		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1898e7832cb4SSagi Grimberg 		if (IS_ERR(ctrl->fabrics_q)) {
1899e7832cb4SSagi Grimberg 			error = PTR_ERR(ctrl->fabrics_q);
1900e7832cb4SSagi Grimberg 			goto out_free_tagset;
1901e7832cb4SSagi Grimberg 		}
1902e7832cb4SSagi Grimberg 
19033f2304f8SSagi Grimberg 		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
19043f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->admin_q)) {
19053f2304f8SSagi Grimberg 			error = PTR_ERR(ctrl->admin_q);
1906e7832cb4SSagi Grimberg 			goto out_cleanup_fabrics_q;
19073f2304f8SSagi Grimberg 		}
19083f2304f8SSagi Grimberg 	}
19093f2304f8SSagi Grimberg 
19103f2304f8SSagi Grimberg 	error = nvme_tcp_start_queue(ctrl, 0);
19113f2304f8SSagi Grimberg 	if (error)
19123f2304f8SSagi Grimberg 		goto out_cleanup_queue;
19133f2304f8SSagi Grimberg 
1914c0f2f45bSSagi Grimberg 	error = nvme_enable_ctrl(ctrl);
19153f2304f8SSagi Grimberg 	if (error)
19163f2304f8SSagi Grimberg 		goto out_stop_queue;
19173f2304f8SSagi Grimberg 
19186ca1d902SMing Lei 	nvme_start_admin_queue(ctrl);
1919e7832cb4SSagi Grimberg 
1920f21c4769SChaitanya Kulkarni 	error = nvme_init_ctrl_finish(ctrl);
19213f2304f8SSagi Grimberg 	if (error)
192270a99574SChao Leng 		goto out_quiesce_queue;
19233f2304f8SSagi Grimberg 
19243f2304f8SSagi Grimberg 	return 0;
19253f2304f8SSagi Grimberg 
192670a99574SChao Leng out_quiesce_queue:
19276ca1d902SMing Lei 	nvme_stop_admin_queue(ctrl);
192870a99574SChao Leng 	blk_sync_queue(ctrl->admin_q);
19293f2304f8SSagi Grimberg out_stop_queue:
19303f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
193170a99574SChao Leng 	nvme_cancel_admin_tagset(ctrl);
19323f2304f8SSagi Grimberg out_cleanup_queue:
19333f2304f8SSagi Grimberg 	if (new)
19343f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->admin_q);
1935e7832cb4SSagi Grimberg out_cleanup_fabrics_q:
1936e7832cb4SSagi Grimberg 	if (new)
1937e7832cb4SSagi Grimberg 		blk_cleanup_queue(ctrl->fabrics_q);
19383f2304f8SSagi Grimberg out_free_tagset:
19393f2304f8SSagi Grimberg 	if (new)
19403f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->admin_tagset);
19413f2304f8SSagi Grimberg out_free_queue:
19423f2304f8SSagi Grimberg 	nvme_tcp_free_admin_queue(ctrl);
19433f2304f8SSagi Grimberg 	return error;
19443f2304f8SSagi Grimberg }
19453f2304f8SSagi Grimberg 
19463f2304f8SSagi Grimberg static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
19473f2304f8SSagi Grimberg 		bool remove)
19483f2304f8SSagi Grimberg {
19496ca1d902SMing Lei 	nvme_stop_admin_queue(ctrl);
1950d6f66210SChao Leng 	blk_sync_queue(ctrl->admin_q);
19513f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
1952563c8158SChao Leng 	nvme_cancel_admin_tagset(ctrl);
1953e7832cb4SSagi Grimberg 	if (remove)
19546ca1d902SMing Lei 		nvme_start_admin_queue(ctrl);
19553f2304f8SSagi Grimberg 	nvme_tcp_destroy_admin_queue(ctrl, remove);
19563f2304f8SSagi Grimberg }
19573f2304f8SSagi Grimberg 
19583f2304f8SSagi Grimberg static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
19593f2304f8SSagi Grimberg 		bool remove)
19603f2304f8SSagi Grimberg {
19613f2304f8SSagi Grimberg 	if (ctrl->queue_count <= 1)
1962d6f66210SChao Leng 		return;
19636ca1d902SMing Lei 	nvme_stop_admin_queue(ctrl);
19642875b0aeSSagi Grimberg 	nvme_start_freeze(ctrl);
19653f2304f8SSagi Grimberg 	nvme_stop_queues(ctrl);
1966d6f66210SChao Leng 	nvme_sync_io_queues(ctrl);
19673f2304f8SSagi Grimberg 	nvme_tcp_stop_io_queues(ctrl);
1968563c8158SChao Leng 	nvme_cancel_tagset(ctrl);
19693f2304f8SSagi Grimberg 	if (remove)
19703f2304f8SSagi Grimberg 		nvme_start_queues(ctrl);
19713f2304f8SSagi Grimberg 	nvme_tcp_destroy_io_queues(ctrl, remove);
19723f2304f8SSagi Grimberg }
19733f2304f8SSagi Grimberg 
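/*
 * Recovery policy: act only from the CONNECTING state (reset/delete
 * own the others), and either schedule another connect attempt after
 * reconnect_delay seconds or, once nvmf_should_reconnect() gives up,
 * delete the controller.
 */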
19743f2304f8SSagi Grimberg static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
19753f2304f8SSagi Grimberg {
19763f2304f8SSagi Grimberg 	/* If we are resetting/deleting then do nothing */
19773f2304f8SSagi Grimberg 	if (ctrl->state != NVME_CTRL_CONNECTING) {
19783f2304f8SSagi Grimberg 		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
19793f2304f8SSagi Grimberg 			ctrl->state == NVME_CTRL_LIVE);
19803f2304f8SSagi Grimberg 		return;
19813f2304f8SSagi Grimberg 	}
19823f2304f8SSagi Grimberg 
19833f2304f8SSagi Grimberg 	if (nvmf_should_reconnect(ctrl)) {
19843f2304f8SSagi Grimberg 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
19853f2304f8SSagi Grimberg 			ctrl->opts->reconnect_delay);
19863f2304f8SSagi Grimberg 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
19873f2304f8SSagi Grimberg 				ctrl->opts->reconnect_delay * HZ);
19883f2304f8SSagi Grimberg 	} else {
19893f2304f8SSagi Grimberg 		dev_info(ctrl->device, "Removing controller...\n");
19903f2304f8SSagi Grimberg 		nvme_delete_ctrl(ctrl);
19913f2304f8SSagi Grimberg 	}
19923f2304f8SSagi Grimberg }
19933f2304f8SSagi Grimberg 
19943f2304f8SSagi Grimberg static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
19953f2304f8SSagi Grimberg {
19963f2304f8SSagi Grimberg 	struct nvmf_ctrl_options *opts = ctrl->opts;
1997312910f4SColin Ian King 	int ret;
19983f2304f8SSagi Grimberg 
19993f2304f8SSagi Grimberg 	ret = nvme_tcp_configure_admin_queue(ctrl, new);
20003f2304f8SSagi Grimberg 	if (ret)
20013f2304f8SSagi Grimberg 		return ret;
20023f2304f8SSagi Grimberg 
20033f2304f8SSagi Grimberg 	if (ctrl->icdoff) {
2004522af60cSDan Carpenter 		ret = -EOPNOTSUPP;
20053f2304f8SSagi Grimberg 		dev_err(ctrl->device, "icdoff is not supported!\n");
20063f2304f8SSagi Grimberg 		goto destroy_admin;
20073f2304f8SSagi Grimberg 	}
20083f2304f8SSagi Grimberg 
20093b54064fSChaitanya Kulkarni 	if (!nvme_ctrl_sgl_supported(ctrl)) {
2010522af60cSDan Carpenter 		ret = -EOPNOTSUPP;
201173ffcefcSMax Gurtovoy 		dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
201273ffcefcSMax Gurtovoy 		goto destroy_admin;
201373ffcefcSMax Gurtovoy 	}
201473ffcefcSMax Gurtovoy 
20153f2304f8SSagi Grimberg 	if (opts->queue_size > ctrl->sqsize + 1)
20163f2304f8SSagi Grimberg 		dev_warn(ctrl->device,
20173f2304f8SSagi Grimberg 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
20183f2304f8SSagi Grimberg 			opts->queue_size, ctrl->sqsize + 1);
20193f2304f8SSagi Grimberg 
20203f2304f8SSagi Grimberg 	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
20213f2304f8SSagi Grimberg 		dev_warn(ctrl->device,
20223f2304f8SSagi Grimberg 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
20233f2304f8SSagi Grimberg 			ctrl->sqsize + 1, ctrl->maxcmd);
20243f2304f8SSagi Grimberg 		ctrl->sqsize = ctrl->maxcmd - 1;
20253f2304f8SSagi Grimberg 	}
20263f2304f8SSagi Grimberg 
20273f2304f8SSagi Grimberg 	if (ctrl->queue_count > 1) {
20283f2304f8SSagi Grimberg 		ret = nvme_tcp_configure_io_queues(ctrl, new);
20293f2304f8SSagi Grimberg 		if (ret)
20303f2304f8SSagi Grimberg 			goto destroy_admin;
20313f2304f8SSagi Grimberg 	}
20323f2304f8SSagi Grimberg 
20333f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2034bea54ef5SIsrael Rukshin 		/*
2035ecca390eSSagi Grimberg 		 * A state change failure is ok if we started ctrl delete,
2036bea54ef5SIsrael Rukshin 		 * unless we are in the middle of creating a new controller,
2037bea54ef5SIsrael Rukshin 		 * where we fail to avoid racing with the teardown flow.
2038bea54ef5SIsrael Rukshin 		 */
2039ecca390eSSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2040ecca390eSSagi Grimberg 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
2041bea54ef5SIsrael Rukshin 		WARN_ON_ONCE(new);
20423f2304f8SSagi Grimberg 		ret = -EINVAL;
20433f2304f8SSagi Grimberg 		goto destroy_io;
20443f2304f8SSagi Grimberg 	}
20453f2304f8SSagi Grimberg 
20463f2304f8SSagi Grimberg 	nvme_start_ctrl(ctrl);
20473f2304f8SSagi Grimberg 	return 0;
20483f2304f8SSagi Grimberg 
20493f2304f8SSagi Grimberg destroy_io:
205070a99574SChao Leng 	if (ctrl->queue_count > 1) {
205170a99574SChao Leng 		nvme_stop_queues(ctrl);
205270a99574SChao Leng 		nvme_sync_io_queues(ctrl);
205370a99574SChao Leng 		nvme_tcp_stop_io_queues(ctrl);
205470a99574SChao Leng 		nvme_cancel_tagset(ctrl);
20553f2304f8SSagi Grimberg 		nvme_tcp_destroy_io_queues(ctrl, new);
205670a99574SChao Leng 	}
20573f2304f8SSagi Grimberg destroy_admin:
20586ca1d902SMing Lei 	nvme_stop_admin_queue(ctrl);
205970a99574SChao Leng 	blk_sync_queue(ctrl->admin_q);
20603f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
206170a99574SChao Leng 	nvme_cancel_admin_tagset(ctrl);
20623f2304f8SSagi Grimberg 	nvme_tcp_destroy_admin_queue(ctrl, new);
20633f2304f8SSagi Grimberg 	return ret;
20643f2304f8SSagi Grimberg }
20653f2304f8SSagi Grimberg 
20663f2304f8SSagi Grimberg static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
20673f2304f8SSagi Grimberg {
20683f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
20693f2304f8SSagi Grimberg 			struct nvme_tcp_ctrl, connect_work);
20703f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
20713f2304f8SSagi Grimberg 
20723f2304f8SSagi Grimberg 	++ctrl->nr_reconnects;
20733f2304f8SSagi Grimberg 
20743f2304f8SSagi Grimberg 	if (nvme_tcp_setup_ctrl(ctrl, false))
20753f2304f8SSagi Grimberg 		goto requeue;
20763f2304f8SSagi Grimberg 
207756a77d26SColin Ian King 	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
20783f2304f8SSagi Grimberg 			ctrl->nr_reconnects);
20793f2304f8SSagi Grimberg 
20803f2304f8SSagi Grimberg 	ctrl->nr_reconnects = 0;
20813f2304f8SSagi Grimberg 
20823f2304f8SSagi Grimberg 	return;
20833f2304f8SSagi Grimberg 
20843f2304f8SSagi Grimberg requeue:
20853f2304f8SSagi Grimberg 	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
20863f2304f8SSagi Grimberg 			ctrl->nr_reconnects);
20873f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
20883f2304f8SSagi Grimberg }
20893f2304f8SSagi Grimberg 
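/*
 * Error recovery: stop keep-alives, tear down the I/O and admin
 * queues without destroying them (remove=false), unquiesce so pending
 * requests complete quickly, then move to CONNECTING and let the
 * reconnect logic take over.
 */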
20903f2304f8SSagi Grimberg static void nvme_tcp_error_recovery_work(struct work_struct *work)
20913f2304f8SSagi Grimberg {
20923f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
20933f2304f8SSagi Grimberg 				struct nvme_tcp_ctrl, err_work);
20943f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
20953f2304f8SSagi Grimberg 
20963f2304f8SSagi Grimberg 	nvme_stop_keep_alive(ctrl);
20973f2304f8SSagi Grimberg 	nvme_tcp_teardown_io_queues(ctrl, false);
20983f2304f8SSagi Grimberg 	/* unquiesce to fast-fail pending requests */
20993f2304f8SSagi Grimberg 	nvme_start_queues(ctrl);
21003f2304f8SSagi Grimberg 	nvme_tcp_teardown_admin_queue(ctrl, false);
21016ca1d902SMing Lei 	nvme_start_admin_queue(ctrl);
21023f2304f8SSagi Grimberg 
21033f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2104ecca390eSSagi Grimberg 		/* state change failure is ok if we started ctrl delete */
2105ecca390eSSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2106ecca390eSSagi Grimberg 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
21073f2304f8SSagi Grimberg 		return;
21083f2304f8SSagi Grimberg 	}
21093f2304f8SSagi Grimberg 
21103f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
21113f2304f8SSagi Grimberg }
21123f2304f8SSagi Grimberg 
21133f2304f8SSagi Grimberg static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
21143f2304f8SSagi Grimberg {
2115794a4cb3SSagi Grimberg 	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2116794a4cb3SSagi Grimberg 	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2117794a4cb3SSagi Grimberg 
21183f2304f8SSagi Grimberg 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
21196ca1d902SMing Lei 	nvme_stop_admin_queue(ctrl);
21203f2304f8SSagi Grimberg 	if (shutdown)
21213f2304f8SSagi Grimberg 		nvme_shutdown_ctrl(ctrl);
21223f2304f8SSagi Grimberg 	else
2123b5b05048SSagi Grimberg 		nvme_disable_ctrl(ctrl);
21243f2304f8SSagi Grimberg 	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
21253f2304f8SSagi Grimberg }
21263f2304f8SSagi Grimberg 
21273f2304f8SSagi Grimberg static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
21283f2304f8SSagi Grimberg {
21293f2304f8SSagi Grimberg 	nvme_tcp_teardown_ctrl(ctrl, true);
21303f2304f8SSagi Grimberg }
21313f2304f8SSagi Grimberg 
21323f2304f8SSagi Grimberg static void nvme_reset_ctrl_work(struct work_struct *work)
21333f2304f8SSagi Grimberg {
21343f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl =
21353f2304f8SSagi Grimberg 		container_of(work, struct nvme_ctrl, reset_work);
21363f2304f8SSagi Grimberg 
21373f2304f8SSagi Grimberg 	nvme_stop_ctrl(ctrl);
21383f2304f8SSagi Grimberg 	nvme_tcp_teardown_ctrl(ctrl, false);
21393f2304f8SSagi Grimberg 
21403f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2141ecca390eSSagi Grimberg 		/* state change failure is ok if we started ctrl delete */
2142ecca390eSSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2143ecca390eSSagi Grimberg 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
21443f2304f8SSagi Grimberg 		return;
21453f2304f8SSagi Grimberg 	}
21463f2304f8SSagi Grimberg 
21473f2304f8SSagi Grimberg 	if (nvme_tcp_setup_ctrl(ctrl, false))
21483f2304f8SSagi Grimberg 		goto out_fail;
21493f2304f8SSagi Grimberg 
21503f2304f8SSagi Grimberg 	return;
21513f2304f8SSagi Grimberg 
21523f2304f8SSagi Grimberg out_fail:
21533f2304f8SSagi Grimberg 	++ctrl->nr_reconnects;
21543f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
21553f2304f8SSagi Grimberg }
21563f2304f8SSagi Grimberg 
21573f2304f8SSagi Grimberg static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
21583f2304f8SSagi Grimberg {
21593f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
21603f2304f8SSagi Grimberg 
21613f2304f8SSagi Grimberg 	if (list_empty(&ctrl->list))
21623f2304f8SSagi Grimberg 		goto free_ctrl;
21633f2304f8SSagi Grimberg 
21643f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
21653f2304f8SSagi Grimberg 	list_del(&ctrl->list);
21663f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
21673f2304f8SSagi Grimberg 
21683f2304f8SSagi Grimberg 	nvmf_free_options(nctrl->opts);
21693f2304f8SSagi Grimberg free_ctrl:
21703f2304f8SSagi Grimberg 	kfree(ctrl->queues);
21713f2304f8SSagi Grimberg 	kfree(ctrl);
21723f2304f8SSagi Grimberg }
21733f2304f8SSagi Grimberg 
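/*
 * SGL helpers: a NULL transport SGL for commands without data, an
 * offset-format data block SGL (based at icdoff) for in-capsule
 * inline data, and a transport SGL of the full length for data
 * carried in separate data PDUs.
 */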
21743f2304f8SSagi Grimberg static void nvme_tcp_set_sg_null(struct nvme_command *c)
21753f2304f8SSagi Grimberg {
21763f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
21773f2304f8SSagi Grimberg 
21783f2304f8SSagi Grimberg 	sg->addr = 0;
21793f2304f8SSagi Grimberg 	sg->length = 0;
21803f2304f8SSagi Grimberg 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
21813f2304f8SSagi Grimberg 			NVME_SGL_FMT_TRANSPORT_A;
21823f2304f8SSagi Grimberg }
21833f2304f8SSagi Grimberg 
21843f2304f8SSagi Grimberg static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
21853f2304f8SSagi Grimberg 		struct nvme_command *c, u32 data_len)
21863f2304f8SSagi Grimberg {
21873f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
21883f2304f8SSagi Grimberg 
21893f2304f8SSagi Grimberg 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
21903f2304f8SSagi Grimberg 	sg->length = cpu_to_le32(data_len);
21913f2304f8SSagi Grimberg 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
21923f2304f8SSagi Grimberg }
21933f2304f8SSagi Grimberg 
21943f2304f8SSagi Grimberg static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
21953f2304f8SSagi Grimberg 		u32 data_len)
21963f2304f8SSagi Grimberg {
21973f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
21983f2304f8SSagi Grimberg 
21993f2304f8SSagi Grimberg 	sg->addr = 0;
22003f2304f8SSagi Grimberg 	sg->length = cpu_to_le32(data_len);
22013f2304f8SSagi Grimberg 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
22023f2304f8SSagi Grimberg 			NVME_SGL_FMT_TRANSPORT_A;
22033f2304f8SSagi Grimberg }
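/*
 * The three helpers above emit the only SGL shapes NVMe/TCP uses: a
 * null transport SGL for commands that carry no data, a data block SGL
 * in offset format (addr = icdoff) for write data sent inline in the
 * command capsule, and a transport SGL for data moved by separate
 * C2HData/H2CData PDUs. Assuming the descriptor constants from
 * <linux/nvme.h> (NVME_TRANSPORT_SGL_DATA_DESC == 0x5,
 * NVME_SGL_FMT_TRANSPORT_A == 0xa, NVME_SGL_FMT_DATA_DESC == 0x0,
 * NVME_SGL_FMT_OFFSET == 0x1), the type byte comes out as 0x5a for the
 * null and host-data cases and 0x01 for inline data. A minimal,
 * hypothetical helper (not part of this driver) showing how a capsule
 * reader could tell the cases apart:
 *
 *	static bool nvme_tcp_sgl_is_inline(const struct nvme_sgl_desc *sg)
 *	{
 *		return sg->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
 *				    NVME_SGL_FMT_OFFSET);
 *	}
 */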
22043f2304f8SSagi Grimberg 
22053f2304f8SSagi Grimberg static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
22063f2304f8SSagi Grimberg {
22073f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
22083f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
22093f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
22103f2304f8SSagi Grimberg 	struct nvme_command *cmd = &pdu->cmd;
22113f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
22123f2304f8SSagi Grimberg 
22133f2304f8SSagi Grimberg 	memset(pdu, 0, sizeof(*pdu));
22143f2304f8SSagi Grimberg 	pdu->hdr.type = nvme_tcp_cmd;
22153f2304f8SSagi Grimberg 	if (queue->hdr_digest)
22163f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
22173f2304f8SSagi Grimberg 	pdu->hdr.hlen = sizeof(*pdu);
22183f2304f8SSagi Grimberg 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
22193f2304f8SSagi Grimberg 
22203f2304f8SSagi Grimberg 	cmd->common.opcode = nvme_admin_async_event;
22213f2304f8SSagi Grimberg 	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
22223f2304f8SSagi Grimberg 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
22233f2304f8SSagi Grimberg 	nvme_tcp_set_sg_null(cmd);
22243f2304f8SSagi Grimberg 
22253f2304f8SSagi Grimberg 	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
22263f2304f8SSagi Grimberg 	ctrl->async_req.offset = 0;
22273f2304f8SSagi Grimberg 	ctrl->async_req.curr_bio = NULL;
22283f2304f8SSagi Grimberg 	ctrl->async_req.data_len = 0;
22293f2304f8SSagi Grimberg 
223086f0348aSSagi Grimberg 	nvme_tcp_queue_request(&ctrl->async_req, true, true);
22313f2304f8SSagi Grimberg }
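/*
 * The AER slot is not a block-layer request, so it uses the reserved
 * command id NVME_AQ_BLK_MQ_DEPTH and a private pre-allocated PDU. As a
 * worked example of the header math, assuming the on-wire layout from
 * <linux/nvme-tcp.h> (8-byte common header plus a 64-byte SQE, so
 * sizeof(*pdu) == 72): with the header digest enabled, plen = 72 + 4 =
 * 76. There is never payload or a data digest on this path
 * (data_len == 0).
 */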
22323f2304f8SSagi Grimberg 
2233236187c4SSagi Grimberg static void nvme_tcp_complete_timed_out(struct request *rq)
2234236187c4SSagi Grimberg {
2235236187c4SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2236236187c4SSagi Grimberg 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2237236187c4SSagi Grimberg 
2238236187c4SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
22390a8a2c85SSagi Grimberg 	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
2240236187c4SSagi Grimberg 		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2241236187c4SSagi Grimberg 		blk_mq_complete_request(rq);
2242236187c4SSagi Grimberg 	}
2243236187c4SSagi Grimberg }
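/*
 * Stopping the queue before completing fences off the socket receive
 * path: once nvme_tcp_stop_queue() returns, io_work can no longer run
 * for this queue, so the started-and-not-completed check cannot race
 * with a late response and the request is completed exactly once.
 */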
2244236187c4SSagi Grimberg 
22453f2304f8SSagi Grimberg static enum blk_eh_timer_return
22463f2304f8SSagi Grimberg nvme_tcp_timeout(struct request *rq, bool reserved)
22473f2304f8SSagi Grimberg {
22483f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2249236187c4SSagi Grimberg 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
22503f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
22513f2304f8SSagi Grimberg 
2252236187c4SSagi Grimberg 	dev_warn(ctrl->device,
22533f2304f8SSagi Grimberg 		"queue %d: timeout request %#x type %d\n",
225439d57757SSagi Grimberg 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
22553f2304f8SSagi Grimberg 
2256236187c4SSagi Grimberg 	if (ctrl->state != NVME_CTRL_LIVE) {
225739d57757SSagi Grimberg 		/*
2258236187c4SSagi Grimberg 		 * If we are resetting, connecting or deleting, we should
2259236187c4SSagi Grimberg 		 * complete immediately because we may block the controller
2260236187c4SSagi Grimberg 		 * teardown or setup sequence:
2261236187c4SSagi Grimberg 		 * - ctrl disable/shutdown fabrics requests
2262236187c4SSagi Grimberg 		 * - connect requests
2263236187c4SSagi Grimberg 		 * - initialization admin requests
2264236187c4SSagi Grimberg 		 * - I/O requests that entered after unquiescing and
2265236187c4SSagi Grimberg 		 *   the controller stopped responding
2266236187c4SSagi Grimberg 		 *
2267236187c4SSagi Grimberg 		 * All other requests should be cancelled by the error
2268236187c4SSagi Grimberg 		 * recovery work, so it's fine that we fail it here.
226939d57757SSagi Grimberg 		 */
2270236187c4SSagi Grimberg 		nvme_tcp_complete_timed_out(rq);
22713f2304f8SSagi Grimberg 		return BLK_EH_DONE;
22723f2304f8SSagi Grimberg 	}
22733f2304f8SSagi Grimberg 
2274236187c4SSagi Grimberg 	/*
2275236187c4SSagi Grimberg 	 * LIVE state should trigger the normal error recovery which will
2276236187c4SSagi Grimberg 	 * handle completing this request.
2277236187c4SSagi Grimberg 	 */
2278236187c4SSagi Grimberg 	nvme_tcp_error_recovery(ctrl);
22793f2304f8SSagi Grimberg 	return BLK_EH_RESET_TIMER;
22803f2304f8SSagi Grimberg }
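/*
 * BLK_EH_RESET_TIMER only rearms the timer here; the request itself is
 * completed (or failed over) by the error recovery work scheduled via
 * nvme_tcp_error_recovery(), which tears down the queues and cancels
 * all in-flight requests.
 */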
22813f2304f8SSagi Grimberg 
22823f2304f8SSagi Grimberg static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
22833f2304f8SSagi Grimberg 			struct request *rq)
22843f2304f8SSagi Grimberg {
22853f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
22863f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
22873f2304f8SSagi Grimberg 	struct nvme_command *c = &pdu->cmd;
22883f2304f8SSagi Grimberg 
22893f2304f8SSagi Grimberg 	c->common.flags |= NVME_CMD_SGL_METABUF;
22903f2304f8SSagi Grimberg 
229125e5cb78SSagi Grimberg 	if (!blk_rq_nr_phys_segments(rq))
229225e5cb78SSagi Grimberg 		nvme_tcp_set_sg_null(c);
229325e5cb78SSagi Grimberg 	else if (rq_data_dir(rq) == WRITE &&
22943f2304f8SSagi Grimberg 	    req->data_len <= nvme_tcp_inline_data_size(queue))
22953f2304f8SSagi Grimberg 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
22963f2304f8SSagi Grimberg 	else
22973f2304f8SSagi Grimberg 		nvme_tcp_set_sg_host_data(c, req->data_len);
22983f2304f8SSagi Grimberg 
22993f2304f8SSagi Grimberg 	return 0;
23003f2304f8SSagi Grimberg }
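/*
 * Only writes that fit the controller's inline window (derived from
 * ioccsz; nvme_tcp_inline_data_size() is defined earlier in this file)
 * go inside the command capsule; reads always use a transport SGL and
 * are delivered in C2HData PDUs. A sketch of the decision, written as a
 * hypothetical helper mirroring the branch above:
 *
 *	static bool nvme_tcp_write_is_inline(struct nvme_tcp_queue *queue,
 *					     struct request *rq, u32 len)
 *	{
 *		return rq_data_dir(rq) == WRITE &&
 *		       len <= nvme_tcp_inline_data_size(queue);
 *	}
 */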
23013f2304f8SSagi Grimberg 
23023f2304f8SSagi Grimberg static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
23033f2304f8SSagi Grimberg 		struct request *rq)
23043f2304f8SSagi Grimberg {
23053f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
23063f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
23073f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
23083f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
23093f2304f8SSagi Grimberg 	blk_status_t ret;
23103f2304f8SSagi Grimberg 
2311f4b9e6c9SKeith Busch 	ret = nvme_setup_cmd(ns, rq);
23123f2304f8SSagi Grimberg 	if (ret)
23133f2304f8SSagi Grimberg 		return ret;
23143f2304f8SSagi Grimberg 
23153f2304f8SSagi Grimberg 	req->state = NVME_TCP_SEND_CMD_PDU;
23161ba2e507SDaniel Wagner 	req->status = cpu_to_le16(NVME_SC_SUCCESS);
23173f2304f8SSagi Grimberg 	req->offset = 0;
23183f2304f8SSagi Grimberg 	req->data_sent = 0;
23193f2304f8SSagi Grimberg 	req->pdu_len = 0;
23203f2304f8SSagi Grimberg 	req->pdu_sent = 0;
232125e5cb78SSagi Grimberg 	req->data_len = blk_rq_nr_phys_segments(rq) ?
232225e5cb78SSagi Grimberg 				blk_rq_payload_bytes(rq) : 0;
23233f2304f8SSagi Grimberg 	req->curr_bio = rq->bio;
2324e11e5116SSagi Grimberg 	if (req->curr_bio && req->data_len)
2325cb9b870fSSagi Grimberg 		nvme_tcp_init_iter(req, rq_data_dir(rq));
23263f2304f8SSagi Grimberg 
23273f2304f8SSagi Grimberg 	if (rq_data_dir(rq) == WRITE &&
23283f2304f8SSagi Grimberg 	    req->data_len <= nvme_tcp_inline_data_size(queue))
23293f2304f8SSagi Grimberg 		req->pdu_len = req->data_len;
23303f2304f8SSagi Grimberg 
23313f2304f8SSagi Grimberg 	pdu->hdr.type = nvme_tcp_cmd;
23323f2304f8SSagi Grimberg 	pdu->hdr.flags = 0;
23333f2304f8SSagi Grimberg 	if (queue->hdr_digest)
23343f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
23353f2304f8SSagi Grimberg 	if (queue->data_digest && req->pdu_len) {
23363f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
23373f2304f8SSagi Grimberg 		ddgst = nvme_tcp_ddgst_len(queue);
23383f2304f8SSagi Grimberg 	}
23393f2304f8SSagi Grimberg 	pdu->hdr.hlen = sizeof(*pdu);
23403f2304f8SSagi Grimberg 	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
23413f2304f8SSagi Grimberg 	pdu->hdr.plen =
23423f2304f8SSagi Grimberg 		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
23433f2304f8SSagi Grimberg 
23443f2304f8SSagi Grimberg 	ret = nvme_tcp_map_data(queue, rq);
23453f2304f8SSagi Grimberg 	if (unlikely(ret)) {
234628a4cac4SMax Gurtovoy 		nvme_cleanup_cmd(rq);
23473f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
23483f2304f8SSagi Grimberg 			"Failed to map data (%d)\n", ret);
23493f2304f8SSagi Grimberg 		return ret;
23503f2304f8SSagi Grimberg 	}
23513f2304f8SSagi Grimberg 
23523f2304f8SSagi Grimberg 	return 0;
23533f2304f8SSagi Grimberg }
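/*
 * Worked example of the resulting header, assuming a 4KiB inline write
 * with both digests enabled and the usual 72-byte command capsule:
 * hlen = 72, hdgst = 4, so pdo = 76 (data starts right after the header
 * digest) and plen = 72 + 4 + 4096 + 4 = 4176. For a read, pdu_len
 * stays 0, pdo is 0, and plen is just hlen + hdgst.
 */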
23543f2304f8SSagi Grimberg 
235586f0348aSSagi Grimberg static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
235686f0348aSSagi Grimberg {
235786f0348aSSagi Grimberg 	struct nvme_tcp_queue *queue = hctx->driver_data;
235886f0348aSSagi Grimberg 
235986f0348aSSagi Grimberg 	if (!llist_empty(&queue->req_list))
236086f0348aSSagi Grimberg 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
236186f0348aSSagi Grimberg }
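/*
 * blk-mq pairing: when nvme_tcp_queue_rq() was called with bd->last ==
 * false and the dispatch loop ends without a final request (e.g. the
 * budget ran out), ->commit_rqs() is invoked so that requests parked on
 * req_list still get io_work scheduled.
 */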
236286f0348aSSagi Grimberg 
23633f2304f8SSagi Grimberg static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
23643f2304f8SSagi Grimberg 		const struct blk_mq_queue_data *bd)
23653f2304f8SSagi Grimberg {
23663f2304f8SSagi Grimberg 	struct nvme_ns *ns = hctx->queue->queuedata;
23673f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = hctx->driver_data;
23683f2304f8SSagi Grimberg 	struct request *rq = bd->rq;
23693f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
23703f2304f8SSagi Grimberg 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
23713f2304f8SSagi Grimberg 	blk_status_t ret;
23723f2304f8SSagi Grimberg 
2373a9715744STao Chiu 	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2374a9715744STao Chiu 		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
23753f2304f8SSagi Grimberg 
23763f2304f8SSagi Grimberg 	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
23773f2304f8SSagi Grimberg 	if (unlikely(ret))
23783f2304f8SSagi Grimberg 		return ret;
23793f2304f8SSagi Grimberg 
23803f2304f8SSagi Grimberg 	blk_mq_start_request(rq);
23813f2304f8SSagi Grimberg 
238286f0348aSSagi Grimberg 	nvme_tcp_queue_request(req, true, bd->last);
23833f2304f8SSagi Grimberg 
23843f2304f8SSagi Grimberg 	return BLK_STS_OK;
23853f2304f8SSagi Grimberg }
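/*
 * nvme_tcp_queue_request(req, true, bd->last): sync == true allows the
 * request to be sent directly from this context when we are already on
 * the queue's io_cpu and the send path is idle, avoiding a workqueue
 * bounce; bd->last == false defers kicking io_work to ->commit_rqs()
 * above.
 */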
23863f2304f8SSagi Grimberg 
2387873946f4SSagi Grimberg static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2388873946f4SSagi Grimberg {
2389873946f4SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
239064861993SSagi Grimberg 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2391873946f4SSagi Grimberg 
239264861993SSagi Grimberg 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2393873946f4SSagi Grimberg 		/* separate read/write queues */
2394873946f4SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
239564861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
239664861993SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
239764861993SSagi Grimberg 		set->map[HCTX_TYPE_READ].nr_queues =
239864861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_READ];
2399873946f4SSagi Grimberg 		set->map[HCTX_TYPE_READ].queue_offset =
240064861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2401873946f4SSagi Grimberg 	} else {
240264861993SSagi Grimberg 		/* shared read/write queues */
2403873946f4SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
240464861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
240564861993SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
240664861993SSagi Grimberg 		set->map[HCTX_TYPE_READ].nr_queues =
240764861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2408873946f4SSagi Grimberg 		set->map[HCTX_TYPE_READ].queue_offset = 0;
2409873946f4SSagi Grimberg 	}
2410873946f4SSagi Grimberg 	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2411873946f4SSagi Grimberg 	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
241264861993SSagi Grimberg 
24131a9460ceSSagi Grimberg 	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
24141a9460ceSSagi Grimberg 		/* map dedicated poll queues only if we have queues left */
24151a9460ceSSagi Grimberg 		set->map[HCTX_TYPE_POLL].nr_queues =
24161a9460ceSSagi Grimberg 				ctrl->io_queues[HCTX_TYPE_POLL];
24171a9460ceSSagi Grimberg 		set->map[HCTX_TYPE_POLL].queue_offset =
24181a9460ceSSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
24191a9460ceSSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_READ];
24201a9460ceSSagi Grimberg 		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
24211a9460ceSSagi Grimberg 	}
24221a9460ceSSagi Grimberg 
242364861993SSagi Grimberg 	dev_info(ctrl->ctrl.device,
24241a9460ceSSagi Grimberg 		"mapped %d/%d/%d default/read/poll queues.\n",
242564861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT],
24261a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_READ],
24271a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_POLL]);
242864861993SSagi Grimberg 
2429873946f4SSagi Grimberg 	return 0;
2430873946f4SSagi Grimberg }
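/*
 * Worked example: with io_queues[] = { 4, 4, 2 } for default/read/poll,
 * the default map covers queues 0-3 (queue_offset 0), the read map
 * queues 4-7 (queue_offset 4) and the poll map queues 8-9 (queue_offset
 * 8). With no dedicated read queues, the default and read maps both
 * point at the same first set of queues (offset 0).
 */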
2431873946f4SSagi Grimberg 
24325a72e899SJens Axboe static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
24331a9460ceSSagi Grimberg {
24341a9460ceSSagi Grimberg 	struct nvme_tcp_queue *queue = hctx->driver_data;
24351a9460ceSSagi Grimberg 	struct sock *sk = queue->sock->sk;
24361a9460ceSSagi Grimberg 
2437f86e5bf8SSagi Grimberg 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2438f86e5bf8SSagi Grimberg 		return 0;
2439f86e5bf8SSagi Grimberg 
244072e5d757SSagi Grimberg 	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
24413f926af3SEric Dumazet 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
24421a9460ceSSagi Grimberg 		sk_busy_loop(sk, true);
24431a9460ceSSagi Grimberg 	nvme_tcp_try_recv(queue);
244472e5d757SSagi Grimberg 	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
24451a9460ceSSagi Grimberg 	return queue->nr_cqe;
24461a9460ceSSagi Grimberg }
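/*
 * NVME_TCP_Q_POLLING is observed by the data_ready callback, which
 * skips scheduling io_work while it is set, so in polling mode
 * completions are reaped here (nvme_tcp_try_recv() updates
 * queue->nr_cqe) instead of bouncing through the workqueue.
 */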
24471a9460ceSSagi Grimberg 
24486acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_mq_ops = {
24493f2304f8SSagi Grimberg 	.queue_rq	= nvme_tcp_queue_rq,
245086f0348aSSagi Grimberg 	.commit_rqs	= nvme_tcp_commit_rqs,
24513f2304f8SSagi Grimberg 	.complete	= nvme_complete_rq,
24523f2304f8SSagi Grimberg 	.init_request	= nvme_tcp_init_request,
24533f2304f8SSagi Grimberg 	.exit_request	= nvme_tcp_exit_request,
24543f2304f8SSagi Grimberg 	.init_hctx	= nvme_tcp_init_hctx,
24553f2304f8SSagi Grimberg 	.timeout	= nvme_tcp_timeout,
2456873946f4SSagi Grimberg 	.map_queues	= nvme_tcp_map_queues,
24571a9460ceSSagi Grimberg 	.poll		= nvme_tcp_poll,
24583f2304f8SSagi Grimberg };
24593f2304f8SSagi Grimberg 
24606acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
24613f2304f8SSagi Grimberg 	.queue_rq	= nvme_tcp_queue_rq,
24623f2304f8SSagi Grimberg 	.complete	= nvme_complete_rq,
24633f2304f8SSagi Grimberg 	.init_request	= nvme_tcp_init_request,
24643f2304f8SSagi Grimberg 	.exit_request	= nvme_tcp_exit_request,
24653f2304f8SSagi Grimberg 	.init_hctx	= nvme_tcp_init_admin_hctx,
24663f2304f8SSagi Grimberg 	.timeout	= nvme_tcp_timeout,
24673f2304f8SSagi Grimberg };
24683f2304f8SSagi Grimberg 
24693f2304f8SSagi Grimberg static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
24703f2304f8SSagi Grimberg 	.name			= "tcp",
24713f2304f8SSagi Grimberg 	.module			= THIS_MODULE,
24723f2304f8SSagi Grimberg 	.flags			= NVME_F_FABRICS,
24733f2304f8SSagi Grimberg 	.reg_read32		= nvmf_reg_read32,
24743f2304f8SSagi Grimberg 	.reg_read64		= nvmf_reg_read64,
24753f2304f8SSagi Grimberg 	.reg_write32		= nvmf_reg_write32,
24763f2304f8SSagi Grimberg 	.free_ctrl		= nvme_tcp_free_ctrl,
24773f2304f8SSagi Grimberg 	.submit_async_event	= nvme_tcp_submit_async_event,
24783f2304f8SSagi Grimberg 	.delete_ctrl		= nvme_tcp_delete_ctrl,
24793f2304f8SSagi Grimberg 	.get_address		= nvmf_get_address,
24803f2304f8SSagi Grimberg };
24813f2304f8SSagi Grimberg 
24823f2304f8SSagi Grimberg static bool
24833f2304f8SSagi Grimberg nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
24843f2304f8SSagi Grimberg {
24853f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
24863f2304f8SSagi Grimberg 	bool found = false;
24873f2304f8SSagi Grimberg 
24883f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
24893f2304f8SSagi Grimberg 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
24903f2304f8SSagi Grimberg 		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
24913f2304f8SSagi Grimberg 		if (found)
24923f2304f8SSagi Grimberg 			break;
24933f2304f8SSagi Grimberg 	}
24943f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
24953f2304f8SSagi Grimberg 
24963f2304f8SSagi Grimberg 	return found;
24973f2304f8SSagi Grimberg }
24983f2304f8SSagi Grimberg 
24993f2304f8SSagi Grimberg static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
25003f2304f8SSagi Grimberg 		struct nvmf_ctrl_options *opts)
25013f2304f8SSagi Grimberg {
25023f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
25033f2304f8SSagi Grimberg 	int ret;
25043f2304f8SSagi Grimberg 
25053f2304f8SSagi Grimberg 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
25063f2304f8SSagi Grimberg 	if (!ctrl)
25073f2304f8SSagi Grimberg 		return ERR_PTR(-ENOMEM);
25083f2304f8SSagi Grimberg 
25093f2304f8SSagi Grimberg 	INIT_LIST_HEAD(&ctrl->list);
25103f2304f8SSagi Grimberg 	ctrl->ctrl.opts = opts;
25111a9460ceSSagi Grimberg 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
25121a9460ceSSagi Grimberg 				opts->nr_poll_queues + 1;
25133f2304f8SSagi Grimberg 	ctrl->ctrl.sqsize = opts->queue_size - 1;
25143f2304f8SSagi Grimberg 	ctrl->ctrl.kato = opts->kato;
25153f2304f8SSagi Grimberg 
25163f2304f8SSagi Grimberg 	INIT_DELAYED_WORK(&ctrl->connect_work,
25173f2304f8SSagi Grimberg 			nvme_tcp_reconnect_ctrl_work);
25183f2304f8SSagi Grimberg 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
25193f2304f8SSagi Grimberg 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
25203f2304f8SSagi Grimberg 
25213f2304f8SSagi Grimberg 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
25223f2304f8SSagi Grimberg 		opts->trsvcid =
25233f2304f8SSagi Grimberg 			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
25243f2304f8SSagi Grimberg 		if (!opts->trsvcid) {
25253f2304f8SSagi Grimberg 			ret = -ENOMEM;
25263f2304f8SSagi Grimberg 			goto out_free_ctrl;
25273f2304f8SSagi Grimberg 		}
25283f2304f8SSagi Grimberg 		opts->mask |= NVMF_OPT_TRSVCID;
25293f2304f8SSagi Grimberg 	}
25303f2304f8SSagi Grimberg 
25313f2304f8SSagi Grimberg 	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
25323f2304f8SSagi Grimberg 			opts->traddr, opts->trsvcid, &ctrl->addr);
25333f2304f8SSagi Grimberg 	if (ret) {
25343f2304f8SSagi Grimberg 		pr_err("malformed address passed: %s:%s\n",
25353f2304f8SSagi Grimberg 			opts->traddr, opts->trsvcid);
25363f2304f8SSagi Grimberg 		goto out_free_ctrl;
25373f2304f8SSagi Grimberg 	}
25383f2304f8SSagi Grimberg 
25393f2304f8SSagi Grimberg 	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
25403f2304f8SSagi Grimberg 		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
25413f2304f8SSagi Grimberg 			opts->host_traddr, NULL, &ctrl->src_addr);
25423f2304f8SSagi Grimberg 		if (ret) {
25433f2304f8SSagi Grimberg 			pr_err("malformed src address passed: %s\n",
25443f2304f8SSagi Grimberg 			       opts->host_traddr);
25453f2304f8SSagi Grimberg 			goto out_free_ctrl;
25463f2304f8SSagi Grimberg 		}
25473f2304f8SSagi Grimberg 	}
25483f2304f8SSagi Grimberg 
25493ede8f72SMartin Belanger 	if (opts->mask & NVMF_OPT_HOST_IFACE) {
25508b43ced6SPrabhakar Kushwaha 		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
25513ede8f72SMartin Belanger 			pr_err("invalid interface passed: %s\n",
25523ede8f72SMartin Belanger 			       opts->host_iface);
25533ede8f72SMartin Belanger 			ret = -ENODEV;
25543ede8f72SMartin Belanger 			goto out_free_ctrl;
25553ede8f72SMartin Belanger 		}
25563ede8f72SMartin Belanger 	}
25573ede8f72SMartin Belanger 
25583f2304f8SSagi Grimberg 	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
25593f2304f8SSagi Grimberg 		ret = -EALREADY;
25603f2304f8SSagi Grimberg 		goto out_free_ctrl;
25613f2304f8SSagi Grimberg 	}
25623f2304f8SSagi Grimberg 
2563873946f4SSagi Grimberg 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
25643f2304f8SSagi Grimberg 				GFP_KERNEL);
25653f2304f8SSagi Grimberg 	if (!ctrl->queues) {
25663f2304f8SSagi Grimberg 		ret = -ENOMEM;
25673f2304f8SSagi Grimberg 		goto out_free_ctrl;
25683f2304f8SSagi Grimberg 	}
25693f2304f8SSagi Grimberg 
25703f2304f8SSagi Grimberg 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
25713f2304f8SSagi Grimberg 	if (ret)
25723f2304f8SSagi Grimberg 		goto out_kfree_queues;
25733f2304f8SSagi Grimberg 
25743f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
25753f2304f8SSagi Grimberg 		WARN_ON_ONCE(1);
25763f2304f8SSagi Grimberg 		ret = -EINTR;
25773f2304f8SSagi Grimberg 		goto out_uninit_ctrl;
25783f2304f8SSagi Grimberg 	}
25793f2304f8SSagi Grimberg 
25803f2304f8SSagi Grimberg 	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
25813f2304f8SSagi Grimberg 	if (ret)
25823f2304f8SSagi Grimberg 		goto out_uninit_ctrl;
25833f2304f8SSagi Grimberg 
25843f2304f8SSagi Grimberg 	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2585e5ea42faSHannes Reinecke 		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
25863f2304f8SSagi Grimberg 
25873f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
25883f2304f8SSagi Grimberg 	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
25893f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
25903f2304f8SSagi Grimberg 
25913f2304f8SSagi Grimberg 	return &ctrl->ctrl;
25923f2304f8SSagi Grimberg 
25933f2304f8SSagi Grimberg out_uninit_ctrl:
25943f2304f8SSagi Grimberg 	nvme_uninit_ctrl(&ctrl->ctrl);
25953f2304f8SSagi Grimberg 	nvme_put_ctrl(&ctrl->ctrl);
25963f2304f8SSagi Grimberg 	if (ret > 0)
25973f2304f8SSagi Grimberg 		ret = -EIO;
25983f2304f8SSagi Grimberg 	return ERR_PTR(ret);
25993f2304f8SSagi Grimberg out_kfree_queues:
26003f2304f8SSagi Grimberg 	kfree(ctrl->queues);
26013f2304f8SSagi Grimberg out_free_ctrl:
26023f2304f8SSagi Grimberg 	kfree(ctrl);
26033f2304f8SSagi Grimberg 	return ERR_PTR(ret);
26043f2304f8SSagi Grimberg }
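/*
 * Controllers are created through the fabrics control plane: userspace
 * writes an option string to /dev/nvme-fabrics and
 * nvme_tcp_create_ctrl() is invoked with the parsed nvmf_ctrl_options.
 * Illustrative only; the address, port and NQN below are placeholders:
 *
 *	nvme connect -t tcp -a 192.168.1.100 -s 4420 \
 *		-n nqn.2018-01.example:subsys0
 *
 * or, equivalently, the raw option string:
 *
 *	echo "transport=tcp,traddr=192.168.1.100,trsvcid=4420,\
 *	nqn=nqn.2018-01.example:subsys0" > /dev/nvme-fabrics
 */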
26053f2304f8SSagi Grimberg 
26063f2304f8SSagi Grimberg static struct nvmf_transport_ops nvme_tcp_transport = {
26073f2304f8SSagi Grimberg 	.name		= "tcp",
26083f2304f8SSagi Grimberg 	.module		= THIS_MODULE,
26093f2304f8SSagi Grimberg 	.required_opts	= NVMF_OPT_TRADDR,
26103f2304f8SSagi Grimberg 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
26113f2304f8SSagi Grimberg 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2612873946f4SSagi Grimberg 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2613bb13985dSIsrael Rukshin 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
26143ede8f72SMartin Belanger 			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
26153f2304f8SSagi Grimberg 	.create_ctrl	= nvme_tcp_create_ctrl,
26163f2304f8SSagi Grimberg };
26173f2304f8SSagi Grimberg 
26183f2304f8SSagi Grimberg static int __init nvme_tcp_init_module(void)
26193f2304f8SSagi Grimberg {
26203f2304f8SSagi Grimberg 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
26213f2304f8SSagi Grimberg 			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
26223f2304f8SSagi Grimberg 	if (!nvme_tcp_wq)
26233f2304f8SSagi Grimberg 		return -ENOMEM;
26243f2304f8SSagi Grimberg 
26253f2304f8SSagi Grimberg 	nvmf_register_transport(&nvme_tcp_transport);
26263f2304f8SSagi Grimberg 	return 0;
26273f2304f8SSagi Grimberg }
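/*
 * The workqueue needs WQ_MEM_RECLAIM because send/recv work sits in the
 * block I/O path and may be required to make forward progress under
 * memory pressure (e.g. writeback targeting an nvme-tcp device);
 * WQ_HIGHPRI reduces completion latency by running io_work ahead of
 * normal-priority work.
 */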
26283f2304f8SSagi Grimberg 
26293f2304f8SSagi Grimberg static void __exit nvme_tcp_cleanup_module(void)
26303f2304f8SSagi Grimberg {
26313f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
26323f2304f8SSagi Grimberg 
26333f2304f8SSagi Grimberg 	nvmf_unregister_transport(&nvme_tcp_transport);
26343f2304f8SSagi Grimberg 
26353f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
26363f2304f8SSagi Grimberg 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
26373f2304f8SSagi Grimberg 		nvme_delete_ctrl(&ctrl->ctrl);
26383f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
26393f2304f8SSagi Grimberg 	flush_workqueue(nvme_delete_wq);
26403f2304f8SSagi Grimberg 
26413f2304f8SSagi Grimberg 	destroy_workqueue(nvme_tcp_wq);
26423f2304f8SSagi Grimberg }
26433f2304f8SSagi Grimberg 
26443f2304f8SSagi Grimberg module_init(nvme_tcp_init_module);
26453f2304f8SSagi Grimberg module_exit(nvme_tcp_cleanup_module);
26463f2304f8SSagi Grimberg 
26473f2304f8SSagi Grimberg MODULE_LICENSE("GPL v2");
2648