xref: /openbmc/linux/drivers/nvme/host/tcp.c (revision 5c11f7d9)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * Any non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
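
/*
 * Editor's note (illustrative, not part of the original source): with 0644
 * permissions the priority can be given at load time, e.g.
 * "modprobe nvme_tcp so_priority=6", or changed later through
 * /sys/module/nvme_tcp/parameters/so_priority.  The value itself is applied
 * to each queue's socket when the queue is set up, later in this file.
 */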

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

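/*
 * Worked example (editor's addition): cmnd_capsule_len is derived from the
 * controller's IOCCSZ (reported in 16-byte units), so a target advertising
 * IOCCSZ = 516 gives an 8256-byte capsule and
 * 8256 - sizeof(struct nvme_command) = 8256 - 64 = 8192 bytes of inline
 * write data per command.
 */
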
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
	return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nsegs;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nsegs = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nsegs = bio_segments(bio);
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
	req->iter.iov_offset = offset;
}
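
/*
 * Editor's note: RQF_SPECIAL_PAYLOAD marks requests whose payload lives in
 * rq->special_vec rather than a bio chain (e.g. the DSM range buffer the
 * core builds for a discard), so it maps to exactly one bvec.  For regular
 * requests the iterator starts at the current bio's position
 * (bi_bvec_done), which matters when a request spans several bios and the
 * iterator is re-initialized mid-request by nvme_tcp_advance_req().
 */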

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we're the first on the send_list and can grab the send lock,
	 * try to send directly; otherwise queue io_work.  Also, only do
	 * that if we are on the same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	} else if (last) {
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}
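
/*
 * Editor's note: llist_add() pushes at the head, so llist_del_all() hands
 * back requests newest-first; re-inserting each one at the head of
 * send_list reverses them again.  The net effect is that
 * nvme_tcp_fetch_request() below consumes requests roughly in submission
 * order without any producer-side locking.
 */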

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}
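
/*
 * Wire-layout sketch (editor's addition): a PDU carries hdr->hlen header
 * bytes immediately followed, when NVME_TCP_F_HDGST is set, by a 4-byte
 * CRC32C of that header.  The verification above saves the received digest
 * from pdu + hlen, recomputes it in place (nvme_tcp_hdgst() writes its
 * result back to the same offset), and compares the two.  For a command
 * capsule PDU, for example, hlen is sizeof(struct nvme_tcp_cmd_pdu) = 72,
 * so the digest occupies bytes 72..75.
 */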

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag 0x%x not found\n",
			nvme_tcp_queue_id(queue), cqe->command_id);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = rq->tag;
	data->data_offset = cpu_to_le32(req->data_sent);
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}
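
/*
 * Worked example (editor's addition): with both digests enabled and a
 * 4096-byte R2T, hlen = sizeof(struct nvme_tcp_data_pdu) = 24,
 * pdo = 24 + 4 = 28, and plen = 24 + 4 + 4096 + 4 = 4128: header, header
 * digest, data, then data digest, which is exactly how the controller will
 * parse the h2c PDU on the wire.
 */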

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false, true);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}
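
/*
 * Editor's note: the status << 1 above places the NVMe status code in bits
 * 15:1 of the completion status word, leaving bit 0 (the phase tag in a
 * real CQE) clear; nvme_try_complete_req() shifts it back down when
 * recording the result.
 */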

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
			      unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
						pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock(&sk->sk_callback_lock);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}
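
/*
 * Editor's note: more_requests is only set while the direct-send path in
 * nvme_tcp_queue_request() holds send_mutex and knows the submitter has
 * more requests coming (last == false).  Folding it into this check keeps
 * MSG_MORE/MSG_SENDPAGE_NOTLAST asserted so back-to-back PDUs are coalesced
 * instead of being pushed out one packet at a time.
 */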

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}

static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		nvme_tcp_advance_req(req, ret);
		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/* fully successful last write */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}
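
/*
 * Editor's note: sendpage_ok() refuses pages that must not enter the
 * zero-copy path (slab-backed pages or pages with a zero refcount); those
 * fall back to sock_no_sendpage(), which copies the data through an
 * ordinary sendmsg.  Everything else goes out via kernel_sendpage() so the
 * stack can transmit directly from the page.
 */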

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
			nvme_tcp_init_iter(req, WRITE);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		if (!req->data_sent)
			nvme_tcp_init_iter(req, WRITE);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		/* advance byte-wise across partial sends, not __le32-wise */
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}
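
/*
 * Editor's note: nvme_tcp_try_send() returns 1 when it made forward
 * progress on the current request, 0 when there was nothing to send (a
 * socket-full -EAGAIN is mapped to 0 so callers simply retry later), and a
 * negative errno on a hard failure, in which case the in-flight request has
 * been cleared and, except for connection errors (-EPIPE/-ECONNRESET, which
 * are handled by error recovery), completed with NVME_SC_HOST_PATH_ERROR.
 */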
11013f2304f8SSagi Grimberg 
11023f2304f8SSagi Grimberg static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
11033f2304f8SSagi Grimberg {
110410407ec9SPotnuri Bharat Teja 	struct socket *sock = queue->sock;
110510407ec9SPotnuri Bharat Teja 	struct sock *sk = sock->sk;
11063f2304f8SSagi Grimberg 	read_descriptor_t rd_desc;
11073f2304f8SSagi Grimberg 	int consumed;
11083f2304f8SSagi Grimberg 
11093f2304f8SSagi Grimberg 	rd_desc.arg.data = queue;
11103f2304f8SSagi Grimberg 	rd_desc.count = 1;
11113f2304f8SSagi Grimberg 	lock_sock(sk);
11121a9460ceSSagi Grimberg 	queue->nr_cqe = 0;
111310407ec9SPotnuri Bharat Teja 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
11143f2304f8SSagi Grimberg 	release_sock(sk);
11153f2304f8SSagi Grimberg 	return consumed;
11163f2304f8SSagi Grimberg }
11173f2304f8SSagi Grimberg 
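/*
 * Editor's note: reception is driven through the socket's ->read_sock()
 * rather than recvmsg(): under lock_sock(), every queued skb is handed to
 * nvme_tcp_recv_skb() (defined earlier in this file), with rd_desc.arg.data
 * carrying the queue pointer into the callback. The actor follows the
 * usual tcp_read_sock() shape, roughly:
 *
 *	static int nvme_tcp_recv_skb(read_descriptor_t *desc,
 *			struct sk_buff *skb, unsigned int offset, size_t len);
 */
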
11183f2304f8SSagi Grimberg static void nvme_tcp_io_work(struct work_struct *w)
11193f2304f8SSagi Grimberg {
11203f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue =
11213f2304f8SSagi Grimberg 		container_of(w, struct nvme_tcp_queue, io_work);
1122ddef2957SWunderlich, Mark 	unsigned long deadline = jiffies + msecs_to_jiffies(1);
11233f2304f8SSagi Grimberg 
11243f2304f8SSagi Grimberg 	do {
11253f2304f8SSagi Grimberg 		bool pending = false;
11263f2304f8SSagi Grimberg 		int result;
11273f2304f8SSagi Grimberg 
1128db5ad6b7SSagi Grimberg 		if (mutex_trylock(&queue->send_mutex)) {
11293f2304f8SSagi Grimberg 			result = nvme_tcp_try_send(queue);
1130db5ad6b7SSagi Grimberg 			mutex_unlock(&queue->send_mutex);
11315ff4e112SSagi Grimberg 			if (result > 0)
11323f2304f8SSagi Grimberg 				pending = true;
11335ff4e112SSagi Grimberg 			else if (unlikely(result < 0))
11345ff4e112SSagi Grimberg 				break;
1135db5ad6b7SSagi Grimberg 		}
11363f2304f8SSagi Grimberg 
11373f2304f8SSagi Grimberg 		result = nvme_tcp_try_recv(queue);
11383f2304f8SSagi Grimberg 		if (result > 0)
11393f2304f8SSagi Grimberg 			pending = true;
1140761ad26cSSagi Grimberg 		else if (unlikely(result < 0))
114139d06079SSagi Grimberg 			return;
11423f2304f8SSagi Grimberg 
11433f2304f8SSagi Grimberg 		if (!pending)
11443f2304f8SSagi Grimberg 			return;
11453f2304f8SSagi Grimberg 
1146ddef2957SWunderlich, Mark 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
11473f2304f8SSagi Grimberg 
11483f2304f8SSagi Grimberg 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
11493f2304f8SSagi Grimberg }
11503f2304f8SSagi Grimberg 
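/*
 * Editor's note: io_work services both directions of a queue from one
 * context under a ~1ms budget (msecs_to_jiffies(1)). It keeps looping as
 * long as either send or recv made progress and the budget holds;
 * otherwise it returns (idle queue, or a fatal recv error) or requeues
 * itself on queue->io_cpu so other work can run before it continues.
 * Sends are attempted with mutex_trylock() only, since the submission
 * path may already be sending directly under queue->send_mutex.
 */
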
11513f2304f8SSagi Grimberg static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
11523f2304f8SSagi Grimberg {
11533f2304f8SSagi Grimberg 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
11543f2304f8SSagi Grimberg 
11553f2304f8SSagi Grimberg 	ahash_request_free(queue->rcv_hash);
11563f2304f8SSagi Grimberg 	ahash_request_free(queue->snd_hash);
11573f2304f8SSagi Grimberg 	crypto_free_ahash(tfm);
11583f2304f8SSagi Grimberg }
11593f2304f8SSagi Grimberg 
11603f2304f8SSagi Grimberg static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
11613f2304f8SSagi Grimberg {
11623f2304f8SSagi Grimberg 	struct crypto_ahash *tfm;
11633f2304f8SSagi Grimberg 
11643f2304f8SSagi Grimberg 	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
11653f2304f8SSagi Grimberg 	if (IS_ERR(tfm))
11663f2304f8SSagi Grimberg 		return PTR_ERR(tfm);
11673f2304f8SSagi Grimberg 
11683f2304f8SSagi Grimberg 	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
11693f2304f8SSagi Grimberg 	if (!queue->snd_hash)
11703f2304f8SSagi Grimberg 		goto free_tfm;
11713f2304f8SSagi Grimberg 	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
11723f2304f8SSagi Grimberg 
11733f2304f8SSagi Grimberg 	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
11743f2304f8SSagi Grimberg 	if (!queue->rcv_hash)
11753f2304f8SSagi Grimberg 		goto free_snd_hash;
11763f2304f8SSagi Grimberg 	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
11773f2304f8SSagi Grimberg 
11783f2304f8SSagi Grimberg 	return 0;
11793f2304f8SSagi Grimberg free_snd_hash:
11803f2304f8SSagi Grimberg 	ahash_request_free(queue->snd_hash);
11813f2304f8SSagi Grimberg free_tfm:
11823f2304f8SSagi Grimberg 	crypto_free_ahash(tfm);
11833f2304f8SSagi Grimberg 	return -ENOMEM;
11843f2304f8SSagi Grimberg }
11853f2304f8SSagi Grimberg 
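/*
 * Editor's note: NVMe/TCP digests are CRC32C, obtained here from the
 * kernel crypto API ("crc32c" ahash, allocated with the CRYPTO_ALG_ASYNC
 * mask so a synchronous implementation is selected). One tfm backs two
 * requests so send-side and receive-side digests can be computed
 * independently. Digest calculation with such a request looks roughly
 * like this sketch (sg covers the bytes to digest; ddgst is the __le32
 * wire value):
 *
 *	ahash_request_set_crypt(req, sg, (u8 *)&ddgst, len);
 *	crypto_ahash_digest(req);
 */
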
11863f2304f8SSagi Grimberg static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
11873f2304f8SSagi Grimberg {
11883f2304f8SSagi Grimberg 	struct nvme_tcp_request *async = &ctrl->async_req;
11893f2304f8SSagi Grimberg 
11903f2304f8SSagi Grimberg 	page_frag_free(async->pdu);
11913f2304f8SSagi Grimberg }
11923f2304f8SSagi Grimberg 
11933f2304f8SSagi Grimberg static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
11943f2304f8SSagi Grimberg {
11953f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
11963f2304f8SSagi Grimberg 	struct nvme_tcp_request *async = &ctrl->async_req;
11973f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
11983f2304f8SSagi Grimberg 
11993f2304f8SSagi Grimberg 	async->pdu = page_frag_alloc(&queue->pf_cache,
12003f2304f8SSagi Grimberg 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
12013f2304f8SSagi Grimberg 		GFP_KERNEL | __GFP_ZERO);
12023f2304f8SSagi Grimberg 	if (!async->pdu)
12033f2304f8SSagi Grimberg 		return -ENOMEM;
12043f2304f8SSagi Grimberg 
12053f2304f8SSagi Grimberg 	async->queue = &ctrl->queues[0];
12063f2304f8SSagi Grimberg 	return 0;
12073f2304f8SSagi Grimberg }
12083f2304f8SSagi Grimberg 
12093f2304f8SSagi Grimberg static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
12103f2304f8SSagi Grimberg {
12113f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
12123f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
12133f2304f8SSagi Grimberg 
12143f2304f8SSagi Grimberg 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
12153f2304f8SSagi Grimberg 		return;
12163f2304f8SSagi Grimberg 
12173f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest)
12183f2304f8SSagi Grimberg 		nvme_tcp_free_crypto(queue);
12193f2304f8SSagi Grimberg 
12203f2304f8SSagi Grimberg 	sock_release(queue->sock);
12213f2304f8SSagi Grimberg 	kfree(queue->pdu);
12223f2304f8SSagi Grimberg }
12233f2304f8SSagi Grimberg 
12243f2304f8SSagi Grimberg static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
12253f2304f8SSagi Grimberg {
12263f2304f8SSagi Grimberg 	struct nvme_tcp_icreq_pdu *icreq;
12273f2304f8SSagi Grimberg 	struct nvme_tcp_icresp_pdu *icresp;
12283f2304f8SSagi Grimberg 	struct msghdr msg = {};
12293f2304f8SSagi Grimberg 	struct kvec iov;
12303f2304f8SSagi Grimberg 	bool ctrl_hdgst, ctrl_ddgst;
12313f2304f8SSagi Grimberg 	int ret;
12323f2304f8SSagi Grimberg 
12333f2304f8SSagi Grimberg 	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
12343f2304f8SSagi Grimberg 	if (!icreq)
12353f2304f8SSagi Grimberg 		return -ENOMEM;
12363f2304f8SSagi Grimberg 
12373f2304f8SSagi Grimberg 	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
12383f2304f8SSagi Grimberg 	if (!icresp) {
12393f2304f8SSagi Grimberg 		ret = -ENOMEM;
12403f2304f8SSagi Grimberg 		goto free_icreq;
12413f2304f8SSagi Grimberg 	}
12423f2304f8SSagi Grimberg 
12433f2304f8SSagi Grimberg 	icreq->hdr.type = nvme_tcp_icreq;
12443f2304f8SSagi Grimberg 	icreq->hdr.hlen = sizeof(*icreq);
12453f2304f8SSagi Grimberg 	icreq->hdr.pdo = 0;
12463f2304f8SSagi Grimberg 	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
12473f2304f8SSagi Grimberg 	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
12483f2304f8SSagi Grimberg 	icreq->maxr2t = 0; /* single inflight r2t supported */
12493f2304f8SSagi Grimberg 	icreq->hpda = 0; /* no alignment constraint */
12503f2304f8SSagi Grimberg 	if (queue->hdr_digest)
12513f2304f8SSagi Grimberg 		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
12523f2304f8SSagi Grimberg 	if (queue->data_digest)
12533f2304f8SSagi Grimberg 		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
12543f2304f8SSagi Grimberg 
12553f2304f8SSagi Grimberg 	iov.iov_base = icreq;
12563f2304f8SSagi Grimberg 	iov.iov_len = sizeof(*icreq);
12573f2304f8SSagi Grimberg 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
12583f2304f8SSagi Grimberg 	if (ret < 0)
12593f2304f8SSagi Grimberg 		goto free_icresp;
12603f2304f8SSagi Grimberg 
12613f2304f8SSagi Grimberg 	memset(&msg, 0, sizeof(msg));
12623f2304f8SSagi Grimberg 	iov.iov_base = icresp;
12633f2304f8SSagi Grimberg 	iov.iov_len = sizeof(*icresp);
12643f2304f8SSagi Grimberg 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
12653f2304f8SSagi Grimberg 			iov.iov_len, msg.msg_flags);
12663f2304f8SSagi Grimberg 	if (ret < 0)
12673f2304f8SSagi Grimberg 		goto free_icresp;
12683f2304f8SSagi Grimberg 
12693f2304f8SSagi Grimberg 	ret = -EINVAL;
12703f2304f8SSagi Grimberg 	if (icresp->hdr.type != nvme_tcp_icresp) {
12713f2304f8SSagi Grimberg 		pr_err("queue %d: bad type returned %d\n",
12723f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->hdr.type);
12733f2304f8SSagi Grimberg 		goto free_icresp;
12743f2304f8SSagi Grimberg 	}
12753f2304f8SSagi Grimberg 
12763f2304f8SSagi Grimberg 	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
12773f2304f8SSagi Grimberg 		pr_err("queue %d: bad pdu length returned %d\n",
12783f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->hdr.plen);
12793f2304f8SSagi Grimberg 		goto free_icresp;
12803f2304f8SSagi Grimberg 	}
12813f2304f8SSagi Grimberg 
12823f2304f8SSagi Grimberg 	if (icresp->pfv != NVME_TCP_PFV_1_0) {
12833f2304f8SSagi Grimberg 		pr_err("queue %d: bad pfv returned %d\n",
12843f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->pfv);
12853f2304f8SSagi Grimberg 		goto free_icresp;
12863f2304f8SSagi Grimberg 	}
12873f2304f8SSagi Grimberg 
12883f2304f8SSagi Grimberg 	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
12893f2304f8SSagi Grimberg 	if ((queue->data_digest && !ctrl_ddgst) ||
12903f2304f8SSagi Grimberg 	    (!queue->data_digest && ctrl_ddgst)) {
12913f2304f8SSagi Grimberg 		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
12923f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue),
12933f2304f8SSagi Grimberg 			queue->data_digest ? "enabled" : "disabled",
12943f2304f8SSagi Grimberg 			ctrl_ddgst ? "enabled" : "disabled");
12953f2304f8SSagi Grimberg 		goto free_icresp;
12963f2304f8SSagi Grimberg 	}
12973f2304f8SSagi Grimberg 
12983f2304f8SSagi Grimberg 	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
12993f2304f8SSagi Grimberg 	if ((queue->hdr_digest && !ctrl_hdgst) ||
13003f2304f8SSagi Grimberg 	    (!queue->hdr_digest && ctrl_hdgst)) {
13013f2304f8SSagi Grimberg 		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
13023f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue),
13033f2304f8SSagi Grimberg 			queue->hdr_digest ? "enabled" : "disabled",
13043f2304f8SSagi Grimberg 			ctrl_hdgst ? "enabled" : "disabled");
13053f2304f8SSagi Grimberg 		goto free_icresp;
13063f2304f8SSagi Grimberg 	}
13073f2304f8SSagi Grimberg 
13083f2304f8SSagi Grimberg 	if (icresp->cpda != 0) {
13093f2304f8SSagi Grimberg 		pr_err("queue %d: unsupported cpda returned %d\n",
13103f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->cpda);
13113f2304f8SSagi Grimberg 		goto free_icresp;
13123f2304f8SSagi Grimberg 	}
13133f2304f8SSagi Grimberg 
13143f2304f8SSagi Grimberg 	ret = 0;
13153f2304f8SSagi Grimberg free_icresp:
13163f2304f8SSagi Grimberg 	kfree(icresp);
13173f2304f8SSagi Grimberg free_icreq:
13183f2304f8SSagi Grimberg 	kfree(icreq);
13193f2304f8SSagi Grimberg 	return ret;
13203f2304f8SSagi Grimberg }
13213f2304f8SSagi Grimberg 
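/*
 * Editor's note: this is the NVMe/TCP "initialize connection" handshake.
 * The host sends a single ICReq PDU (a fixed 128 bytes per the NVMe/TCP
 * spec, hence plen == hlen and no data) advertising protocol version,
 * digest enablement and zero alignment, then waits up to sk_rcvtimeo
 * (set to 10s at queue allocation) for the ICResp and validates, in
 * order: PDU type, PDU length, protocol version, data digest agreement,
 * header digest agreement, and cpda == 0. Any mismatch fails queue setup
 * with -EINVAL; there is no renegotiation.
 */
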
132240510a63SSagi Grimberg static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
132340510a63SSagi Grimberg {
132440510a63SSagi Grimberg 	return nvme_tcp_queue_id(queue) == 0;
132540510a63SSagi Grimberg }
132640510a63SSagi Grimberg 
132740510a63SSagi Grimberg static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
132840510a63SSagi Grimberg {
132940510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
133040510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
133140510a63SSagi Grimberg 
133240510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
133340510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
133440510a63SSagi Grimberg }
133540510a63SSagi Grimberg 
133640510a63SSagi Grimberg static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
133740510a63SSagi Grimberg {
133840510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
133940510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
134040510a63SSagi Grimberg 
134140510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
134240510a63SSagi Grimberg 		!nvme_tcp_default_queue(queue) &&
134340510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
134440510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_READ];
134540510a63SSagi Grimberg }
134640510a63SSagi Grimberg 
134740510a63SSagi Grimberg static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
134840510a63SSagi Grimberg {
134940510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
135040510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
135140510a63SSagi Grimberg 
135240510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
135340510a63SSagi Grimberg 		!nvme_tcp_default_queue(queue) &&
135440510a63SSagi Grimberg 		!nvme_tcp_read_queue(queue) &&
135540510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
135640510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_READ] +
135740510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_POLL];
135840510a63SSagi Grimberg }
135940510a63SSagi Grimberg 
136040510a63SSagi Grimberg static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
136140510a63SSagi Grimberg {
136240510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
136340510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
136440510a63SSagi Grimberg 	int n = 0;
136540510a63SSagi Grimberg 
136640510a63SSagi Grimberg 	if (nvme_tcp_default_queue(queue))
136740510a63SSagi Grimberg 		n = qid - 1;
136840510a63SSagi Grimberg 	else if (nvme_tcp_read_queue(queue))
136940510a63SSagi Grimberg 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
137040510a63SSagi Grimberg 	else if (nvme_tcp_poll_queue(queue))
137140510a63SSagi Grimberg 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
137240510a63SSagi Grimberg 				ctrl->io_queues[HCTX_TYPE_READ] - 1;
137340510a63SSagi Grimberg 	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
137440510a63SSagi Grimberg }
137540510a63SSagi Grimberg 
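/*
 * Editor's note, with a worked example: queues are laid out by qid as
 * [admin | default | read | poll], and each class restarts its CPU
 * numbering from 0. With io_queues = {DEFAULT: 4, READ: 2, POLL: 2},
 * read queue qid 6 gets n = 6 - 4 - 1 = 1, and
 * cpumask_next_wrap(n - 1, cpu_online_mask, -1, false) then yields the
 * second online CPU (it returns the n'th online CPU counting from 0).
 */
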
13763f2304f8SSagi Grimberg static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
13773f2304f8SSagi Grimberg 		int qid, size_t queue_size)
13783f2304f8SSagi Grimberg {
13793f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
13803f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
13816ebf71baSChristoph Hellwig 	int ret, rcv_pdu_size;
13823f2304f8SSagi Grimberg 
13833f2304f8SSagi Grimberg 	queue->ctrl = ctrl;
138415ec928aSSagi Grimberg 	init_llist_head(&queue->req_list);
13853f2304f8SSagi Grimberg 	INIT_LIST_HEAD(&queue->send_list);
1386db5ad6b7SSagi Grimberg 	mutex_init(&queue->send_mutex);
13873f2304f8SSagi Grimberg 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
13883f2304f8SSagi Grimberg 	queue->queue_size = queue_size;
13893f2304f8SSagi Grimberg 
13903f2304f8SSagi Grimberg 	if (qid > 0)
13919924b030SIsrael Rukshin 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
13923f2304f8SSagi Grimberg 	else
13933f2304f8SSagi Grimberg 		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
13943f2304f8SSagi Grimberg 						NVME_TCP_ADMIN_CCSZ;
13953f2304f8SSagi Grimberg 
13963f2304f8SSagi Grimberg 	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
13973f2304f8SSagi Grimberg 			IPPROTO_TCP, &queue->sock);
13983f2304f8SSagi Grimberg 	if (ret) {
13999924b030SIsrael Rukshin 		dev_err(nctrl->device,
14003f2304f8SSagi Grimberg 			"failed to create socket: %d\n", ret);
14013f2304f8SSagi Grimberg 		return ret;
14023f2304f8SSagi Grimberg 	}
14033f2304f8SSagi Grimberg 
14043f2304f8SSagi Grimberg 	/* Single syn retry */
1405557eadfcSChristoph Hellwig 	tcp_sock_set_syncnt(queue->sock->sk, 1);
14063f2304f8SSagi Grimberg 
14073f2304f8SSagi Grimberg 	/* Set TCP no delay */
140812abc5eeSChristoph Hellwig 	tcp_sock_set_nodelay(queue->sock->sk);
14093f2304f8SSagi Grimberg 
14103f2304f8SSagi Grimberg 	/*
14113f2304f8SSagi Grimberg 	 * Clean up whatever is sitting in the TCP transmit queue on socket
14123f2304f8SSagi Grimberg 	 * close. This is done to prevent stale data from being sent should
14133f2304f8SSagi Grimberg 	 * the network connection be restored before TCP times out.
14143f2304f8SSagi Grimberg 	 */
1415c433594cSChristoph Hellwig 	sock_no_linger(queue->sock->sk);
14163f2304f8SSagi Grimberg 
14176e434967SChristoph Hellwig 	if (so_priority > 0)
14186e434967SChristoph Hellwig 		sock_set_priority(queue->sock->sk, so_priority);
14199912ade3SWunderlich, Mark 
1420bb13985dSIsrael Rukshin 	/* Set socket type of service */
14216ebf71baSChristoph Hellwig 	if (nctrl->opts->tos >= 0)
14226ebf71baSChristoph Hellwig 		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1423bb13985dSIsrael Rukshin 
1424adc99fd3SSagi Grimberg 	/* Set a 10 second timeout for the icresp recvmsg */
1425adc99fd3SSagi Grimberg 	queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1426adc99fd3SSagi Grimberg 
14273f2304f8SSagi Grimberg 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
142840510a63SSagi Grimberg 	nvme_tcp_set_queue_io_cpu(queue);
14293f2304f8SSagi Grimberg 	queue->request = NULL;
14303f2304f8SSagi Grimberg 	queue->data_remaining = 0;
14313f2304f8SSagi Grimberg 	queue->ddgst_remaining = 0;
14323f2304f8SSagi Grimberg 	queue->pdu_remaining = 0;
14333f2304f8SSagi Grimberg 	queue->pdu_offset = 0;
14343f2304f8SSagi Grimberg 	sk_set_memalloc(queue->sock->sk);
14353f2304f8SSagi Grimberg 
14369924b030SIsrael Rukshin 	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
14373f2304f8SSagi Grimberg 		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
14383f2304f8SSagi Grimberg 			sizeof(ctrl->src_addr));
14393f2304f8SSagi Grimberg 		if (ret) {
14409924b030SIsrael Rukshin 			dev_err(nctrl->device,
14413f2304f8SSagi Grimberg 				"failed to bind queue %d socket %d\n",
14423f2304f8SSagi Grimberg 				qid, ret);
14433f2304f8SSagi Grimberg 			goto err_sock;
14443f2304f8SSagi Grimberg 		}
14453f2304f8SSagi Grimberg 	}
14463f2304f8SSagi Grimberg 
14473f2304f8SSagi Grimberg 	queue->hdr_digest = nctrl->opts->hdr_digest;
14483f2304f8SSagi Grimberg 	queue->data_digest = nctrl->opts->data_digest;
14493f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest) {
14503f2304f8SSagi Grimberg 		ret = nvme_tcp_alloc_crypto(queue);
14513f2304f8SSagi Grimberg 		if (ret) {
14529924b030SIsrael Rukshin 			dev_err(nctrl->device,
14533f2304f8SSagi Grimberg 				"failed to allocate queue %d crypto\n", qid);
14543f2304f8SSagi Grimberg 			goto err_sock;
14553f2304f8SSagi Grimberg 		}
14563f2304f8SSagi Grimberg 	}
14573f2304f8SSagi Grimberg 
14583f2304f8SSagi Grimberg 	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
14593f2304f8SSagi Grimberg 			nvme_tcp_hdgst_len(queue);
14603f2304f8SSagi Grimberg 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
14613f2304f8SSagi Grimberg 	if (!queue->pdu) {
14623f2304f8SSagi Grimberg 		ret = -ENOMEM;
14633f2304f8SSagi Grimberg 		goto err_crypto;
14643f2304f8SSagi Grimberg 	}
14653f2304f8SSagi Grimberg 
14669924b030SIsrael Rukshin 	dev_dbg(nctrl->device, "connecting queue %d\n",
14673f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue));
14683f2304f8SSagi Grimberg 
14693f2304f8SSagi Grimberg 	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
14703f2304f8SSagi Grimberg 		sizeof(ctrl->addr), 0);
14713f2304f8SSagi Grimberg 	if (ret) {
14729924b030SIsrael Rukshin 		dev_err(nctrl->device,
14733f2304f8SSagi Grimberg 			"failed to connect socket: %d\n", ret);
14743f2304f8SSagi Grimberg 		goto err_rcv_pdu;
14753f2304f8SSagi Grimberg 	}
14763f2304f8SSagi Grimberg 
14773f2304f8SSagi Grimberg 	ret = nvme_tcp_init_connection(queue);
14783f2304f8SSagi Grimberg 	if (ret)
14793f2304f8SSagi Grimberg 		goto err_init_connect;
14803f2304f8SSagi Grimberg 
14813f2304f8SSagi Grimberg 	queue->rd_enabled = true;
14823f2304f8SSagi Grimberg 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
14833f2304f8SSagi Grimberg 	nvme_tcp_init_recv_ctx(queue);
14843f2304f8SSagi Grimberg 
14853f2304f8SSagi Grimberg 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
14863f2304f8SSagi Grimberg 	queue->sock->sk->sk_user_data = queue;
14873f2304f8SSagi Grimberg 	queue->state_change = queue->sock->sk->sk_state_change;
14883f2304f8SSagi Grimberg 	queue->data_ready = queue->sock->sk->sk_data_ready;
14893f2304f8SSagi Grimberg 	queue->write_space = queue->sock->sk->sk_write_space;
14903f2304f8SSagi Grimberg 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
14913f2304f8SSagi Grimberg 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
14923f2304f8SSagi Grimberg 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1493ac1c4e18SSebastian Andrzej Siewior #ifdef CONFIG_NET_RX_BUSY_POLL
14941a9460ceSSagi Grimberg 	queue->sock->sk->sk_ll_usec = 1;
1495ac1c4e18SSebastian Andrzej Siewior #endif
14963f2304f8SSagi Grimberg 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
14973f2304f8SSagi Grimberg 
14983f2304f8SSagi Grimberg 	return 0;
14993f2304f8SSagi Grimberg 
15003f2304f8SSagi Grimberg err_init_connect:
15013f2304f8SSagi Grimberg 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
15023f2304f8SSagi Grimberg err_rcv_pdu:
15033f2304f8SSagi Grimberg 	kfree(queue->pdu);
15043f2304f8SSagi Grimberg err_crypto:
15053f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest)
15063f2304f8SSagi Grimberg 		nvme_tcp_free_crypto(queue);
15073f2304f8SSagi Grimberg err_sock:
15083f2304f8SSagi Grimberg 	sock_release(queue->sock);
15093f2304f8SSagi Grimberg 	queue->sock = NULL;
15103f2304f8SSagi Grimberg 	return ret;
15113f2304f8SSagi Grimberg }
15123f2304f8SSagi Grimberg 
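/*
 * Editor's note: queue allocation is deliberately ordered. All socket
 * options (syncnt, nodelay, linger-off, priority, tos, rcvtimeo) are
 * applied before connect(); the icreq/icresp handshake then runs on the
 * fresh connection; and only after that are the sk callbacks
 * (data_ready/state_change/write_space) swapped in under
 * sk_callback_lock, so no driver callback can observe a half-initialized
 * queue. __nvme_tcp_stop_queue() below unwinds broadly in reverse.
 */
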
15133f2304f8SSagi Grimberg static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
15143f2304f8SSagi Grimberg {
15153f2304f8SSagi Grimberg 	struct socket *sock = queue->sock;
15163f2304f8SSagi Grimberg 
15173f2304f8SSagi Grimberg 	write_lock_bh(&sock->sk->sk_callback_lock);
15183f2304f8SSagi Grimberg 	sock->sk->sk_user_data  = NULL;
15193f2304f8SSagi Grimberg 	sock->sk->sk_data_ready = queue->data_ready;
15203f2304f8SSagi Grimberg 	sock->sk->sk_state_change = queue->state_change;
15213f2304f8SSagi Grimberg 	sock->sk->sk_write_space  = queue->write_space;
15223f2304f8SSagi Grimberg 	write_unlock_bh(&sock->sk->sk_callback_lock);
15233f2304f8SSagi Grimberg }
15243f2304f8SSagi Grimberg 
15253f2304f8SSagi Grimberg static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
15263f2304f8SSagi Grimberg {
15273f2304f8SSagi Grimberg 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
15283f2304f8SSagi Grimberg 	nvme_tcp_restore_sock_calls(queue);
15293f2304f8SSagi Grimberg 	cancel_work_sync(&queue->io_work);
15303f2304f8SSagi Grimberg }
15313f2304f8SSagi Grimberg 
15323f2304f8SSagi Grimberg static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
15333f2304f8SSagi Grimberg {
15343f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
15353f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
15363f2304f8SSagi Grimberg 
15373f2304f8SSagi Grimberg 	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
15383f2304f8SSagi Grimberg 		return;
15393f2304f8SSagi Grimberg 	__nvme_tcp_stop_queue(queue);
15403f2304f8SSagi Grimberg }
15413f2304f8SSagi Grimberg 
15423f2304f8SSagi Grimberg static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
15433f2304f8SSagi Grimberg {
15443f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
15453f2304f8SSagi Grimberg 	int ret;
15463f2304f8SSagi Grimberg 
15473f2304f8SSagi Grimberg 	if (idx)
154826c68227SSagi Grimberg 		ret = nvmf_connect_io_queue(nctrl, idx, false);
15493f2304f8SSagi Grimberg 	else
15503f2304f8SSagi Grimberg 		ret = nvmf_connect_admin_queue(nctrl);
15513f2304f8SSagi Grimberg 
15523f2304f8SSagi Grimberg 	if (!ret) {
15533f2304f8SSagi Grimberg 		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
15543f2304f8SSagi Grimberg 	} else {
1555f34e2589SSagi Grimberg 		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
15563f2304f8SSagi Grimberg 			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
15573f2304f8SSagi Grimberg 		dev_err(nctrl->device,
15583f2304f8SSagi Grimberg 			"failed to connect queue: %d ret=%d\n", idx, ret);
15593f2304f8SSagi Grimberg 	}
15603f2304f8SSagi Grimberg 	return ret;
15613f2304f8SSagi Grimberg }
15623f2304f8SSagi Grimberg 
15633f2304f8SSagi Grimberg static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
15643f2304f8SSagi Grimberg 		bool admin)
15653f2304f8SSagi Grimberg {
15663f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
15673f2304f8SSagi Grimberg 	struct blk_mq_tag_set *set;
15683f2304f8SSagi Grimberg 	int ret;
15693f2304f8SSagi Grimberg 
15703f2304f8SSagi Grimberg 	if (admin) {
15713f2304f8SSagi Grimberg 		set = &ctrl->admin_tag_set;
15723f2304f8SSagi Grimberg 		memset(set, 0, sizeof(*set));
15733f2304f8SSagi Grimberg 		set->ops = &nvme_tcp_admin_mq_ops;
15743f2304f8SSagi Grimberg 		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
15753f2304f8SSagi Grimberg 		set->reserved_tags = 2; /* connect + keep-alive */
1576610c8235SMax Gurtovoy 		set->numa_node = nctrl->numa_node;
1577db5ad6b7SSagi Grimberg 		set->flags = BLK_MQ_F_BLOCKING;
15783f2304f8SSagi Grimberg 		set->cmd_size = sizeof(struct nvme_tcp_request);
15793f2304f8SSagi Grimberg 		set->driver_data = ctrl;
15803f2304f8SSagi Grimberg 		set->nr_hw_queues = 1;
1581dc96f938SChaitanya Kulkarni 		set->timeout = NVME_ADMIN_TIMEOUT;
15823f2304f8SSagi Grimberg 	} else {
15833f2304f8SSagi Grimberg 		set = &ctrl->tag_set;
15843f2304f8SSagi Grimberg 		memset(set, 0, sizeof(*set));
15853f2304f8SSagi Grimberg 		set->ops = &nvme_tcp_mq_ops;
15863f2304f8SSagi Grimberg 		set->queue_depth = nctrl->sqsize + 1;
15873f2304f8SSagi Grimberg 		set->reserved_tags = 1; /* fabric connect */
1588610c8235SMax Gurtovoy 		set->numa_node = nctrl->numa_node;
1589db5ad6b7SSagi Grimberg 		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
15903f2304f8SSagi Grimberg 		set->cmd_size = sizeof(struct nvme_tcp_request);
15913f2304f8SSagi Grimberg 		set->driver_data = ctrl;
15923f2304f8SSagi Grimberg 		set->nr_hw_queues = nctrl->queue_count - 1;
15933f2304f8SSagi Grimberg 		set->timeout = NVME_IO_TIMEOUT;
15941a9460ceSSagi Grimberg 		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
15953f2304f8SSagi Grimberg 	}
15963f2304f8SSagi Grimberg 
15973f2304f8SSagi Grimberg 	ret = blk_mq_alloc_tag_set(set);
15983f2304f8SSagi Grimberg 	if (ret)
15993f2304f8SSagi Grimberg 		return ERR_PTR(ret);
16003f2304f8SSagi Grimberg 
16013f2304f8SSagi Grimberg 	return set;
16023f2304f8SSagi Grimberg }
16033f2304f8SSagi Grimberg 
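/*
 * Editor's note: both tag sets carry BLK_MQ_F_BLOCKING because the
 * submission path may transmit directly on the socket under
 * queue->send_mutex (see nvme_tcp_queue_request() earlier in this file),
 * and socket sends can sleep -- so blk-mq must invoke ->queue_rq() from
 * a context that is allowed to block.
 */
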
16043f2304f8SSagi Grimberg static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
16053f2304f8SSagi Grimberg {
16063f2304f8SSagi Grimberg 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1607ceb1e087SDavid Milburn 		cancel_work_sync(&ctrl->async_event_work);
16083f2304f8SSagi Grimberg 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
16093f2304f8SSagi Grimberg 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
16103f2304f8SSagi Grimberg 	}
16113f2304f8SSagi Grimberg 
16123f2304f8SSagi Grimberg 	nvme_tcp_free_queue(ctrl, 0);
16133f2304f8SSagi Grimberg }
16143f2304f8SSagi Grimberg 
16153f2304f8SSagi Grimberg static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
16163f2304f8SSagi Grimberg {
16173f2304f8SSagi Grimberg 	int i;
16183f2304f8SSagi Grimberg 
16193f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++)
16203f2304f8SSagi Grimberg 		nvme_tcp_free_queue(ctrl, i);
16213f2304f8SSagi Grimberg }
16223f2304f8SSagi Grimberg 
16233f2304f8SSagi Grimberg static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
16243f2304f8SSagi Grimberg {
16253f2304f8SSagi Grimberg 	int i;
16263f2304f8SSagi Grimberg 
16273f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++)
16283f2304f8SSagi Grimberg 		nvme_tcp_stop_queue(ctrl, i);
16293f2304f8SSagi Grimberg }
16303f2304f8SSagi Grimberg 
16313f2304f8SSagi Grimberg static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
16323f2304f8SSagi Grimberg {
16333f2304f8SSagi Grimberg 	int i, ret = 0;
16343f2304f8SSagi Grimberg 
16353f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++) {
16363f2304f8SSagi Grimberg 		ret = nvme_tcp_start_queue(ctrl, i);
16373f2304f8SSagi Grimberg 		if (ret)
16383f2304f8SSagi Grimberg 			goto out_stop_queues;
16393f2304f8SSagi Grimberg 	}
16403f2304f8SSagi Grimberg 
16413f2304f8SSagi Grimberg 	return 0;
16423f2304f8SSagi Grimberg 
16433f2304f8SSagi Grimberg out_stop_queues:
16443f2304f8SSagi Grimberg 	for (i--; i >= 1; i--)
16453f2304f8SSagi Grimberg 		nvme_tcp_stop_queue(ctrl, i);
16463f2304f8SSagi Grimberg 	return ret;
16473f2304f8SSagi Grimberg }
16483f2304f8SSagi Grimberg 
16493f2304f8SSagi Grimberg static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
16503f2304f8SSagi Grimberg {
16513f2304f8SSagi Grimberg 	int ret;
16523f2304f8SSagi Grimberg 
16533f2304f8SSagi Grimberg 	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
16543f2304f8SSagi Grimberg 	if (ret)
16553f2304f8SSagi Grimberg 		return ret;
16563f2304f8SSagi Grimberg 
16573f2304f8SSagi Grimberg 	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
16583f2304f8SSagi Grimberg 	if (ret)
16593f2304f8SSagi Grimberg 		goto out_free_queue;
16603f2304f8SSagi Grimberg 
16613f2304f8SSagi Grimberg 	return 0;
16623f2304f8SSagi Grimberg 
16633f2304f8SSagi Grimberg out_free_queue:
16643f2304f8SSagi Grimberg 	nvme_tcp_free_queue(ctrl, 0);
16653f2304f8SSagi Grimberg 	return ret;
16663f2304f8SSagi Grimberg }
16673f2304f8SSagi Grimberg 
1668efb973b1SSagi Grimberg static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
16693f2304f8SSagi Grimberg {
16703f2304f8SSagi Grimberg 	int i, ret;
16713f2304f8SSagi Grimberg 
16723f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++) {
16733f2304f8SSagi Grimberg 		ret = nvme_tcp_alloc_queue(ctrl, i,
16743f2304f8SSagi Grimberg 				ctrl->sqsize + 1);
16753f2304f8SSagi Grimberg 		if (ret)
16763f2304f8SSagi Grimberg 			goto out_free_queues;
16773f2304f8SSagi Grimberg 	}
16783f2304f8SSagi Grimberg 
16793f2304f8SSagi Grimberg 	return 0;
16803f2304f8SSagi Grimberg 
16813f2304f8SSagi Grimberg out_free_queues:
16823f2304f8SSagi Grimberg 	for (i--; i >= 1; i--)
16833f2304f8SSagi Grimberg 		nvme_tcp_free_queue(ctrl, i);
16843f2304f8SSagi Grimberg 
16853f2304f8SSagi Grimberg 	return ret;
16863f2304f8SSagi Grimberg }
16873f2304f8SSagi Grimberg 
16883f2304f8SSagi Grimberg static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
16893f2304f8SSagi Grimberg {
1690873946f4SSagi Grimberg 	unsigned int nr_io_queues;
1691873946f4SSagi Grimberg 
1692873946f4SSagi Grimberg 	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1693873946f4SSagi Grimberg 	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
16941a9460ceSSagi Grimberg 	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1695873946f4SSagi Grimberg 
1696873946f4SSagi Grimberg 	return nr_io_queues;
16973f2304f8SSagi Grimberg }
16983f2304f8SSagi Grimberg 
169964861993SSagi Grimberg static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
170064861993SSagi Grimberg 		unsigned int nr_io_queues)
170164861993SSagi Grimberg {
170264861993SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
170364861993SSagi Grimberg 	struct nvmf_ctrl_options *opts = nctrl->opts;
170464861993SSagi Grimberg 
170564861993SSagi Grimberg 	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
170664861993SSagi Grimberg 		/*
170764861993SSagi Grimberg 		 * separate read/write queues
170864861993SSagi Grimberg 		 * hand out dedicated default queues only after we have
170964861993SSagi Grimberg 		 * sufficient read queues.
171064861993SSagi Grimberg 		 */
171164861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
171264861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
171364861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
171464861993SSagi Grimberg 			min(opts->nr_write_queues, nr_io_queues);
171564861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
171664861993SSagi Grimberg 	} else {
171764861993SSagi Grimberg 		/*
171864861993SSagi Grimberg 		 * shared read/write queues
171964861993SSagi Grimberg 		 * either no write queues were requested, or we don't have
172064861993SSagi Grimberg 		 * sufficient queue count to have dedicated default queues.
172164861993SSagi Grimberg 		 */
172264861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
172364861993SSagi Grimberg 			min(opts->nr_io_queues, nr_io_queues);
172464861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
172564861993SSagi Grimberg 	}
17261a9460ceSSagi Grimberg 
17271a9460ceSSagi Grimberg 	if (opts->nr_poll_queues && nr_io_queues) {
17281a9460ceSSagi Grimberg 		/* map dedicated poll queues only if we have queues left */
17291a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_POLL] =
17301a9460ceSSagi Grimberg 			min(opts->nr_poll_queues, nr_io_queues);
17311a9460ceSSagi Grimberg 	}
173264861993SSagi Grimberg }
173364861993SSagi Grimberg 
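/*
 * Editor's note, worked example for the split above: with the controller
 * granting nr_io_queues = 8 and connect options nr_io_queues=4 (reads)
 * plus nr_write_queues=4, HCTX_TYPE_READ takes its 4 first and
 * HCTX_TYPE_DEFAULT gets the remaining 4. If only 6 were granted, READ
 * still takes 4 and DEFAULT is clamped to min(4, 2) = 2 -- dedicated
 * default queues are handed out only once reads are fully served. Poll
 * queues come from whatever remains after both.
 */
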
1734efb973b1SSagi Grimberg static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
17353f2304f8SSagi Grimberg {
17363f2304f8SSagi Grimberg 	unsigned int nr_io_queues;
17373f2304f8SSagi Grimberg 	int ret;
17383f2304f8SSagi Grimberg 
17393f2304f8SSagi Grimberg 	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
17403f2304f8SSagi Grimberg 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
17413f2304f8SSagi Grimberg 	if (ret)
17423f2304f8SSagi Grimberg 		return ret;
17433f2304f8SSagi Grimberg 
17443f2304f8SSagi Grimberg 	ctrl->queue_count = nr_io_queues + 1;
17453f2304f8SSagi Grimberg 	if (ctrl->queue_count < 2)
17463f2304f8SSagi Grimberg 		return 0;
17473f2304f8SSagi Grimberg 
17483f2304f8SSagi Grimberg 	dev_info(ctrl->device,
17493f2304f8SSagi Grimberg 		"creating %d I/O queues.\n", nr_io_queues);
17503f2304f8SSagi Grimberg 
175164861993SSagi Grimberg 	nvme_tcp_set_io_queues(ctrl, nr_io_queues);
175264861993SSagi Grimberg 
1753efb973b1SSagi Grimberg 	return __nvme_tcp_alloc_io_queues(ctrl);
17543f2304f8SSagi Grimberg }
17553f2304f8SSagi Grimberg 
17563f2304f8SSagi Grimberg static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
17573f2304f8SSagi Grimberg {
17583f2304f8SSagi Grimberg 	nvme_tcp_stop_io_queues(ctrl);
17593f2304f8SSagi Grimberg 	if (remove) {
17603f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->connect_q);
17613f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->tagset);
17623f2304f8SSagi Grimberg 	}
17633f2304f8SSagi Grimberg 	nvme_tcp_free_io_queues(ctrl);
17643f2304f8SSagi Grimberg }
17653f2304f8SSagi Grimberg 
17663f2304f8SSagi Grimberg static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
17673f2304f8SSagi Grimberg {
17683f2304f8SSagi Grimberg 	int ret;
17693f2304f8SSagi Grimberg 
1770efb973b1SSagi Grimberg 	ret = nvme_tcp_alloc_io_queues(ctrl);
17713f2304f8SSagi Grimberg 	if (ret)
17723f2304f8SSagi Grimberg 		return ret;
17733f2304f8SSagi Grimberg 
17743f2304f8SSagi Grimberg 	if (new) {
17753f2304f8SSagi Grimberg 		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
17763f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->tagset)) {
17773f2304f8SSagi Grimberg 			ret = PTR_ERR(ctrl->tagset);
17783f2304f8SSagi Grimberg 			goto out_free_io_queues;
17793f2304f8SSagi Grimberg 		}
17803f2304f8SSagi Grimberg 
17813f2304f8SSagi Grimberg 		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
17823f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->connect_q)) {
17833f2304f8SSagi Grimberg 			ret = PTR_ERR(ctrl->connect_q);
17843f2304f8SSagi Grimberg 			goto out_free_tag_set;
17853f2304f8SSagi Grimberg 		}
17863f2304f8SSagi Grimberg 	}
17873f2304f8SSagi Grimberg 
17883f2304f8SSagi Grimberg 	ret = nvme_tcp_start_io_queues(ctrl);
17893f2304f8SSagi Grimberg 	if (ret)
17903f2304f8SSagi Grimberg 		goto out_cleanup_connect_q;
17913f2304f8SSagi Grimberg 
17922875b0aeSSagi Grimberg 	if (!new) {
17932875b0aeSSagi Grimberg 		nvme_start_queues(ctrl);
1794e5c01f4fSSagi Grimberg 		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1795e5c01f4fSSagi Grimberg 			/*
1796e5c01f4fSSagi Grimberg 			 * If we timed out waiting for freeze we are likely to
1797e5c01f4fSSagi Grimberg 			 * be stuck.  Fail the controller initialization just
1798e5c01f4fSSagi Grimberg 			 * to be safe.
1799e5c01f4fSSagi Grimberg 			 */
1800e5c01f4fSSagi Grimberg 			ret = -ENODEV;
1801e5c01f4fSSagi Grimberg 			goto out_wait_freeze_timed_out;
1802e5c01f4fSSagi Grimberg 		}
18032875b0aeSSagi Grimberg 		blk_mq_update_nr_hw_queues(ctrl->tagset,
18042875b0aeSSagi Grimberg 			ctrl->queue_count - 1);
18052875b0aeSSagi Grimberg 		nvme_unfreeze(ctrl);
18062875b0aeSSagi Grimberg 	}
18072875b0aeSSagi Grimberg 
18083f2304f8SSagi Grimberg 	return 0;
18093f2304f8SSagi Grimberg 
1810e5c01f4fSSagi Grimberg out_wait_freeze_timed_out:
1811e5c01f4fSSagi Grimberg 	nvme_stop_queues(ctrl);
1812e5c01f4fSSagi Grimberg 	nvme_tcp_stop_io_queues(ctrl);
18133f2304f8SSagi Grimberg out_cleanup_connect_q:
1814e85037a2SSagi Grimberg 	if (new)
18153f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->connect_q);
18163f2304f8SSagi Grimberg out_free_tag_set:
18173f2304f8SSagi Grimberg 	if (new)
18183f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->tagset);
18193f2304f8SSagi Grimberg out_free_io_queues:
18203f2304f8SSagi Grimberg 	nvme_tcp_free_io_queues(ctrl);
18213f2304f8SSagi Grimberg 	return ret;
18223f2304f8SSagi Grimberg }
18233f2304f8SSagi Grimberg 
18243f2304f8SSagi Grimberg static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
18253f2304f8SSagi Grimberg {
18263f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
18273f2304f8SSagi Grimberg 	if (remove) {
18283f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->admin_q);
1829e7832cb4SSagi Grimberg 		blk_cleanup_queue(ctrl->fabrics_q);
18303f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->admin_tagset);
18313f2304f8SSagi Grimberg 	}
18323f2304f8SSagi Grimberg 	nvme_tcp_free_admin_queue(ctrl);
18333f2304f8SSagi Grimberg }
18343f2304f8SSagi Grimberg 
18353f2304f8SSagi Grimberg static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
18363f2304f8SSagi Grimberg {
18373f2304f8SSagi Grimberg 	int error;
18383f2304f8SSagi Grimberg 
18393f2304f8SSagi Grimberg 	error = nvme_tcp_alloc_admin_queue(ctrl);
18403f2304f8SSagi Grimberg 	if (error)
18413f2304f8SSagi Grimberg 		return error;
18423f2304f8SSagi Grimberg 
18433f2304f8SSagi Grimberg 	if (new) {
18443f2304f8SSagi Grimberg 		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
18453f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->admin_tagset)) {
18463f2304f8SSagi Grimberg 			error = PTR_ERR(ctrl->admin_tagset);
18473f2304f8SSagi Grimberg 			goto out_free_queue;
18483f2304f8SSagi Grimberg 		}
18493f2304f8SSagi Grimberg 
1850e7832cb4SSagi Grimberg 		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1851e7832cb4SSagi Grimberg 		if (IS_ERR(ctrl->fabrics_q)) {
1852e7832cb4SSagi Grimberg 			error = PTR_ERR(ctrl->fabrics_q);
1853e7832cb4SSagi Grimberg 			goto out_free_tagset;
1854e7832cb4SSagi Grimberg 		}
1855e7832cb4SSagi Grimberg 
18563f2304f8SSagi Grimberg 		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
18573f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->admin_q)) {
18583f2304f8SSagi Grimberg 			error = PTR_ERR(ctrl->admin_q);
1859e7832cb4SSagi Grimberg 			goto out_cleanup_fabrics_q;
18603f2304f8SSagi Grimberg 		}
18613f2304f8SSagi Grimberg 	}
18623f2304f8SSagi Grimberg 
18633f2304f8SSagi Grimberg 	error = nvme_tcp_start_queue(ctrl, 0);
18643f2304f8SSagi Grimberg 	if (error)
18653f2304f8SSagi Grimberg 		goto out_cleanup_queue;
18663f2304f8SSagi Grimberg 
1867c0f2f45bSSagi Grimberg 	error = nvme_enable_ctrl(ctrl);
18683f2304f8SSagi Grimberg 	if (error)
18693f2304f8SSagi Grimberg 		goto out_stop_queue;
18703f2304f8SSagi Grimberg 
1871e7832cb4SSagi Grimberg 	blk_mq_unquiesce_queue(ctrl->admin_q);
1872e7832cb4SSagi Grimberg 
18733f2304f8SSagi Grimberg 	error = nvme_init_identify(ctrl);
18743f2304f8SSagi Grimberg 	if (error)
18753f2304f8SSagi Grimberg 		goto out_stop_queue;
18763f2304f8SSagi Grimberg 
18773f2304f8SSagi Grimberg 	return 0;
18783f2304f8SSagi Grimberg 
18793f2304f8SSagi Grimberg out_stop_queue:
18803f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
18813f2304f8SSagi Grimberg out_cleanup_queue:
18823f2304f8SSagi Grimberg 	if (new)
18833f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->admin_q);
1884e7832cb4SSagi Grimberg out_cleanup_fabrics_q:
1885e7832cb4SSagi Grimberg 	if (new)
1886e7832cb4SSagi Grimberg 		blk_cleanup_queue(ctrl->fabrics_q);
18873f2304f8SSagi Grimberg out_free_tagset:
18883f2304f8SSagi Grimberg 	if (new)
18893f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->admin_tagset);
18903f2304f8SSagi Grimberg out_free_queue:
18913f2304f8SSagi Grimberg 	nvme_tcp_free_admin_queue(ctrl);
18923f2304f8SSagi Grimberg 	return error;
18933f2304f8SSagi Grimberg }
18943f2304f8SSagi Grimberg 
18953f2304f8SSagi Grimberg static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
18963f2304f8SSagi Grimberg 		bool remove)
18973f2304f8SSagi Grimberg {
18983f2304f8SSagi Grimberg 	blk_mq_quiesce_queue(ctrl->admin_q);
1899d6f66210SChao Leng 	blk_sync_queue(ctrl->admin_q);
19003f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
1901622b8b68SMing Lei 	if (ctrl->admin_tagset) {
19027a425896SSagi Grimberg 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
19037a425896SSagi Grimberg 			nvme_cancel_request, ctrl);
1904622b8b68SMing Lei 		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1905622b8b68SMing Lei 	}
1906e7832cb4SSagi Grimberg 	if (remove)
19073f2304f8SSagi Grimberg 		blk_mq_unquiesce_queue(ctrl->admin_q);
19083f2304f8SSagi Grimberg 	nvme_tcp_destroy_admin_queue(ctrl, remove);
19093f2304f8SSagi Grimberg }
19103f2304f8SSagi Grimberg 
19113f2304f8SSagi Grimberg static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
19123f2304f8SSagi Grimberg 		bool remove)
19133f2304f8SSagi Grimberg {
19143f2304f8SSagi Grimberg 	if (ctrl->queue_count <= 1)
1915d6f66210SChao Leng 		return;
1916d4d61470SSagi Grimberg 	blk_mq_quiesce_queue(ctrl->admin_q);
19172875b0aeSSagi Grimberg 	nvme_start_freeze(ctrl);
19183f2304f8SSagi Grimberg 	nvme_stop_queues(ctrl);
1919d6f66210SChao Leng 	nvme_sync_io_queues(ctrl);
19203f2304f8SSagi Grimberg 	nvme_tcp_stop_io_queues(ctrl);
1921622b8b68SMing Lei 	if (ctrl->tagset) {
19227a425896SSagi Grimberg 		blk_mq_tagset_busy_iter(ctrl->tagset,
19237a425896SSagi Grimberg 			nvme_cancel_request, ctrl);
1924622b8b68SMing Lei 		blk_mq_tagset_wait_completed_request(ctrl->tagset);
1925622b8b68SMing Lei 	}
19263f2304f8SSagi Grimberg 	if (remove)
19273f2304f8SSagi Grimberg 		nvme_start_queues(ctrl);
19283f2304f8SSagi Grimberg 	nvme_tcp_destroy_io_queues(ctrl, remove);
19293f2304f8SSagi Grimberg }
19303f2304f8SSagi Grimberg 
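/*
 * Editor's note: the teardown order above matters: freezing and
 * quiescing stop new submissions, nvme_sync_io_queues() flushes any
 * in-flight timeout work, only then are the TCP queues shut down, and
 * whatever is still in the tag sets is cancelled and waited for before
 * the block queues are (on the error-recovery path) restarted.
 */
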
19313f2304f8SSagi Grimberg static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
19323f2304f8SSagi Grimberg {
19333f2304f8SSagi Grimberg 	/* If we are resetting/deleting then do nothing */
19343f2304f8SSagi Grimberg 	if (ctrl->state != NVME_CTRL_CONNECTING) {
19353f2304f8SSagi Grimberg 		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
19363f2304f8SSagi Grimberg 			ctrl->state == NVME_CTRL_LIVE);
19373f2304f8SSagi Grimberg 		return;
19383f2304f8SSagi Grimberg 	}
19393f2304f8SSagi Grimberg 
19403f2304f8SSagi Grimberg 	if (nvmf_should_reconnect(ctrl)) {
19413f2304f8SSagi Grimberg 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
19423f2304f8SSagi Grimberg 			ctrl->opts->reconnect_delay);
19433f2304f8SSagi Grimberg 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
19443f2304f8SSagi Grimberg 				ctrl->opts->reconnect_delay * HZ);
19453f2304f8SSagi Grimberg 	} else {
19463f2304f8SSagi Grimberg 		dev_info(ctrl->device, "Removing controller...\n");
19473f2304f8SSagi Grimberg 		nvme_delete_ctrl(ctrl);
19483f2304f8SSagi Grimberg 	}
19493f2304f8SSagi Grimberg }
19503f2304f8SSagi Grimberg 
19513f2304f8SSagi Grimberg static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
19523f2304f8SSagi Grimberg {
19533f2304f8SSagi Grimberg 	struct nvmf_ctrl_options *opts = ctrl->opts;
1954312910f4SColin Ian King 	int ret;
19553f2304f8SSagi Grimberg 
19563f2304f8SSagi Grimberg 	ret = nvme_tcp_configure_admin_queue(ctrl, new);
19573f2304f8SSagi Grimberg 	if (ret)
19583f2304f8SSagi Grimberg 		return ret;
19593f2304f8SSagi Grimberg 
19603f2304f8SSagi Grimberg 	if (ctrl->icdoff) {
		ret = -EOPNOTSUPP; /* editor's fix: 'ret' was left 0 here, turning this error path into success */
19613f2304f8SSagi Grimberg 		dev_err(ctrl->device, "icdoff is not supported!\n");
19623f2304f8SSagi Grimberg 		goto destroy_admin;
19633f2304f8SSagi Grimberg 	}
19643f2304f8SSagi Grimberg 
19653f2304f8SSagi Grimberg 	if (opts->queue_size > ctrl->sqsize + 1)
19663f2304f8SSagi Grimberg 		dev_warn(ctrl->device,
19673f2304f8SSagi Grimberg 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
19683f2304f8SSagi Grimberg 			opts->queue_size, ctrl->sqsize + 1);
19693f2304f8SSagi Grimberg 
19703f2304f8SSagi Grimberg 	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
19713f2304f8SSagi Grimberg 		dev_warn(ctrl->device,
19723f2304f8SSagi Grimberg 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
19733f2304f8SSagi Grimberg 			ctrl->sqsize + 1, ctrl->maxcmd);
19743f2304f8SSagi Grimberg 		ctrl->sqsize = ctrl->maxcmd - 1;
19753f2304f8SSagi Grimberg 	}
19763f2304f8SSagi Grimberg 
19773f2304f8SSagi Grimberg 	if (ctrl->queue_count > 1) {
19783f2304f8SSagi Grimberg 		ret = nvme_tcp_configure_io_queues(ctrl, new);
19793f2304f8SSagi Grimberg 		if (ret)
19803f2304f8SSagi Grimberg 			goto destroy_admin;
19813f2304f8SSagi Grimberg 	}
19823f2304f8SSagi Grimberg 
19833f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1984bea54ef5SIsrael Rukshin 		/*
1985ecca390eSSagi Grimberg 		 * A state change failure is ok if we started ctrl delete,
1986bea54ef5SIsrael Rukshin 		 * but never during creation of a new controller, where it
1987bea54ef5SIsrael Rukshin 		 * would race with the teardown flow.
1988bea54ef5SIsrael Rukshin 		 */
1989ecca390eSSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
1990ecca390eSSagi Grimberg 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
1991bea54ef5SIsrael Rukshin 		WARN_ON_ONCE(new);
19923f2304f8SSagi Grimberg 		ret = -EINVAL;
19933f2304f8SSagi Grimberg 		goto destroy_io;
19943f2304f8SSagi Grimberg 	}
19953f2304f8SSagi Grimberg 
19963f2304f8SSagi Grimberg 	nvme_start_ctrl(ctrl);
19973f2304f8SSagi Grimberg 	return 0;
19983f2304f8SSagi Grimberg 
19993f2304f8SSagi Grimberg destroy_io:
20003f2304f8SSagi Grimberg 	if (ctrl->queue_count > 1)
20013f2304f8SSagi Grimberg 		nvme_tcp_destroy_io_queues(ctrl, new);
20023f2304f8SSagi Grimberg destroy_admin:
20033f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
20043f2304f8SSagi Grimberg 	nvme_tcp_destroy_admin_queue(ctrl, new);
20053f2304f8SSagi Grimberg 	return ret;
20063f2304f8SSagi Grimberg }
20073f2304f8SSagi Grimberg 
20083f2304f8SSagi Grimberg static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
20093f2304f8SSagi Grimberg {
20103f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
20113f2304f8SSagi Grimberg 			struct nvme_tcp_ctrl, connect_work);
20123f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
20133f2304f8SSagi Grimberg 
20143f2304f8SSagi Grimberg 	++ctrl->nr_reconnects;
20153f2304f8SSagi Grimberg 
20163f2304f8SSagi Grimberg 	if (nvme_tcp_setup_ctrl(ctrl, false))
20173f2304f8SSagi Grimberg 		goto requeue;
20183f2304f8SSagi Grimberg 
201956a77d26SColin Ian King 	dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
20203f2304f8SSagi Grimberg 			ctrl->nr_reconnects);
20213f2304f8SSagi Grimberg 
20223f2304f8SSagi Grimberg 	ctrl->nr_reconnects = 0;
20233f2304f8SSagi Grimberg 
20243f2304f8SSagi Grimberg 	return;
20253f2304f8SSagi Grimberg 
20263f2304f8SSagi Grimberg requeue:
20273f2304f8SSagi Grimberg 	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
20283f2304f8SSagi Grimberg 			ctrl->nr_reconnects);
20293f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
20303f2304f8SSagi Grimberg }
20313f2304f8SSagi Grimberg 
20323f2304f8SSagi Grimberg static void nvme_tcp_error_recovery_work(struct work_struct *work)
20333f2304f8SSagi Grimberg {
20343f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
20353f2304f8SSagi Grimberg 				struct nvme_tcp_ctrl, err_work);
20363f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
20373f2304f8SSagi Grimberg 
20383f2304f8SSagi Grimberg 	nvme_stop_keep_alive(ctrl);
20393f2304f8SSagi Grimberg 	nvme_tcp_teardown_io_queues(ctrl, false);
20403f2304f8SSagi Grimberg 	/* unquiesce so that pending requests fail fast */
20413f2304f8SSagi Grimberg 	nvme_start_queues(ctrl);
20423f2304f8SSagi Grimberg 	nvme_tcp_teardown_admin_queue(ctrl, false);
2043e7832cb4SSagi Grimberg 	blk_mq_unquiesce_queue(ctrl->admin_q);
20443f2304f8SSagi Grimberg 
20453f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2046ecca390eSSagi Grimberg 		/* state change failure is ok if we started ctrl delete */
2047ecca390eSSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2048ecca390eSSagi Grimberg 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
20493f2304f8SSagi Grimberg 		return;
20503f2304f8SSagi Grimberg 	}
20513f2304f8SSagi Grimberg 
20523f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
20533f2304f8SSagi Grimberg }
20543f2304f8SSagi Grimberg 
20553f2304f8SSagi Grimberg static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
20563f2304f8SSagi Grimberg {
2057794a4cb3SSagi Grimberg 	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2058794a4cb3SSagi Grimberg 	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2059794a4cb3SSagi Grimberg 
20603f2304f8SSagi Grimberg 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
2061e7832cb4SSagi Grimberg 	blk_mq_quiesce_queue(ctrl->admin_q);
20623f2304f8SSagi Grimberg 	if (shutdown)
20633f2304f8SSagi Grimberg 		nvme_shutdown_ctrl(ctrl);
20643f2304f8SSagi Grimberg 	else
2065b5b05048SSagi Grimberg 		nvme_disable_ctrl(ctrl);
20663f2304f8SSagi Grimberg 	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
20673f2304f8SSagi Grimberg }
20683f2304f8SSagi Grimberg 
20693f2304f8SSagi Grimberg static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
20703f2304f8SSagi Grimberg {
20713f2304f8SSagi Grimberg 	nvme_tcp_teardown_ctrl(ctrl, true);
20723f2304f8SSagi Grimberg }
20733f2304f8SSagi Grimberg 
20743f2304f8SSagi Grimberg static void nvme_reset_ctrl_work(struct work_struct *work)
20753f2304f8SSagi Grimberg {
20763f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl =
20773f2304f8SSagi Grimberg 		container_of(work, struct nvme_ctrl, reset_work);
20783f2304f8SSagi Grimberg 
20793f2304f8SSagi Grimberg 	nvme_stop_ctrl(ctrl);
20803f2304f8SSagi Grimberg 	nvme_tcp_teardown_ctrl(ctrl, false);
20813f2304f8SSagi Grimberg 
20823f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2083ecca390eSSagi Grimberg 		/* state change failure is ok if we started ctrl delete */
2084ecca390eSSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2085ecca390eSSagi Grimberg 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
20863f2304f8SSagi Grimberg 		return;
20873f2304f8SSagi Grimberg 	}
20883f2304f8SSagi Grimberg 
20893f2304f8SSagi Grimberg 	if (nvme_tcp_setup_ctrl(ctrl, false))
20903f2304f8SSagi Grimberg 		goto out_fail;
20913f2304f8SSagi Grimberg 
20923f2304f8SSagi Grimberg 	return;
20933f2304f8SSagi Grimberg 
20943f2304f8SSagi Grimberg out_fail:
20953f2304f8SSagi Grimberg 	++ctrl->nr_reconnects;
20963f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
20973f2304f8SSagi Grimberg }
20983f2304f8SSagi Grimberg 
20993f2304f8SSagi Grimberg static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
21003f2304f8SSagi Grimberg {
21013f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
21023f2304f8SSagi Grimberg 
21033f2304f8SSagi Grimberg 	if (list_empty(&ctrl->list))
21043f2304f8SSagi Grimberg 		goto free_ctrl;
21053f2304f8SSagi Grimberg 
21063f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
21073f2304f8SSagi Grimberg 	list_del(&ctrl->list);
21083f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
21093f2304f8SSagi Grimberg 
21103f2304f8SSagi Grimberg 	nvmf_free_options(nctrl->opts);
21113f2304f8SSagi Grimberg free_ctrl:
21123f2304f8SSagi Grimberg 	kfree(ctrl->queues);
21133f2304f8SSagi Grimberg 	kfree(ctrl);
21143f2304f8SSagi Grimberg }
21153f2304f8SSagi Grimberg 
21163f2304f8SSagi Grimberg static void nvme_tcp_set_sg_null(struct nvme_command *c)
21173f2304f8SSagi Grimberg {
21183f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
21193f2304f8SSagi Grimberg 
21203f2304f8SSagi Grimberg 	sg->addr = 0;
21213f2304f8SSagi Grimberg 	sg->length = 0;
21223f2304f8SSagi Grimberg 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
21233f2304f8SSagi Grimberg 			NVME_SGL_FMT_TRANSPORT_A;
21243f2304f8SSagi Grimberg }
21253f2304f8SSagi Grimberg 
21263f2304f8SSagi Grimberg static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
21273f2304f8SSagi Grimberg 		struct nvme_command *c, u32 data_len)
21283f2304f8SSagi Grimberg {
21293f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
21303f2304f8SSagi Grimberg 
21313f2304f8SSagi Grimberg 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
21323f2304f8SSagi Grimberg 	sg->length = cpu_to_le32(data_len);
21333f2304f8SSagi Grimberg 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
21343f2304f8SSagi Grimberg }
21353f2304f8SSagi Grimberg 
21363f2304f8SSagi Grimberg static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
21373f2304f8SSagi Grimberg 		u32 data_len)
21383f2304f8SSagi Grimberg {
21393f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
21403f2304f8SSagi Grimberg 
21413f2304f8SSagi Grimberg 	sg->addr = 0;
21423f2304f8SSagi Grimberg 	sg->length = cpu_to_le32(data_len);
21433f2304f8SSagi Grimberg 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
21443f2304f8SSagi Grimberg 			NVME_SGL_FMT_TRANSPORT_A;
21453f2304f8SSagi Grimberg }
21463f2304f8SSagi Grimberg 
21473f2304f8SSagi Grimberg static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
21483f2304f8SSagi Grimberg {
21493f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
21503f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
21513f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
21523f2304f8SSagi Grimberg 	struct nvme_command *cmd = &pdu->cmd;
21533f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
21543f2304f8SSagi Grimberg 
21553f2304f8SSagi Grimberg 	memset(pdu, 0, sizeof(*pdu));
21563f2304f8SSagi Grimberg 	pdu->hdr.type = nvme_tcp_cmd;
21573f2304f8SSagi Grimberg 	if (queue->hdr_digest)
21583f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
21593f2304f8SSagi Grimberg 	pdu->hdr.hlen = sizeof(*pdu);
21603f2304f8SSagi Grimberg 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
21613f2304f8SSagi Grimberg 
21623f2304f8SSagi Grimberg 	cmd->common.opcode = nvme_admin_async_event;
21633f2304f8SSagi Grimberg 	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
21643f2304f8SSagi Grimberg 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
21653f2304f8SSagi Grimberg 	nvme_tcp_set_sg_null(cmd);
21663f2304f8SSagi Grimberg 
21673f2304f8SSagi Grimberg 	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
21683f2304f8SSagi Grimberg 	ctrl->async_req.offset = 0;
21693f2304f8SSagi Grimberg 	ctrl->async_req.curr_bio = NULL;
21703f2304f8SSagi Grimberg 	ctrl->async_req.data_len = 0;
21713f2304f8SSagi Grimberg 
217286f0348aSSagi Grimberg 	nvme_tcp_queue_request(&ctrl->async_req, true, true);
21733f2304f8SSagi Grimberg }
21743f2304f8SSagi Grimberg 
2175236187c4SSagi Grimberg static void nvme_tcp_complete_timed_out(struct request *rq)
2176236187c4SSagi Grimberg {
2177236187c4SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2178236187c4SSagi Grimberg 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2179236187c4SSagi Grimberg 
2180236187c4SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
21810a8a2c85SSagi Grimberg 	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
2182236187c4SSagi Grimberg 		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2183236187c4SSagi Grimberg 		blk_mq_complete_request(rq);
2184236187c4SSagi Grimberg 	}
2185236187c4SSagi Grimberg }
2186236187c4SSagi Grimberg 
21873f2304f8SSagi Grimberg static enum blk_eh_timer_return
21883f2304f8SSagi Grimberg nvme_tcp_timeout(struct request *rq, bool reserved)
21893f2304f8SSagi Grimberg {
21903f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2191236187c4SSagi Grimberg 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
21923f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
21933f2304f8SSagi Grimberg 
2194236187c4SSagi Grimberg 	dev_warn(ctrl->device,
21953f2304f8SSagi Grimberg 		"queue %d: timeout request %#x type %d\n",
219639d57757SSagi Grimberg 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
21973f2304f8SSagi Grimberg 
2198236187c4SSagi Grimberg 	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting, we should
		 * complete immediately because we may block the controller
		 * teardown or setup sequence:
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
2212236187c4SSagi Grimberg 		nvme_tcp_complete_timed_out(rq);
22133f2304f8SSagi Grimberg 		return BLK_EH_DONE;
22143f2304f8SSagi Grimberg 	}
22153f2304f8SSagi Grimberg 
2216236187c4SSagi Grimberg 	/*
2217236187c4SSagi Grimberg 	 * LIVE state should trigger the normal error recovery which will
2218236187c4SSagi Grimberg 	 * handle completing this request.
2219236187c4SSagi Grimberg 	 */
2220236187c4SSagi Grimberg 	nvme_tcp_error_recovery(ctrl);
22213f2304f8SSagi Grimberg 	return BLK_EH_RESET_TIMER;
22223f2304f8SSagi Grimberg }
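/*
 * Returning BLK_EH_RESET_TIMER above keeps the timed-out request alive
 * while the error recovery work tears down the queues; the request is
 * then cancelled or requeued by the recovery path instead of being
 * completed from timeout context.
 */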
22233f2304f8SSagi Grimberg 
22243f2304f8SSagi Grimberg static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
22253f2304f8SSagi Grimberg 			struct request *rq)
22263f2304f8SSagi Grimberg {
22273f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
22283f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
22293f2304f8SSagi Grimberg 	struct nvme_command *c = &pdu->cmd;
22303f2304f8SSagi Grimberg 
22313f2304f8SSagi Grimberg 	c->common.flags |= NVME_CMD_SGL_METABUF;
22323f2304f8SSagi Grimberg 
223325e5cb78SSagi Grimberg 	if (!blk_rq_nr_phys_segments(rq))
223425e5cb78SSagi Grimberg 		nvme_tcp_set_sg_null(c);
223525e5cb78SSagi Grimberg 	else if (rq_data_dir(rq) == WRITE &&
22363f2304f8SSagi Grimberg 	    req->data_len <= nvme_tcp_inline_data_size(queue))
22373f2304f8SSagi Grimberg 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
22383f2304f8SSagi Grimberg 	else
22393f2304f8SSagi Grimberg 		nvme_tcp_set_sg_host_data(c, req->data_len);
22403f2304f8SSagi Grimberg 
22413f2304f8SSagi Grimberg 	return 0;
22423f2304f8SSagi Grimberg }
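/*
 * The three cases above map to the wire as follows: no payload gives a
 * NULL SGL, a write that fits the queue's inline data size is sent
 * in-capsule together with the command PDU, and anything larger (or any
 * read) uses a transport SGL so the data moves via R2T/H2CData or
 * C2HData exchanges.
 */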
22433f2304f8SSagi Grimberg 
22443f2304f8SSagi Grimberg static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
22453f2304f8SSagi Grimberg 		struct request *rq)
22463f2304f8SSagi Grimberg {
22473f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
22483f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
22493f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
22503f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
22513f2304f8SSagi Grimberg 	blk_status_t ret;
22523f2304f8SSagi Grimberg 
22533f2304f8SSagi Grimberg 	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
22543f2304f8SSagi Grimberg 	if (ret)
22553f2304f8SSagi Grimberg 		return ret;
22563f2304f8SSagi Grimberg 
22573f2304f8SSagi Grimberg 	req->state = NVME_TCP_SEND_CMD_PDU;
22583f2304f8SSagi Grimberg 	req->offset = 0;
22593f2304f8SSagi Grimberg 	req->data_sent = 0;
22603f2304f8SSagi Grimberg 	req->pdu_len = 0;
22613f2304f8SSagi Grimberg 	req->pdu_sent = 0;
226225e5cb78SSagi Grimberg 	req->data_len = blk_rq_nr_phys_segments(rq) ?
226325e5cb78SSagi Grimberg 				blk_rq_payload_bytes(rq) : 0;
22643f2304f8SSagi Grimberg 	req->curr_bio = rq->bio;
22653f2304f8SSagi Grimberg 
22663f2304f8SSagi Grimberg 	if (rq_data_dir(rq) == WRITE &&
22673f2304f8SSagi Grimberg 	    req->data_len <= nvme_tcp_inline_data_size(queue))
22683f2304f8SSagi Grimberg 		req->pdu_len = req->data_len;
22693f2304f8SSagi Grimberg 	else if (req->curr_bio)
22703f2304f8SSagi Grimberg 		nvme_tcp_init_iter(req, READ);
22713f2304f8SSagi Grimberg 
22723f2304f8SSagi Grimberg 	pdu->hdr.type = nvme_tcp_cmd;
22733f2304f8SSagi Grimberg 	pdu->hdr.flags = 0;
22743f2304f8SSagi Grimberg 	if (queue->hdr_digest)
22753f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
22763f2304f8SSagi Grimberg 	if (queue->data_digest && req->pdu_len) {
22773f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
22783f2304f8SSagi Grimberg 		ddgst = nvme_tcp_ddgst_len(queue);
22793f2304f8SSagi Grimberg 	}
22803f2304f8SSagi Grimberg 	pdu->hdr.hlen = sizeof(*pdu);
22813f2304f8SSagi Grimberg 	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
22823f2304f8SSagi Grimberg 	pdu->hdr.plen =
22833f2304f8SSagi Grimberg 		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
22843f2304f8SSagi Grimberg 
22853f2304f8SSagi Grimberg 	ret = nvme_tcp_map_data(queue, rq);
22863f2304f8SSagi Grimberg 	if (unlikely(ret)) {
228728a4cac4SMax Gurtovoy 		nvme_cleanup_cmd(rq);
22883f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
22893f2304f8SSagi Grimberg 			"Failed to map data (%d)\n", ret);
22903f2304f8SSagi Grimberg 		return ret;
22913f2304f8SSagi Grimberg 	}
22923f2304f8SSagi Grimberg 
22933f2304f8SSagi Grimberg 	return 0;
22943f2304f8SSagi Grimberg }
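/*
 * Worked example for the header math above, assuming 4-byte CRC32C
 * digests: a 4KiB write sent inline with both digests enabled gives
 * hlen = sizeof(*pdu) = 72, pdo = 72 + 4 = 76 and
 * plen = 72 + 4 + 4096 + 4 = 4176. A read has pdu_len == 0, so ddgst
 * stays 0, pdo = 0 and plen = 76 with only the header digest added.
 */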
22953f2304f8SSagi Grimberg 
229686f0348aSSagi Grimberg static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
229786f0348aSSagi Grimberg {
229886f0348aSSagi Grimberg 	struct nvme_tcp_queue *queue = hctx->driver_data;
229986f0348aSSagi Grimberg 
230086f0348aSSagi Grimberg 	if (!llist_empty(&queue->req_list))
230186f0348aSSagi Grimberg 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
230286f0348aSSagi Grimberg }
230386f0348aSSagi Grimberg 
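/*
 * blk-mq invokes ->commit_rqs() when it dispatched a batch without
 * marking the final request with bd->last (e.g. the plug list ran dry
 * early), so this hook is the backstop that makes sure an accumulated
 * batch is actually pushed to the socket by io_work.
 */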
23043f2304f8SSagi Grimberg static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
23053f2304f8SSagi Grimberg 		const struct blk_mq_queue_data *bd)
23063f2304f8SSagi Grimberg {
23073f2304f8SSagi Grimberg 	struct nvme_ns *ns = hctx->queue->queuedata;
23083f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = hctx->driver_data;
23093f2304f8SSagi Grimberg 	struct request *rq = bd->rq;
23103f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
23113f2304f8SSagi Grimberg 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
23123f2304f8SSagi Grimberg 	blk_status_t ret;
23133f2304f8SSagi Grimberg 
23143f2304f8SSagi Grimberg 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
23153f2304f8SSagi Grimberg 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
23163f2304f8SSagi Grimberg 
23173f2304f8SSagi Grimberg 	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
23183f2304f8SSagi Grimberg 	if (unlikely(ret))
23193f2304f8SSagi Grimberg 		return ret;
23203f2304f8SSagi Grimberg 
23213f2304f8SSagi Grimberg 	blk_mq_start_request(rq);
23223f2304f8SSagi Grimberg 
232386f0348aSSagi Grimberg 	nvme_tcp_queue_request(req, true, bd->last);
23243f2304f8SSagi Grimberg 
23253f2304f8SSagi Grimberg 	return BLK_STS_OK;
23263f2304f8SSagi Grimberg }
23273f2304f8SSagi Grimberg 
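/*
 * nvmf_check_ready() gates commands issued while the queue is not LIVE
 * (e.g. during reconnect): connect and selected fabrics commands pass
 * through, everything else is failed or requeued by
 * nvmf_fail_nonready_command() depending on controller state.
 */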
2328873946f4SSagi Grimberg static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2329873946f4SSagi Grimberg {
2330873946f4SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
233164861993SSagi Grimberg 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2332873946f4SSagi Grimberg 
233364861993SSagi Grimberg 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2334873946f4SSagi Grimberg 		/* separate read/write queues */
2335873946f4SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
233664861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
233764861993SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
233864861993SSagi Grimberg 		set->map[HCTX_TYPE_READ].nr_queues =
233964861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_READ];
2340873946f4SSagi Grimberg 		set->map[HCTX_TYPE_READ].queue_offset =
234164861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2342873946f4SSagi Grimberg 	} else {
234364861993SSagi Grimberg 		/* shared read/write queues */
2344873946f4SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
234564861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
234664861993SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
234764861993SSagi Grimberg 		set->map[HCTX_TYPE_READ].nr_queues =
234864861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2349873946f4SSagi Grimberg 		set->map[HCTX_TYPE_READ].queue_offset = 0;
2350873946f4SSagi Grimberg 	}
2351873946f4SSagi Grimberg 	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2352873946f4SSagi Grimberg 	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
235364861993SSagi Grimberg 
23541a9460ceSSagi Grimberg 	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
23551a9460ceSSagi Grimberg 		/* map dedicated poll queues only if we have queues left */
23561a9460ceSSagi Grimberg 		set->map[HCTX_TYPE_POLL].nr_queues =
23571a9460ceSSagi Grimberg 				ctrl->io_queues[HCTX_TYPE_POLL];
23581a9460ceSSagi Grimberg 		set->map[HCTX_TYPE_POLL].queue_offset =
23591a9460ceSSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
23601a9460ceSSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_READ];
23611a9460ceSSagi Grimberg 		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
23621a9460ceSSagi Grimberg 	}
23631a9460ceSSagi Grimberg 
236464861993SSagi Grimberg 	dev_info(ctrl->ctrl.device,
23651a9460ceSSagi Grimberg 		"mapped %d/%d/%d default/read/poll queues.\n",
236664861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT],
23671a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_READ],
23681a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_POLL]);
236964861993SSagi Grimberg 
2370873946f4SSagi Grimberg 	return 0;
2371873946f4SSagi Grimberg }
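/*
 * Illustrative split for the mapping above (values are hypothetical):
 * with nr_io_queues=4, nr_write_queues=4 and nr_poll_queues=2, and
 * enough queues granted by the controller, io_queues[] becomes
 * {4, 4, 2}, so DEFAULT maps at offset 0, READ at offset 4, POLL at
 * offset 8, and the dev_info below prints
 * "mapped 4/4/2 default/read/poll queues.".
 */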
2372873946f4SSagi Grimberg 
23731a9460ceSSagi Grimberg static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
23741a9460ceSSagi Grimberg {
23751a9460ceSSagi Grimberg 	struct nvme_tcp_queue *queue = hctx->driver_data;
23761a9460ceSSagi Grimberg 	struct sock *sk = queue->sock->sk;
23771a9460ceSSagi Grimberg 
2378f86e5bf8SSagi Grimberg 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2379f86e5bf8SSagi Grimberg 		return 0;
2380f86e5bf8SSagi Grimberg 
238172e5d757SSagi Grimberg 	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
23823f926af3SEric Dumazet 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
23831a9460ceSSagi Grimberg 		sk_busy_loop(sk, true);
23841a9460ceSSagi Grimberg 	nvme_tcp_try_recv(queue);
238572e5d757SSagi Grimberg 	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
23861a9460ceSSagi Grimberg 	return queue->nr_cqe;
23871a9460ceSSagi Grimberg }
23881a9460ceSSagi Grimberg 
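/*
 * While NVME_TCP_Q_POLLING is set the data_ready callback skips
 * scheduling io_work, so received completions are reaped here directly
 * in the caller's context; nr_cqe tells blk-mq how many CQEs this poll
 * pass observed.
 */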
23896acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_mq_ops = {
23903f2304f8SSagi Grimberg 	.queue_rq	= nvme_tcp_queue_rq,
239186f0348aSSagi Grimberg 	.commit_rqs	= nvme_tcp_commit_rqs,
23923f2304f8SSagi Grimberg 	.complete	= nvme_complete_rq,
23933f2304f8SSagi Grimberg 	.init_request	= nvme_tcp_init_request,
23943f2304f8SSagi Grimberg 	.exit_request	= nvme_tcp_exit_request,
23953f2304f8SSagi Grimberg 	.init_hctx	= nvme_tcp_init_hctx,
23963f2304f8SSagi Grimberg 	.timeout	= nvme_tcp_timeout,
2397873946f4SSagi Grimberg 	.map_queues	= nvme_tcp_map_queues,
23981a9460ceSSagi Grimberg 	.poll		= nvme_tcp_poll,
23993f2304f8SSagi Grimberg };
24003f2304f8SSagi Grimberg 
24016acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
24023f2304f8SSagi Grimberg 	.queue_rq	= nvme_tcp_queue_rq,
24033f2304f8SSagi Grimberg 	.complete	= nvme_complete_rq,
24043f2304f8SSagi Grimberg 	.init_request	= nvme_tcp_init_request,
24053f2304f8SSagi Grimberg 	.exit_request	= nvme_tcp_exit_request,
24063f2304f8SSagi Grimberg 	.init_hctx	= nvme_tcp_init_admin_hctx,
24073f2304f8SSagi Grimberg 	.timeout	= nvme_tcp_timeout,
24083f2304f8SSagi Grimberg };
24093f2304f8SSagi Grimberg 
24103f2304f8SSagi Grimberg static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
24113f2304f8SSagi Grimberg 	.name			= "tcp",
24123f2304f8SSagi Grimberg 	.module			= THIS_MODULE,
24133f2304f8SSagi Grimberg 	.flags			= NVME_F_FABRICS,
24143f2304f8SSagi Grimberg 	.reg_read32		= nvmf_reg_read32,
24153f2304f8SSagi Grimberg 	.reg_read64		= nvmf_reg_read64,
24163f2304f8SSagi Grimberg 	.reg_write32		= nvmf_reg_write32,
24173f2304f8SSagi Grimberg 	.free_ctrl		= nvme_tcp_free_ctrl,
24183f2304f8SSagi Grimberg 	.submit_async_event	= nvme_tcp_submit_async_event,
24193f2304f8SSagi Grimberg 	.delete_ctrl		= nvme_tcp_delete_ctrl,
24203f2304f8SSagi Grimberg 	.get_address		= nvmf_get_address,
24213f2304f8SSagi Grimberg };
24223f2304f8SSagi Grimberg 
24233f2304f8SSagi Grimberg static bool
24243f2304f8SSagi Grimberg nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
24253f2304f8SSagi Grimberg {
24263f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
24273f2304f8SSagi Grimberg 	bool found = false;
24283f2304f8SSagi Grimberg 
24293f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
24303f2304f8SSagi Grimberg 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
24313f2304f8SSagi Grimberg 		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
24323f2304f8SSagi Grimberg 		if (found)
24333f2304f8SSagi Grimberg 			break;
24343f2304f8SSagi Grimberg 	}
24353f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
24363f2304f8SSagi Grimberg 
24373f2304f8SSagi Grimberg 	return found;
24383f2304f8SSagi Grimberg }
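/*
 * Duplicate detection above relies on nvmf_ip_options_match(), which
 * compares the subsystem/host NQNs and the source/destination address
 * tuple; passing duplicate_connect on the command line bypasses this
 * check in nvme_tcp_create_ctrl().
 */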
24393f2304f8SSagi Grimberg 
24403f2304f8SSagi Grimberg static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
24413f2304f8SSagi Grimberg 		struct nvmf_ctrl_options *opts)
24423f2304f8SSagi Grimberg {
24433f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
24443f2304f8SSagi Grimberg 	int ret;
24453f2304f8SSagi Grimberg 
24463f2304f8SSagi Grimberg 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
24473f2304f8SSagi Grimberg 	if (!ctrl)
24483f2304f8SSagi Grimberg 		return ERR_PTR(-ENOMEM);
24493f2304f8SSagi Grimberg 
24503f2304f8SSagi Grimberg 	INIT_LIST_HEAD(&ctrl->list);
24513f2304f8SSagi Grimberg 	ctrl->ctrl.opts = opts;
24521a9460ceSSagi Grimberg 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
24531a9460ceSSagi Grimberg 				opts->nr_poll_queues + 1;
24543f2304f8SSagi Grimberg 	ctrl->ctrl.sqsize = opts->queue_size - 1;
24553f2304f8SSagi Grimberg 	ctrl->ctrl.kato = opts->kato;
24563f2304f8SSagi Grimberg 
24573f2304f8SSagi Grimberg 	INIT_DELAYED_WORK(&ctrl->connect_work,
24583f2304f8SSagi Grimberg 			nvme_tcp_reconnect_ctrl_work);
24593f2304f8SSagi Grimberg 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
24603f2304f8SSagi Grimberg 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
24613f2304f8SSagi Grimberg 
24623f2304f8SSagi Grimberg 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
24633f2304f8SSagi Grimberg 		opts->trsvcid =
24643f2304f8SSagi Grimberg 			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
24653f2304f8SSagi Grimberg 		if (!opts->trsvcid) {
24663f2304f8SSagi Grimberg 			ret = -ENOMEM;
24673f2304f8SSagi Grimberg 			goto out_free_ctrl;
24683f2304f8SSagi Grimberg 		}
24693f2304f8SSagi Grimberg 		opts->mask |= NVMF_OPT_TRSVCID;
24703f2304f8SSagi Grimberg 	}
24713f2304f8SSagi Grimberg 
24723f2304f8SSagi Grimberg 	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
24733f2304f8SSagi Grimberg 			opts->traddr, opts->trsvcid, &ctrl->addr);
24743f2304f8SSagi Grimberg 	if (ret) {
24753f2304f8SSagi Grimberg 		pr_err("malformed address passed: %s:%s\n",
24763f2304f8SSagi Grimberg 			opts->traddr, opts->trsvcid);
24773f2304f8SSagi Grimberg 		goto out_free_ctrl;
24783f2304f8SSagi Grimberg 	}
24793f2304f8SSagi Grimberg 
24803f2304f8SSagi Grimberg 	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
24813f2304f8SSagi Grimberg 		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
24823f2304f8SSagi Grimberg 			opts->host_traddr, NULL, &ctrl->src_addr);
24833f2304f8SSagi Grimberg 		if (ret) {
24843f2304f8SSagi Grimberg 			pr_err("malformed src address passed: %s\n",
24853f2304f8SSagi Grimberg 			       opts->host_traddr);
24863f2304f8SSagi Grimberg 			goto out_free_ctrl;
24873f2304f8SSagi Grimberg 		}
24883f2304f8SSagi Grimberg 	}
24893f2304f8SSagi Grimberg 
24903f2304f8SSagi Grimberg 	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
24913f2304f8SSagi Grimberg 		ret = -EALREADY;
24923f2304f8SSagi Grimberg 		goto out_free_ctrl;
24933f2304f8SSagi Grimberg 	}
24943f2304f8SSagi Grimberg 
2495873946f4SSagi Grimberg 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
24963f2304f8SSagi Grimberg 				GFP_KERNEL);
24973f2304f8SSagi Grimberg 	if (!ctrl->queues) {
24983f2304f8SSagi Grimberg 		ret = -ENOMEM;
24993f2304f8SSagi Grimberg 		goto out_free_ctrl;
25003f2304f8SSagi Grimberg 	}
25013f2304f8SSagi Grimberg 
25023f2304f8SSagi Grimberg 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
25033f2304f8SSagi Grimberg 	if (ret)
25043f2304f8SSagi Grimberg 		goto out_kfree_queues;
25053f2304f8SSagi Grimberg 
25063f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
25073f2304f8SSagi Grimberg 		WARN_ON_ONCE(1);
25083f2304f8SSagi Grimberg 		ret = -EINTR;
25093f2304f8SSagi Grimberg 		goto out_uninit_ctrl;
25103f2304f8SSagi Grimberg 	}
25113f2304f8SSagi Grimberg 
25123f2304f8SSagi Grimberg 	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
25133f2304f8SSagi Grimberg 	if (ret)
25143f2304f8SSagi Grimberg 		goto out_uninit_ctrl;
25153f2304f8SSagi Grimberg 
25163f2304f8SSagi Grimberg 	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
25173f2304f8SSagi Grimberg 		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
25183f2304f8SSagi Grimberg 
25193f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
25203f2304f8SSagi Grimberg 	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
25213f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
25223f2304f8SSagi Grimberg 
25233f2304f8SSagi Grimberg 	return &ctrl->ctrl;
25243f2304f8SSagi Grimberg 
25253f2304f8SSagi Grimberg out_uninit_ctrl:
25263f2304f8SSagi Grimberg 	nvme_uninit_ctrl(&ctrl->ctrl);
25273f2304f8SSagi Grimberg 	nvme_put_ctrl(&ctrl->ctrl);
25283f2304f8SSagi Grimberg 	if (ret > 0)
25293f2304f8SSagi Grimberg 		ret = -EIO;
25303f2304f8SSagi Grimberg 	return ERR_PTR(ret);
25313f2304f8SSagi Grimberg out_kfree_queues:
25323f2304f8SSagi Grimberg 	kfree(ctrl->queues);
25333f2304f8SSagi Grimberg out_free_ctrl:
25343f2304f8SSagi Grimberg 	kfree(ctrl);
25353f2304f8SSagi Grimberg 	return ERR_PTR(ret);
25363f2304f8SSagi Grimberg }
25373f2304f8SSagi Grimberg 
25383f2304f8SSagi Grimberg static struct nvmf_transport_ops nvme_tcp_transport = {
25393f2304f8SSagi Grimberg 	.name		= "tcp",
25403f2304f8SSagi Grimberg 	.module		= THIS_MODULE,
25413f2304f8SSagi Grimberg 	.required_opts	= NVMF_OPT_TRADDR,
25423f2304f8SSagi Grimberg 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
25433f2304f8SSagi Grimberg 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2544873946f4SSagi Grimberg 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2545bb13985dSIsrael Rukshin 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2546bb13985dSIsrael Rukshin 			  NVMF_OPT_TOS,
25473f2304f8SSagi Grimberg 	.create_ctrl	= nvme_tcp_create_ctrl,
25483f2304f8SSagi Grimberg };
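/*
 * Usage sketch (illustrative address, port and NQN): once this transport
 * is registered, a controller is typically created from userspace via
 * nvme-cli, e.g.
 *
 *	nvme connect -t tcp -a 192.168.1.10 -s 4420 \
 *		-n nqn.2014-08.org.nvmexpress:example
 *
 * which writes to /dev/nvme-fabrics and lands in nvme_tcp_create_ctrl()
 * through nvmf_create_ctrl().
 */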
25493f2304f8SSagi Grimberg 
static int __init nvme_tcp_init_module(void)
{
	int ret;

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	/* unwind the workqueue if transport registration fails */
	ret = nvmf_register_transport(&nvme_tcp_transport);
	if (ret) {
		destroy_workqueue(nvme_tcp_wq);
		return ret;
	}

	return 0;
}
25603f2304f8SSagi Grimberg 
25613f2304f8SSagi Grimberg static void __exit nvme_tcp_cleanup_module(void)
25623f2304f8SSagi Grimberg {
25633f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
25643f2304f8SSagi Grimberg 
25653f2304f8SSagi Grimberg 	nvmf_unregister_transport(&nvme_tcp_transport);
25663f2304f8SSagi Grimberg 
25673f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
25683f2304f8SSagi Grimberg 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
25693f2304f8SSagi Grimberg 		nvme_delete_ctrl(&ctrl->ctrl);
25703f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
25713f2304f8SSagi Grimberg 	flush_workqueue(nvme_delete_wq);
25723f2304f8SSagi Grimberg 
25733f2304f8SSagi Grimberg 	destroy_workqueue(nvme_tcp_wq);
25743f2304f8SSagi Grimberg }
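/*
 * Teardown order above matters: unregistering the transport first
 * guarantees no new controllers appear while the list is drained, and
 * flushing nvme_delete_wq ensures every delete work item has finished
 * before the driver's workqueue is destroyed.
 */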
25753f2304f8SSagi Grimberg 
25763f2304f8SSagi Grimberg module_init(nvme_tcp_init_module);
25773f2304f8SSagi Grimberg module_exit(nvme_tcp_cleanup_module);
25783f2304f8SSagi Grimberg 
25793f2304f8SSagi Grimberg MODULE_LICENSE("GPL v2");