xref: /openbmc/linux/drivers/nvme/host/tcp.c (revision adc99fd3)
13f2304f8SSagi Grimberg // SPDX-License-Identifier: GPL-2.0
23f2304f8SSagi Grimberg /*
33f2304f8SSagi Grimberg  * NVMe over Fabrics TCP host.
43f2304f8SSagi Grimberg  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
53f2304f8SSagi Grimberg  */
63f2304f8SSagi Grimberg #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
73f2304f8SSagi Grimberg #include <linux/module.h>
83f2304f8SSagi Grimberg #include <linux/init.h>
93f2304f8SSagi Grimberg #include <linux/slab.h>
103f2304f8SSagi Grimberg #include <linux/err.h>
113f2304f8SSagi Grimberg #include <linux/nvme-tcp.h>
123f2304f8SSagi Grimberg #include <net/sock.h>
133f2304f8SSagi Grimberg #include <net/tcp.h>
143f2304f8SSagi Grimberg #include <linux/blk-mq.h>
153f2304f8SSagi Grimberg #include <crypto/hash.h>
161a9460ceSSagi Grimberg #include <net/busy_poll.h>
173f2304f8SSagi Grimberg 
183f2304f8SSagi Grimberg #include "nvme.h"
193f2304f8SSagi Grimberg #include "fabrics.h"
203f2304f8SSagi Grimberg 
213f2304f8SSagi Grimberg struct nvme_tcp_queue;
223f2304f8SSagi Grimberg 
239912ade3SWunderlich, Mark /* Define the socket priority to use for connections were it is desirable
249912ade3SWunderlich, Mark  * that the NIC consider performing optimized packet processing or filtering.
259912ade3SWunderlich, Mark  * A non-zero value being sufficient to indicate general consideration of any
269912ade3SWunderlich, Mark  * possible optimization.  Making it a module param allows for alternative
279912ade3SWunderlich, Mark  * values that may be unique for some NIC implementations.
289912ade3SWunderlich, Mark  */
299912ade3SWunderlich, Mark static int so_priority;
309912ade3SWunderlich, Mark module_param(so_priority, int, 0644);
319912ade3SWunderlich, Mark MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
329912ade3SWunderlich, Mark 
333f2304f8SSagi Grimberg enum nvme_tcp_send_state {
343f2304f8SSagi Grimberg 	NVME_TCP_SEND_CMD_PDU = 0,
353f2304f8SSagi Grimberg 	NVME_TCP_SEND_H2C_PDU,
363f2304f8SSagi Grimberg 	NVME_TCP_SEND_DATA,
373f2304f8SSagi Grimberg 	NVME_TCP_SEND_DDGST,
383f2304f8SSagi Grimberg };
393f2304f8SSagi Grimberg 
403f2304f8SSagi Grimberg struct nvme_tcp_request {
413f2304f8SSagi Grimberg 	struct nvme_request	req;
423f2304f8SSagi Grimberg 	void			*pdu;
433f2304f8SSagi Grimberg 	struct nvme_tcp_queue	*queue;
443f2304f8SSagi Grimberg 	u32			data_len;
453f2304f8SSagi Grimberg 	u32			pdu_len;
463f2304f8SSagi Grimberg 	u32			pdu_sent;
473f2304f8SSagi Grimberg 	u16			ttag;
483f2304f8SSagi Grimberg 	struct list_head	entry;
49a7273d40SChristoph Hellwig 	__le32			ddgst;
503f2304f8SSagi Grimberg 
513f2304f8SSagi Grimberg 	struct bio		*curr_bio;
523f2304f8SSagi Grimberg 	struct iov_iter		iter;
533f2304f8SSagi Grimberg 
543f2304f8SSagi Grimberg 	/* send state */
553f2304f8SSagi Grimberg 	size_t			offset;
563f2304f8SSagi Grimberg 	size_t			data_sent;
573f2304f8SSagi Grimberg 	enum nvme_tcp_send_state state;
583f2304f8SSagi Grimberg };
593f2304f8SSagi Grimberg 
603f2304f8SSagi Grimberg enum nvme_tcp_queue_flags {
613f2304f8SSagi Grimberg 	NVME_TCP_Q_ALLOCATED	= 0,
623f2304f8SSagi Grimberg 	NVME_TCP_Q_LIVE		= 1,
6372e5d757SSagi Grimberg 	NVME_TCP_Q_POLLING	= 2,
643f2304f8SSagi Grimberg };
653f2304f8SSagi Grimberg 
663f2304f8SSagi Grimberg enum nvme_tcp_recv_state {
673f2304f8SSagi Grimberg 	NVME_TCP_RECV_PDU = 0,
683f2304f8SSagi Grimberg 	NVME_TCP_RECV_DATA,
693f2304f8SSagi Grimberg 	NVME_TCP_RECV_DDGST,
703f2304f8SSagi Grimberg };
713f2304f8SSagi Grimberg 
723f2304f8SSagi Grimberg struct nvme_tcp_ctrl;
733f2304f8SSagi Grimberg struct nvme_tcp_queue {
743f2304f8SSagi Grimberg 	struct socket		*sock;
753f2304f8SSagi Grimberg 	struct work_struct	io_work;
763f2304f8SSagi Grimberg 	int			io_cpu;
773f2304f8SSagi Grimberg 
783f2304f8SSagi Grimberg 	spinlock_t		lock;
79db5ad6b7SSagi Grimberg 	struct mutex		send_mutex;
803f2304f8SSagi Grimberg 	struct list_head	send_list;
813f2304f8SSagi Grimberg 
823f2304f8SSagi Grimberg 	/* recv state */
833f2304f8SSagi Grimberg 	void			*pdu;
843f2304f8SSagi Grimberg 	int			pdu_remaining;
853f2304f8SSagi Grimberg 	int			pdu_offset;
863f2304f8SSagi Grimberg 	size_t			data_remaining;
873f2304f8SSagi Grimberg 	size_t			ddgst_remaining;
881a9460ceSSagi Grimberg 	unsigned int		nr_cqe;
893f2304f8SSagi Grimberg 
903f2304f8SSagi Grimberg 	/* send state */
913f2304f8SSagi Grimberg 	struct nvme_tcp_request *request;
923f2304f8SSagi Grimberg 
933f2304f8SSagi Grimberg 	int			queue_size;
943f2304f8SSagi Grimberg 	size_t			cmnd_capsule_len;
953f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl	*ctrl;
963f2304f8SSagi Grimberg 	unsigned long		flags;
973f2304f8SSagi Grimberg 	bool			rd_enabled;
983f2304f8SSagi Grimberg 
993f2304f8SSagi Grimberg 	bool			hdr_digest;
1003f2304f8SSagi Grimberg 	bool			data_digest;
1013f2304f8SSagi Grimberg 	struct ahash_request	*rcv_hash;
1023f2304f8SSagi Grimberg 	struct ahash_request	*snd_hash;
1033f2304f8SSagi Grimberg 	__le32			exp_ddgst;
1043f2304f8SSagi Grimberg 	__le32			recv_ddgst;
1053f2304f8SSagi Grimberg 
1063f2304f8SSagi Grimberg 	struct page_frag_cache	pf_cache;
1073f2304f8SSagi Grimberg 
1083f2304f8SSagi Grimberg 	void (*state_change)(struct sock *);
1093f2304f8SSagi Grimberg 	void (*data_ready)(struct sock *);
1103f2304f8SSagi Grimberg 	void (*write_space)(struct sock *);
1113f2304f8SSagi Grimberg };
1123f2304f8SSagi Grimberg 
1133f2304f8SSagi Grimberg struct nvme_tcp_ctrl {
1143f2304f8SSagi Grimberg 	/* read only in the hot path */
1153f2304f8SSagi Grimberg 	struct nvme_tcp_queue	*queues;
1163f2304f8SSagi Grimberg 	struct blk_mq_tag_set	tag_set;
1173f2304f8SSagi Grimberg 
1183f2304f8SSagi Grimberg 	/* other member variables */
1193f2304f8SSagi Grimberg 	struct list_head	list;
1203f2304f8SSagi Grimberg 	struct blk_mq_tag_set	admin_tag_set;
1213f2304f8SSagi Grimberg 	struct sockaddr_storage addr;
1223f2304f8SSagi Grimberg 	struct sockaddr_storage src_addr;
1233f2304f8SSagi Grimberg 	struct nvme_ctrl	ctrl;
1243f2304f8SSagi Grimberg 
1253f2304f8SSagi Grimberg 	struct work_struct	err_work;
1263f2304f8SSagi Grimberg 	struct delayed_work	connect_work;
1273f2304f8SSagi Grimberg 	struct nvme_tcp_request async_req;
12864861993SSagi Grimberg 	u32			io_queues[HCTX_MAX_TYPES];
1293f2304f8SSagi Grimberg };
1303f2304f8SSagi Grimberg 
1313f2304f8SSagi Grimberg static LIST_HEAD(nvme_tcp_ctrl_list);
1323f2304f8SSagi Grimberg static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
1333f2304f8SSagi Grimberg static struct workqueue_struct *nvme_tcp_wq;
1346acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_mq_ops;
1356acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
136db5ad6b7SSagi Grimberg static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
1373f2304f8SSagi Grimberg 
1383f2304f8SSagi Grimberg static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
1393f2304f8SSagi Grimberg {
1403f2304f8SSagi Grimberg 	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
1413f2304f8SSagi Grimberg }
1423f2304f8SSagi Grimberg 
1433f2304f8SSagi Grimberg static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
1443f2304f8SSagi Grimberg {
1453f2304f8SSagi Grimberg 	return queue - queue->ctrl->queues;
1463f2304f8SSagi Grimberg }
1473f2304f8SSagi Grimberg 
1483f2304f8SSagi Grimberg static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
1493f2304f8SSagi Grimberg {
1503f2304f8SSagi Grimberg 	u32 queue_idx = nvme_tcp_queue_id(queue);
1513f2304f8SSagi Grimberg 
1523f2304f8SSagi Grimberg 	if (queue_idx == 0)
1533f2304f8SSagi Grimberg 		return queue->ctrl->admin_tag_set.tags[queue_idx];
1543f2304f8SSagi Grimberg 	return queue->ctrl->tag_set.tags[queue_idx - 1];
1553f2304f8SSagi Grimberg }
1563f2304f8SSagi Grimberg 
1573f2304f8SSagi Grimberg static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
1583f2304f8SSagi Grimberg {
1593f2304f8SSagi Grimberg 	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
1603f2304f8SSagi Grimberg }
1613f2304f8SSagi Grimberg 
1623f2304f8SSagi Grimberg static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
1633f2304f8SSagi Grimberg {
1643f2304f8SSagi Grimberg 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
1653f2304f8SSagi Grimberg }
1663f2304f8SSagi Grimberg 
1673f2304f8SSagi Grimberg static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
1683f2304f8SSagi Grimberg {
1693f2304f8SSagi Grimberg 	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
1703f2304f8SSagi Grimberg }
1713f2304f8SSagi Grimberg 
1723f2304f8SSagi Grimberg static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
1733f2304f8SSagi Grimberg {
1743f2304f8SSagi Grimberg 	return req == &req->queue->ctrl->async_req;
1753f2304f8SSagi Grimberg }
1763f2304f8SSagi Grimberg 
1773f2304f8SSagi Grimberg static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
1783f2304f8SSagi Grimberg {
1793f2304f8SSagi Grimberg 	struct request *rq;
1803f2304f8SSagi Grimberg 
1813f2304f8SSagi Grimberg 	if (unlikely(nvme_tcp_async_req(req)))
1823f2304f8SSagi Grimberg 		return false; /* async events don't have a request */
1833f2304f8SSagi Grimberg 
1843f2304f8SSagi Grimberg 	rq = blk_mq_rq_from_pdu(req);
1853f2304f8SSagi Grimberg 
18625e5cb78SSagi Grimberg 	return rq_data_dir(rq) == WRITE && req->data_len &&
18725e5cb78SSagi Grimberg 		req->data_len <= nvme_tcp_inline_data_size(req->queue);
1883f2304f8SSagi Grimberg }
1893f2304f8SSagi Grimberg 
1903f2304f8SSagi Grimberg static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
1913f2304f8SSagi Grimberg {
1923f2304f8SSagi Grimberg 	return req->iter.bvec->bv_page;
1933f2304f8SSagi Grimberg }
1943f2304f8SSagi Grimberg 
1953f2304f8SSagi Grimberg static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
1963f2304f8SSagi Grimberg {
1973f2304f8SSagi Grimberg 	return req->iter.bvec->bv_offset + req->iter.iov_offset;
1983f2304f8SSagi Grimberg }
1993f2304f8SSagi Grimberg 
2003f2304f8SSagi Grimberg static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
2013f2304f8SSagi Grimberg {
2023f2304f8SSagi Grimberg 	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
2033f2304f8SSagi Grimberg 			req->pdu_len - req->pdu_sent);
2043f2304f8SSagi Grimberg }
2053f2304f8SSagi Grimberg 
2063f2304f8SSagi Grimberg static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
2073f2304f8SSagi Grimberg {
2083f2304f8SSagi Grimberg 	return req->iter.iov_offset;
2093f2304f8SSagi Grimberg }
2103f2304f8SSagi Grimberg 
2113f2304f8SSagi Grimberg static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
2123f2304f8SSagi Grimberg {
2133f2304f8SSagi Grimberg 	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
2143f2304f8SSagi Grimberg 			req->pdu_len - req->pdu_sent : 0;
2153f2304f8SSagi Grimberg }
2163f2304f8SSagi Grimberg 
2173f2304f8SSagi Grimberg static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
2183f2304f8SSagi Grimberg 		int len)
2193f2304f8SSagi Grimberg {
2203f2304f8SSagi Grimberg 	return nvme_tcp_pdu_data_left(req) <= len;
2213f2304f8SSagi Grimberg }
2223f2304f8SSagi Grimberg 
2233f2304f8SSagi Grimberg static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
2243f2304f8SSagi Grimberg 		unsigned int dir)
2253f2304f8SSagi Grimberg {
2263f2304f8SSagi Grimberg 	struct request *rq = blk_mq_rq_from_pdu(req);
2273f2304f8SSagi Grimberg 	struct bio_vec *vec;
2283f2304f8SSagi Grimberg 	unsigned int size;
2293f2304f8SSagi Grimberg 	int nsegs;
2303f2304f8SSagi Grimberg 	size_t offset;
2313f2304f8SSagi Grimberg 
2323f2304f8SSagi Grimberg 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
2333f2304f8SSagi Grimberg 		vec = &rq->special_vec;
2343f2304f8SSagi Grimberg 		nsegs = 1;
2353f2304f8SSagi Grimberg 		size = blk_rq_payload_bytes(rq);
2363f2304f8SSagi Grimberg 		offset = 0;
2373f2304f8SSagi Grimberg 	} else {
2383f2304f8SSagi Grimberg 		struct bio *bio = req->curr_bio;
2393f2304f8SSagi Grimberg 
2403f2304f8SSagi Grimberg 		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
2413f2304f8SSagi Grimberg 		nsegs = bio_segments(bio);
2423f2304f8SSagi Grimberg 		size = bio->bi_iter.bi_size;
2433f2304f8SSagi Grimberg 		offset = bio->bi_iter.bi_bvec_done;
2443f2304f8SSagi Grimberg 	}
2453f2304f8SSagi Grimberg 
2463f2304f8SSagi Grimberg 	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
2473f2304f8SSagi Grimberg 	req->iter.iov_offset = offset;
2483f2304f8SSagi Grimberg }
2493f2304f8SSagi Grimberg 
2503f2304f8SSagi Grimberg static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
2513f2304f8SSagi Grimberg 		int len)
2523f2304f8SSagi Grimberg {
2533f2304f8SSagi Grimberg 	req->data_sent += len;
2543f2304f8SSagi Grimberg 	req->pdu_sent += len;
2553f2304f8SSagi Grimberg 	iov_iter_advance(&req->iter, len);
2563f2304f8SSagi Grimberg 	if (!iov_iter_count(&req->iter) &&
2573f2304f8SSagi Grimberg 	    req->data_sent < req->data_len) {
2583f2304f8SSagi Grimberg 		req->curr_bio = req->curr_bio->bi_next;
2593f2304f8SSagi Grimberg 		nvme_tcp_init_iter(req, WRITE);
2603f2304f8SSagi Grimberg 	}
2613f2304f8SSagi Grimberg }
2623f2304f8SSagi Grimberg 
263db5ad6b7SSagi Grimberg static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
264db5ad6b7SSagi Grimberg 		bool sync)
2653f2304f8SSagi Grimberg {
2663f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
267db5ad6b7SSagi Grimberg 	bool empty;
2683f2304f8SSagi Grimberg 
2693f2304f8SSagi Grimberg 	spin_lock(&queue->lock);
270db5ad6b7SSagi Grimberg 	empty = list_empty(&queue->send_list) && !queue->request;
2713f2304f8SSagi Grimberg 	list_add_tail(&req->entry, &queue->send_list);
2723f2304f8SSagi Grimberg 	spin_unlock(&queue->lock);
2733f2304f8SSagi Grimberg 
274db5ad6b7SSagi Grimberg 	/*
275db5ad6b7SSagi Grimberg 	 * if we're the first on the send_list and we can try to send
276db5ad6b7SSagi Grimberg 	 * directly, otherwise queue io_work. Also, only do that if we
277db5ad6b7SSagi Grimberg 	 * are on the same cpu, so we don't introduce contention.
278db5ad6b7SSagi Grimberg 	 */
279db5ad6b7SSagi Grimberg 	if (queue->io_cpu == smp_processor_id() &&
280db5ad6b7SSagi Grimberg 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
281db5ad6b7SSagi Grimberg 		nvme_tcp_try_send(queue);
282db5ad6b7SSagi Grimberg 		mutex_unlock(&queue->send_mutex);
283db5ad6b7SSagi Grimberg 	} else {
2843f2304f8SSagi Grimberg 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2853f2304f8SSagi Grimberg 	}
286db5ad6b7SSagi Grimberg }
2873f2304f8SSagi Grimberg 
2883f2304f8SSagi Grimberg static inline struct nvme_tcp_request *
2893f2304f8SSagi Grimberg nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
2903f2304f8SSagi Grimberg {
2913f2304f8SSagi Grimberg 	struct nvme_tcp_request *req;
2923f2304f8SSagi Grimberg 
2933f2304f8SSagi Grimberg 	spin_lock(&queue->lock);
2943f2304f8SSagi Grimberg 	req = list_first_entry_or_null(&queue->send_list,
2953f2304f8SSagi Grimberg 			struct nvme_tcp_request, entry);
2963f2304f8SSagi Grimberg 	if (req)
2973f2304f8SSagi Grimberg 		list_del(&req->entry);
2983f2304f8SSagi Grimberg 	spin_unlock(&queue->lock);
2993f2304f8SSagi Grimberg 
3003f2304f8SSagi Grimberg 	return req;
3013f2304f8SSagi Grimberg }
3023f2304f8SSagi Grimberg 
303a7273d40SChristoph Hellwig static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
304a7273d40SChristoph Hellwig 		__le32 *dgst)
3053f2304f8SSagi Grimberg {
3063f2304f8SSagi Grimberg 	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
3073f2304f8SSagi Grimberg 	crypto_ahash_final(hash);
3083f2304f8SSagi Grimberg }
3093f2304f8SSagi Grimberg 
3103f2304f8SSagi Grimberg static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
3113f2304f8SSagi Grimberg 		struct page *page, off_t off, size_t len)
3123f2304f8SSagi Grimberg {
3133f2304f8SSagi Grimberg 	struct scatterlist sg;
3143f2304f8SSagi Grimberg 
3153f2304f8SSagi Grimberg 	sg_init_marker(&sg, 1);
3163f2304f8SSagi Grimberg 	sg_set_page(&sg, page, len, off);
3173f2304f8SSagi Grimberg 	ahash_request_set_crypt(hash, &sg, NULL, len);
3183f2304f8SSagi Grimberg 	crypto_ahash_update(hash);
3193f2304f8SSagi Grimberg }
3203f2304f8SSagi Grimberg 
3213f2304f8SSagi Grimberg static inline void nvme_tcp_hdgst(struct ahash_request *hash,
3223f2304f8SSagi Grimberg 		void *pdu, size_t len)
3233f2304f8SSagi Grimberg {
3243f2304f8SSagi Grimberg 	struct scatterlist sg;
3253f2304f8SSagi Grimberg 
3263f2304f8SSagi Grimberg 	sg_init_one(&sg, pdu, len);
3273f2304f8SSagi Grimberg 	ahash_request_set_crypt(hash, &sg, pdu + len, len);
3283f2304f8SSagi Grimberg 	crypto_ahash_digest(hash);
3293f2304f8SSagi Grimberg }
3303f2304f8SSagi Grimberg 
3313f2304f8SSagi Grimberg static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
3323f2304f8SSagi Grimberg 		void *pdu, size_t pdu_len)
3333f2304f8SSagi Grimberg {
3343f2304f8SSagi Grimberg 	struct nvme_tcp_hdr *hdr = pdu;
3353f2304f8SSagi Grimberg 	__le32 recv_digest;
3363f2304f8SSagi Grimberg 	__le32 exp_digest;
3373f2304f8SSagi Grimberg 
3383f2304f8SSagi Grimberg 	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
3393f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
3403f2304f8SSagi Grimberg 			"queue %d: header digest flag is cleared\n",
3413f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue));
3423f2304f8SSagi Grimberg 		return -EPROTO;
3433f2304f8SSagi Grimberg 	}
3443f2304f8SSagi Grimberg 
3453f2304f8SSagi Grimberg 	recv_digest = *(__le32 *)(pdu + hdr->hlen);
3463f2304f8SSagi Grimberg 	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
3473f2304f8SSagi Grimberg 	exp_digest = *(__le32 *)(pdu + hdr->hlen);
3483f2304f8SSagi Grimberg 	if (recv_digest != exp_digest) {
3493f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
3503f2304f8SSagi Grimberg 			"header digest error: recv %#x expected %#x\n",
3513f2304f8SSagi Grimberg 			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
3523f2304f8SSagi Grimberg 		return -EIO;
3533f2304f8SSagi Grimberg 	}
3543f2304f8SSagi Grimberg 
3553f2304f8SSagi Grimberg 	return 0;
3563f2304f8SSagi Grimberg }
3573f2304f8SSagi Grimberg 
3583f2304f8SSagi Grimberg static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
3593f2304f8SSagi Grimberg {
3603f2304f8SSagi Grimberg 	struct nvme_tcp_hdr *hdr = pdu;
3613f2304f8SSagi Grimberg 	u8 digest_len = nvme_tcp_hdgst_len(queue);
3623f2304f8SSagi Grimberg 	u32 len;
3633f2304f8SSagi Grimberg 
3643f2304f8SSagi Grimberg 	len = le32_to_cpu(hdr->plen) - hdr->hlen -
3653f2304f8SSagi Grimberg 		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
3663f2304f8SSagi Grimberg 
3673f2304f8SSagi Grimberg 	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
3683f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
3693f2304f8SSagi Grimberg 			"queue %d: data digest flag is cleared\n",
3703f2304f8SSagi Grimberg 		nvme_tcp_queue_id(queue));
3713f2304f8SSagi Grimberg 		return -EPROTO;
3723f2304f8SSagi Grimberg 	}
3733f2304f8SSagi Grimberg 	crypto_ahash_init(queue->rcv_hash);
3743f2304f8SSagi Grimberg 
3753f2304f8SSagi Grimberg 	return 0;
3763f2304f8SSagi Grimberg }
3773f2304f8SSagi Grimberg 
3783f2304f8SSagi Grimberg static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
3793f2304f8SSagi Grimberg 		struct request *rq, unsigned int hctx_idx)
3803f2304f8SSagi Grimberg {
3813f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
3823f2304f8SSagi Grimberg 
3833f2304f8SSagi Grimberg 	page_frag_free(req->pdu);
3843f2304f8SSagi Grimberg }
3853f2304f8SSagi Grimberg 
3863f2304f8SSagi Grimberg static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
3873f2304f8SSagi Grimberg 		struct request *rq, unsigned int hctx_idx,
3883f2304f8SSagi Grimberg 		unsigned int numa_node)
3893f2304f8SSagi Grimberg {
3903f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
3913f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
3923f2304f8SSagi Grimberg 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
3933f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
3943f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
3953f2304f8SSagi Grimberg 
3963f2304f8SSagi Grimberg 	req->pdu = page_frag_alloc(&queue->pf_cache,
3973f2304f8SSagi Grimberg 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
3983f2304f8SSagi Grimberg 		GFP_KERNEL | __GFP_ZERO);
3993f2304f8SSagi Grimberg 	if (!req->pdu)
4003f2304f8SSagi Grimberg 		return -ENOMEM;
4013f2304f8SSagi Grimberg 
4023f2304f8SSagi Grimberg 	req->queue = queue;
4033f2304f8SSagi Grimberg 	nvme_req(rq)->ctrl = &ctrl->ctrl;
4043f2304f8SSagi Grimberg 
4053f2304f8SSagi Grimberg 	return 0;
4063f2304f8SSagi Grimberg }
4073f2304f8SSagi Grimberg 
4083f2304f8SSagi Grimberg static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
4093f2304f8SSagi Grimberg 		unsigned int hctx_idx)
4103f2304f8SSagi Grimberg {
4113f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = data;
4123f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
4133f2304f8SSagi Grimberg 
4143f2304f8SSagi Grimberg 	hctx->driver_data = queue;
4153f2304f8SSagi Grimberg 	return 0;
4163f2304f8SSagi Grimberg }
4173f2304f8SSagi Grimberg 
4183f2304f8SSagi Grimberg static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
4193f2304f8SSagi Grimberg 		unsigned int hctx_idx)
4203f2304f8SSagi Grimberg {
4213f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = data;
4223f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
4233f2304f8SSagi Grimberg 
4243f2304f8SSagi Grimberg 	hctx->driver_data = queue;
4253f2304f8SSagi Grimberg 	return 0;
4263f2304f8SSagi Grimberg }
4273f2304f8SSagi Grimberg 
4283f2304f8SSagi Grimberg static enum nvme_tcp_recv_state
4293f2304f8SSagi Grimberg nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
4303f2304f8SSagi Grimberg {
4313f2304f8SSagi Grimberg 	return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
4323f2304f8SSagi Grimberg 		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
4333f2304f8SSagi Grimberg 		NVME_TCP_RECV_DATA;
4343f2304f8SSagi Grimberg }
4353f2304f8SSagi Grimberg 
4363f2304f8SSagi Grimberg static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
4373f2304f8SSagi Grimberg {
4383f2304f8SSagi Grimberg 	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
4393f2304f8SSagi Grimberg 				nvme_tcp_hdgst_len(queue);
4403f2304f8SSagi Grimberg 	queue->pdu_offset = 0;
4413f2304f8SSagi Grimberg 	queue->data_remaining = -1;
4423f2304f8SSagi Grimberg 	queue->ddgst_remaining = 0;
4433f2304f8SSagi Grimberg }
4443f2304f8SSagi Grimberg 
4453f2304f8SSagi Grimberg static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
4463f2304f8SSagi Grimberg {
4473f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
4483f2304f8SSagi Grimberg 		return;
4493f2304f8SSagi Grimberg 
45097b2512aSNigel Kirkland 	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
4513f2304f8SSagi Grimberg }
4523f2304f8SSagi Grimberg 
4533f2304f8SSagi Grimberg static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
4543f2304f8SSagi Grimberg 		struct nvme_completion *cqe)
4553f2304f8SSagi Grimberg {
4563f2304f8SSagi Grimberg 	struct request *rq;
4573f2304f8SSagi Grimberg 
4583f2304f8SSagi Grimberg 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
4593f2304f8SSagi Grimberg 	if (!rq) {
4603f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
4613f2304f8SSagi Grimberg 			"queue %d tag 0x%x not found\n",
4623f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), cqe->command_id);
4633f2304f8SSagi Grimberg 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
4643f2304f8SSagi Grimberg 		return -EINVAL;
4653f2304f8SSagi Grimberg 	}
4663f2304f8SSagi Grimberg 
4673f2304f8SSagi Grimberg 	nvme_end_request(rq, cqe->status, cqe->result);
4681a9460ceSSagi Grimberg 	queue->nr_cqe++;
4693f2304f8SSagi Grimberg 
4703f2304f8SSagi Grimberg 	return 0;
4713f2304f8SSagi Grimberg }
4723f2304f8SSagi Grimberg 
4733f2304f8SSagi Grimberg static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
4743f2304f8SSagi Grimberg 		struct nvme_tcp_data_pdu *pdu)
4753f2304f8SSagi Grimberg {
4763f2304f8SSagi Grimberg 	struct request *rq;
4773f2304f8SSagi Grimberg 
4783f2304f8SSagi Grimberg 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
4793f2304f8SSagi Grimberg 	if (!rq) {
4803f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
4813f2304f8SSagi Grimberg 			"queue %d tag %#x not found\n",
4823f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), pdu->command_id);
4833f2304f8SSagi Grimberg 		return -ENOENT;
4843f2304f8SSagi Grimberg 	}
4853f2304f8SSagi Grimberg 
4863f2304f8SSagi Grimberg 	if (!blk_rq_payload_bytes(rq)) {
4873f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
4883f2304f8SSagi Grimberg 			"queue %d tag %#x unexpected data\n",
4893f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), rq->tag);
4903f2304f8SSagi Grimberg 		return -EIO;
4913f2304f8SSagi Grimberg 	}
4923f2304f8SSagi Grimberg 
4933f2304f8SSagi Grimberg 	queue->data_remaining = le32_to_cpu(pdu->data_length);
4943f2304f8SSagi Grimberg 
495602d674cSSagi Grimberg 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
496602d674cSSagi Grimberg 	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
497602d674cSSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
498602d674cSSagi Grimberg 			"queue %d tag %#x SUCCESS set but not last PDU\n",
499602d674cSSagi Grimberg 			nvme_tcp_queue_id(queue), rq->tag);
500602d674cSSagi Grimberg 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
501602d674cSSagi Grimberg 		return -EPROTO;
502602d674cSSagi Grimberg 	}
503602d674cSSagi Grimberg 
5043f2304f8SSagi Grimberg 	return 0;
5053f2304f8SSagi Grimberg }
5063f2304f8SSagi Grimberg 
5073f2304f8SSagi Grimberg static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
5083f2304f8SSagi Grimberg 		struct nvme_tcp_rsp_pdu *pdu)
5093f2304f8SSagi Grimberg {
5103f2304f8SSagi Grimberg 	struct nvme_completion *cqe = &pdu->cqe;
5113f2304f8SSagi Grimberg 	int ret = 0;
5123f2304f8SSagi Grimberg 
5133f2304f8SSagi Grimberg 	/*
5143f2304f8SSagi Grimberg 	 * AEN requests are special as they don't time out and can
5153f2304f8SSagi Grimberg 	 * survive any kind of queue freeze and often don't respond to
5163f2304f8SSagi Grimberg 	 * aborts.  We don't even bother to allocate a struct request
5173f2304f8SSagi Grimberg 	 * for them but rather special case them here.
5183f2304f8SSagi Grimberg 	 */
51958a8df67SIsrael Rukshin 	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
52058a8df67SIsrael Rukshin 				     cqe->command_id)))
5213f2304f8SSagi Grimberg 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
5223f2304f8SSagi Grimberg 				&cqe->result);
5233f2304f8SSagi Grimberg 	else
5243f2304f8SSagi Grimberg 		ret = nvme_tcp_process_nvme_cqe(queue, cqe);
5253f2304f8SSagi Grimberg 
5263f2304f8SSagi Grimberg 	return ret;
5273f2304f8SSagi Grimberg }
5283f2304f8SSagi Grimberg 
5293f2304f8SSagi Grimberg static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
5303f2304f8SSagi Grimberg 		struct nvme_tcp_r2t_pdu *pdu)
5313f2304f8SSagi Grimberg {
5323f2304f8SSagi Grimberg 	struct nvme_tcp_data_pdu *data = req->pdu;
5333f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
5343f2304f8SSagi Grimberg 	struct request *rq = blk_mq_rq_from_pdu(req);
5353f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
5363f2304f8SSagi Grimberg 	u8 ddgst = nvme_tcp_ddgst_len(queue);
5373f2304f8SSagi Grimberg 
5383f2304f8SSagi Grimberg 	req->pdu_len = le32_to_cpu(pdu->r2t_length);
5393f2304f8SSagi Grimberg 	req->pdu_sent = 0;
5403f2304f8SSagi Grimberg 
5413f2304f8SSagi Grimberg 	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
5423f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
5433f2304f8SSagi Grimberg 			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
5443f2304f8SSagi Grimberg 			rq->tag, req->pdu_len, req->data_len,
5453f2304f8SSagi Grimberg 			req->data_sent);
5463f2304f8SSagi Grimberg 		return -EPROTO;
5473f2304f8SSagi Grimberg 	}
5483f2304f8SSagi Grimberg 
5493f2304f8SSagi Grimberg 	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
5503f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
5513f2304f8SSagi Grimberg 			"req %d unexpected r2t offset %u (expected %zu)\n",
5523f2304f8SSagi Grimberg 			rq->tag, le32_to_cpu(pdu->r2t_offset),
5533f2304f8SSagi Grimberg 			req->data_sent);
5543f2304f8SSagi Grimberg 		return -EPROTO;
5553f2304f8SSagi Grimberg 	}
5563f2304f8SSagi Grimberg 
5573f2304f8SSagi Grimberg 	memset(data, 0, sizeof(*data));
5583f2304f8SSagi Grimberg 	data->hdr.type = nvme_tcp_h2c_data;
5593f2304f8SSagi Grimberg 	data->hdr.flags = NVME_TCP_F_DATA_LAST;
5603f2304f8SSagi Grimberg 	if (queue->hdr_digest)
5613f2304f8SSagi Grimberg 		data->hdr.flags |= NVME_TCP_F_HDGST;
5623f2304f8SSagi Grimberg 	if (queue->data_digest)
5633f2304f8SSagi Grimberg 		data->hdr.flags |= NVME_TCP_F_DDGST;
5643f2304f8SSagi Grimberg 	data->hdr.hlen = sizeof(*data);
5653f2304f8SSagi Grimberg 	data->hdr.pdo = data->hdr.hlen + hdgst;
5663f2304f8SSagi Grimberg 	data->hdr.plen =
5673f2304f8SSagi Grimberg 		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
5683f2304f8SSagi Grimberg 	data->ttag = pdu->ttag;
5693f2304f8SSagi Grimberg 	data->command_id = rq->tag;
5703f2304f8SSagi Grimberg 	data->data_offset = cpu_to_le32(req->data_sent);
5713f2304f8SSagi Grimberg 	data->data_length = cpu_to_le32(req->pdu_len);
5723f2304f8SSagi Grimberg 	return 0;
5733f2304f8SSagi Grimberg }
5743f2304f8SSagi Grimberg 
5753f2304f8SSagi Grimberg static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
5763f2304f8SSagi Grimberg 		struct nvme_tcp_r2t_pdu *pdu)
5773f2304f8SSagi Grimberg {
5783f2304f8SSagi Grimberg 	struct nvme_tcp_request *req;
5793f2304f8SSagi Grimberg 	struct request *rq;
5803f2304f8SSagi Grimberg 	int ret;
5813f2304f8SSagi Grimberg 
5823f2304f8SSagi Grimberg 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
5833f2304f8SSagi Grimberg 	if (!rq) {
5843f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
5853f2304f8SSagi Grimberg 			"queue %d tag %#x not found\n",
5863f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), pdu->command_id);
5873f2304f8SSagi Grimberg 		return -ENOENT;
5883f2304f8SSagi Grimberg 	}
5893f2304f8SSagi Grimberg 	req = blk_mq_rq_to_pdu(rq);
5903f2304f8SSagi Grimberg 
5913f2304f8SSagi Grimberg 	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
5923f2304f8SSagi Grimberg 	if (unlikely(ret))
5933f2304f8SSagi Grimberg 		return ret;
5943f2304f8SSagi Grimberg 
5953f2304f8SSagi Grimberg 	req->state = NVME_TCP_SEND_H2C_PDU;
5963f2304f8SSagi Grimberg 	req->offset = 0;
5973f2304f8SSagi Grimberg 
598db5ad6b7SSagi Grimberg 	nvme_tcp_queue_request(req, false);
5993f2304f8SSagi Grimberg 
6003f2304f8SSagi Grimberg 	return 0;
6013f2304f8SSagi Grimberg }
6023f2304f8SSagi Grimberg 
6033f2304f8SSagi Grimberg static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
6043f2304f8SSagi Grimberg 		unsigned int *offset, size_t *len)
6053f2304f8SSagi Grimberg {
6063f2304f8SSagi Grimberg 	struct nvme_tcp_hdr *hdr;
6073f2304f8SSagi Grimberg 	char *pdu = queue->pdu;
6083f2304f8SSagi Grimberg 	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
6093f2304f8SSagi Grimberg 	int ret;
6103f2304f8SSagi Grimberg 
6113f2304f8SSagi Grimberg 	ret = skb_copy_bits(skb, *offset,
6123f2304f8SSagi Grimberg 		&pdu[queue->pdu_offset], rcv_len);
6133f2304f8SSagi Grimberg 	if (unlikely(ret))
6143f2304f8SSagi Grimberg 		return ret;
6153f2304f8SSagi Grimberg 
6163f2304f8SSagi Grimberg 	queue->pdu_remaining -= rcv_len;
6173f2304f8SSagi Grimberg 	queue->pdu_offset += rcv_len;
6183f2304f8SSagi Grimberg 	*offset += rcv_len;
6193f2304f8SSagi Grimberg 	*len -= rcv_len;
6203f2304f8SSagi Grimberg 	if (queue->pdu_remaining)
6213f2304f8SSagi Grimberg 		return 0;
6223f2304f8SSagi Grimberg 
6233f2304f8SSagi Grimberg 	hdr = queue->pdu;
6243f2304f8SSagi Grimberg 	if (queue->hdr_digest) {
6253f2304f8SSagi Grimberg 		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
6263f2304f8SSagi Grimberg 		if (unlikely(ret))
6273f2304f8SSagi Grimberg 			return ret;
6283f2304f8SSagi Grimberg 	}
6293f2304f8SSagi Grimberg 
6303f2304f8SSagi Grimberg 
6313f2304f8SSagi Grimberg 	if (queue->data_digest) {
6323f2304f8SSagi Grimberg 		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
6333f2304f8SSagi Grimberg 		if (unlikely(ret))
6343f2304f8SSagi Grimberg 			return ret;
6353f2304f8SSagi Grimberg 	}
6363f2304f8SSagi Grimberg 
6373f2304f8SSagi Grimberg 	switch (hdr->type) {
6383f2304f8SSagi Grimberg 	case nvme_tcp_c2h_data:
6396be18260SSagi Grimberg 		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
6403f2304f8SSagi Grimberg 	case nvme_tcp_rsp:
6413f2304f8SSagi Grimberg 		nvme_tcp_init_recv_ctx(queue);
6426be18260SSagi Grimberg 		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
6433f2304f8SSagi Grimberg 	case nvme_tcp_r2t:
6443f2304f8SSagi Grimberg 		nvme_tcp_init_recv_ctx(queue);
6456be18260SSagi Grimberg 		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
6463f2304f8SSagi Grimberg 	default:
6473f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
6483f2304f8SSagi Grimberg 			"unsupported pdu type (%d)\n", hdr->type);
6493f2304f8SSagi Grimberg 		return -EINVAL;
6503f2304f8SSagi Grimberg 	}
6513f2304f8SSagi Grimberg }
6523f2304f8SSagi Grimberg 
653988aef9eSChristoph Hellwig static inline void nvme_tcp_end_request(struct request *rq, u16 status)
654602d674cSSagi Grimberg {
655602d674cSSagi Grimberg 	union nvme_result res = {};
656602d674cSSagi Grimberg 
657602d674cSSagi Grimberg 	nvme_end_request(rq, cpu_to_le16(status << 1), res);
658602d674cSSagi Grimberg }
659602d674cSSagi Grimberg 
6603f2304f8SSagi Grimberg static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
6613f2304f8SSagi Grimberg 			      unsigned int *offset, size_t *len)
6623f2304f8SSagi Grimberg {
6633f2304f8SSagi Grimberg 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
6643f2304f8SSagi Grimberg 	struct nvme_tcp_request *req;
6653f2304f8SSagi Grimberg 	struct request *rq;
6663f2304f8SSagi Grimberg 
6673f2304f8SSagi Grimberg 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
6683f2304f8SSagi Grimberg 	if (!rq) {
6693f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
6703f2304f8SSagi Grimberg 			"queue %d tag %#x not found\n",
6713f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), pdu->command_id);
6723f2304f8SSagi Grimberg 		return -ENOENT;
6733f2304f8SSagi Grimberg 	}
6743f2304f8SSagi Grimberg 	req = blk_mq_rq_to_pdu(rq);
6753f2304f8SSagi Grimberg 
6763f2304f8SSagi Grimberg 	while (true) {
6773f2304f8SSagi Grimberg 		int recv_len, ret;
6783f2304f8SSagi Grimberg 
6793f2304f8SSagi Grimberg 		recv_len = min_t(size_t, *len, queue->data_remaining);
6803f2304f8SSagi Grimberg 		if (!recv_len)
6813f2304f8SSagi Grimberg 			break;
6823f2304f8SSagi Grimberg 
6833f2304f8SSagi Grimberg 		if (!iov_iter_count(&req->iter)) {
6843f2304f8SSagi Grimberg 			req->curr_bio = req->curr_bio->bi_next;
6853f2304f8SSagi Grimberg 
6863f2304f8SSagi Grimberg 			/*
6873f2304f8SSagi Grimberg 			 * If we don`t have any bios it means that controller
6883f2304f8SSagi Grimberg 			 * sent more data than we requested, hence error
6893f2304f8SSagi Grimberg 			 */
6903f2304f8SSagi Grimberg 			if (!req->curr_bio) {
6913f2304f8SSagi Grimberg 				dev_err(queue->ctrl->ctrl.device,
6923f2304f8SSagi Grimberg 					"queue %d no space in request %#x",
6933f2304f8SSagi Grimberg 					nvme_tcp_queue_id(queue), rq->tag);
6943f2304f8SSagi Grimberg 				nvme_tcp_init_recv_ctx(queue);
6953f2304f8SSagi Grimberg 				return -EIO;
6963f2304f8SSagi Grimberg 			}
6973f2304f8SSagi Grimberg 			nvme_tcp_init_iter(req, READ);
6983f2304f8SSagi Grimberg 		}
6993f2304f8SSagi Grimberg 
7003f2304f8SSagi Grimberg 		/* we can read only from what is left in this bio */
7013f2304f8SSagi Grimberg 		recv_len = min_t(size_t, recv_len,
7023f2304f8SSagi Grimberg 				iov_iter_count(&req->iter));
7033f2304f8SSagi Grimberg 
7043f2304f8SSagi Grimberg 		if (queue->data_digest)
7053f2304f8SSagi Grimberg 			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
7063f2304f8SSagi Grimberg 				&req->iter, recv_len, queue->rcv_hash);
7073f2304f8SSagi Grimberg 		else
7083f2304f8SSagi Grimberg 			ret = skb_copy_datagram_iter(skb, *offset,
7093f2304f8SSagi Grimberg 					&req->iter, recv_len);
7103f2304f8SSagi Grimberg 		if (ret) {
7113f2304f8SSagi Grimberg 			dev_err(queue->ctrl->ctrl.device,
7123f2304f8SSagi Grimberg 				"queue %d failed to copy request %#x data",
7133f2304f8SSagi Grimberg 				nvme_tcp_queue_id(queue), rq->tag);
7143f2304f8SSagi Grimberg 			return ret;
7153f2304f8SSagi Grimberg 		}
7163f2304f8SSagi Grimberg 
7173f2304f8SSagi Grimberg 		*len -= recv_len;
7183f2304f8SSagi Grimberg 		*offset += recv_len;
7193f2304f8SSagi Grimberg 		queue->data_remaining -= recv_len;
7203f2304f8SSagi Grimberg 	}
7213f2304f8SSagi Grimberg 
7223f2304f8SSagi Grimberg 	if (!queue->data_remaining) {
7233f2304f8SSagi Grimberg 		if (queue->data_digest) {
7243f2304f8SSagi Grimberg 			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
7253f2304f8SSagi Grimberg 			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
7263f2304f8SSagi Grimberg 		} else {
7271a9460ceSSagi Grimberg 			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
728602d674cSSagi Grimberg 				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
7291a9460ceSSagi Grimberg 				queue->nr_cqe++;
7301a9460ceSSagi Grimberg 			}
7313f2304f8SSagi Grimberg 			nvme_tcp_init_recv_ctx(queue);
7323f2304f8SSagi Grimberg 		}
7333f2304f8SSagi Grimberg 	}
7343f2304f8SSagi Grimberg 
7353f2304f8SSagi Grimberg 	return 0;
7363f2304f8SSagi Grimberg }
7373f2304f8SSagi Grimberg 
7383f2304f8SSagi Grimberg static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
7393f2304f8SSagi Grimberg 		struct sk_buff *skb, unsigned int *offset, size_t *len)
7403f2304f8SSagi Grimberg {
741602d674cSSagi Grimberg 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
7423f2304f8SSagi Grimberg 	char *ddgst = (char *)&queue->recv_ddgst;
7433f2304f8SSagi Grimberg 	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
7443f2304f8SSagi Grimberg 	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
7453f2304f8SSagi Grimberg 	int ret;
7463f2304f8SSagi Grimberg 
7473f2304f8SSagi Grimberg 	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
7483f2304f8SSagi Grimberg 	if (unlikely(ret))
7493f2304f8SSagi Grimberg 		return ret;
7503f2304f8SSagi Grimberg 
7513f2304f8SSagi Grimberg 	queue->ddgst_remaining -= recv_len;
7523f2304f8SSagi Grimberg 	*offset += recv_len;
7533f2304f8SSagi Grimberg 	*len -= recv_len;
7543f2304f8SSagi Grimberg 	if (queue->ddgst_remaining)
7553f2304f8SSagi Grimberg 		return 0;
7563f2304f8SSagi Grimberg 
7573f2304f8SSagi Grimberg 	if (queue->recv_ddgst != queue->exp_ddgst) {
7583f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
7593f2304f8SSagi Grimberg 			"data digest error: recv %#x expected %#x\n",
7603f2304f8SSagi Grimberg 			le32_to_cpu(queue->recv_ddgst),
7613f2304f8SSagi Grimberg 			le32_to_cpu(queue->exp_ddgst));
7623f2304f8SSagi Grimberg 		return -EIO;
7633f2304f8SSagi Grimberg 	}
7643f2304f8SSagi Grimberg 
765602d674cSSagi Grimberg 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
766602d674cSSagi Grimberg 		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
767602d674cSSagi Grimberg 						pdu->command_id);
768602d674cSSagi Grimberg 
769602d674cSSagi Grimberg 		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
7701a9460ceSSagi Grimberg 		queue->nr_cqe++;
771602d674cSSagi Grimberg 	}
772602d674cSSagi Grimberg 
7733f2304f8SSagi Grimberg 	nvme_tcp_init_recv_ctx(queue);
7743f2304f8SSagi Grimberg 	return 0;
7753f2304f8SSagi Grimberg }
7763f2304f8SSagi Grimberg 
7773f2304f8SSagi Grimberg static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
7783f2304f8SSagi Grimberg 			     unsigned int offset, size_t len)
7793f2304f8SSagi Grimberg {
7803f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = desc->arg.data;
7813f2304f8SSagi Grimberg 	size_t consumed = len;
7823f2304f8SSagi Grimberg 	int result;
7833f2304f8SSagi Grimberg 
7843f2304f8SSagi Grimberg 	while (len) {
7853f2304f8SSagi Grimberg 		switch (nvme_tcp_recv_state(queue)) {
7863f2304f8SSagi Grimberg 		case NVME_TCP_RECV_PDU:
7873f2304f8SSagi Grimberg 			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
7883f2304f8SSagi Grimberg 			break;
7893f2304f8SSagi Grimberg 		case NVME_TCP_RECV_DATA:
7903f2304f8SSagi Grimberg 			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
7913f2304f8SSagi Grimberg 			break;
7923f2304f8SSagi Grimberg 		case NVME_TCP_RECV_DDGST:
7933f2304f8SSagi Grimberg 			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
7943f2304f8SSagi Grimberg 			break;
7953f2304f8SSagi Grimberg 		default:
7963f2304f8SSagi Grimberg 			result = -EFAULT;
7973f2304f8SSagi Grimberg 		}
7983f2304f8SSagi Grimberg 		if (result) {
7993f2304f8SSagi Grimberg 			dev_err(queue->ctrl->ctrl.device,
8003f2304f8SSagi Grimberg 				"receive failed:  %d\n", result);
8013f2304f8SSagi Grimberg 			queue->rd_enabled = false;
8023f2304f8SSagi Grimberg 			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
8033f2304f8SSagi Grimberg 			return result;
8043f2304f8SSagi Grimberg 		}
8053f2304f8SSagi Grimberg 	}
8063f2304f8SSagi Grimberg 
8073f2304f8SSagi Grimberg 	return consumed;
8083f2304f8SSagi Grimberg }
8093f2304f8SSagi Grimberg 
8103f2304f8SSagi Grimberg static void nvme_tcp_data_ready(struct sock *sk)
8113f2304f8SSagi Grimberg {
8123f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue;
8133f2304f8SSagi Grimberg 
814386e5e6eSSagi Grimberg 	read_lock_bh(&sk->sk_callback_lock);
8153f2304f8SSagi Grimberg 	queue = sk->sk_user_data;
81672e5d757SSagi Grimberg 	if (likely(queue && queue->rd_enabled) &&
81772e5d757SSagi Grimberg 	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
8183f2304f8SSagi Grimberg 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
819386e5e6eSSagi Grimberg 	read_unlock_bh(&sk->sk_callback_lock);
8203f2304f8SSagi Grimberg }
8213f2304f8SSagi Grimberg 
8223f2304f8SSagi Grimberg static void nvme_tcp_write_space(struct sock *sk)
8233f2304f8SSagi Grimberg {
8243f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue;
8253f2304f8SSagi Grimberg 
8263f2304f8SSagi Grimberg 	read_lock_bh(&sk->sk_callback_lock);
8273f2304f8SSagi Grimberg 	queue = sk->sk_user_data;
8283f2304f8SSagi Grimberg 	if (likely(queue && sk_stream_is_writeable(sk))) {
8293f2304f8SSagi Grimberg 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
8303f2304f8SSagi Grimberg 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
8313f2304f8SSagi Grimberg 	}
8323f2304f8SSagi Grimberg 	read_unlock_bh(&sk->sk_callback_lock);
8333f2304f8SSagi Grimberg }
8343f2304f8SSagi Grimberg 
8353f2304f8SSagi Grimberg static void nvme_tcp_state_change(struct sock *sk)
8363f2304f8SSagi Grimberg {
8373f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue;
8383f2304f8SSagi Grimberg 
8393f2304f8SSagi Grimberg 	read_lock(&sk->sk_callback_lock);
8403f2304f8SSagi Grimberg 	queue = sk->sk_user_data;
8413f2304f8SSagi Grimberg 	if (!queue)
8423f2304f8SSagi Grimberg 		goto done;
8433f2304f8SSagi Grimberg 
8443f2304f8SSagi Grimberg 	switch (sk->sk_state) {
8453f2304f8SSagi Grimberg 	case TCP_CLOSE:
8463f2304f8SSagi Grimberg 	case TCP_CLOSE_WAIT:
8473f2304f8SSagi Grimberg 	case TCP_LAST_ACK:
8483f2304f8SSagi Grimberg 	case TCP_FIN_WAIT1:
8493f2304f8SSagi Grimberg 	case TCP_FIN_WAIT2:
8503f2304f8SSagi Grimberg 		/* fallthrough */
8513f2304f8SSagi Grimberg 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
8523f2304f8SSagi Grimberg 		break;
8533f2304f8SSagi Grimberg 	default:
8543f2304f8SSagi Grimberg 		dev_info(queue->ctrl->ctrl.device,
8553f2304f8SSagi Grimberg 			"queue %d socket state %d\n",
8563f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), sk->sk_state);
8573f2304f8SSagi Grimberg 	}
8583f2304f8SSagi Grimberg 
8593f2304f8SSagi Grimberg 	queue->state_change(sk);
8603f2304f8SSagi Grimberg done:
8613f2304f8SSagi Grimberg 	read_unlock(&sk->sk_callback_lock);
8623f2304f8SSagi Grimberg }
8633f2304f8SSagi Grimberg 
8643f2304f8SSagi Grimberg static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
8653f2304f8SSagi Grimberg {
8663f2304f8SSagi Grimberg 	queue->request = NULL;
8673f2304f8SSagi Grimberg }
8683f2304f8SSagi Grimberg 
8693f2304f8SSagi Grimberg static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
8703f2304f8SSagi Grimberg {
87116686010SSagi Grimberg 	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
8723f2304f8SSagi Grimberg }
8733f2304f8SSagi Grimberg 
8743f2304f8SSagi Grimberg static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
8753f2304f8SSagi Grimberg {
8763f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
8773f2304f8SSagi Grimberg 
8783f2304f8SSagi Grimberg 	while (true) {
8793f2304f8SSagi Grimberg 		struct page *page = nvme_tcp_req_cur_page(req);
8803f2304f8SSagi Grimberg 		size_t offset = nvme_tcp_req_cur_offset(req);
8813f2304f8SSagi Grimberg 		size_t len = nvme_tcp_req_cur_length(req);
8823f2304f8SSagi Grimberg 		bool last = nvme_tcp_pdu_last_send(req, len);
8833f2304f8SSagi Grimberg 		int ret, flags = MSG_DONTWAIT;
8843f2304f8SSagi Grimberg 
8853f2304f8SSagi Grimberg 		if (last && !queue->data_digest)
8863f2304f8SSagi Grimberg 			flags |= MSG_EOR;
8873f2304f8SSagi Grimberg 		else
8885bb052d7SSagi Grimberg 			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
8893f2304f8SSagi Grimberg 
89037c15219SMikhail Skorzhinskii 		/* can't zcopy slab pages */
89137c15219SMikhail Skorzhinskii 		if (unlikely(PageSlab(page))) {
89237c15219SMikhail Skorzhinskii 			ret = sock_no_sendpage(queue->sock, page, offset, len,
89337c15219SMikhail Skorzhinskii 					flags);
89437c15219SMikhail Skorzhinskii 		} else {
89537c15219SMikhail Skorzhinskii 			ret = kernel_sendpage(queue->sock, page, offset, len,
89637c15219SMikhail Skorzhinskii 					flags);
89737c15219SMikhail Skorzhinskii 		}
8983f2304f8SSagi Grimberg 		if (ret <= 0)
8993f2304f8SSagi Grimberg 			return ret;
9003f2304f8SSagi Grimberg 
9013f2304f8SSagi Grimberg 		nvme_tcp_advance_req(req, ret);
9023f2304f8SSagi Grimberg 		if (queue->data_digest)
9033f2304f8SSagi Grimberg 			nvme_tcp_ddgst_update(queue->snd_hash, page,
9043f2304f8SSagi Grimberg 					offset, ret);
9053f2304f8SSagi Grimberg 
9063f2304f8SSagi Grimberg 		/* fully successful last write*/
9073f2304f8SSagi Grimberg 		if (last && ret == len) {
9083f2304f8SSagi Grimberg 			if (queue->data_digest) {
9093f2304f8SSagi Grimberg 				nvme_tcp_ddgst_final(queue->snd_hash,
9103f2304f8SSagi Grimberg 					&req->ddgst);
9113f2304f8SSagi Grimberg 				req->state = NVME_TCP_SEND_DDGST;
9123f2304f8SSagi Grimberg 				req->offset = 0;
9133f2304f8SSagi Grimberg 			} else {
9143f2304f8SSagi Grimberg 				nvme_tcp_done_send_req(queue);
9153f2304f8SSagi Grimberg 			}
9163f2304f8SSagi Grimberg 			return 1;
9173f2304f8SSagi Grimberg 		}
9183f2304f8SSagi Grimberg 	}
9193f2304f8SSagi Grimberg 	return -EAGAIN;
9203f2304f8SSagi Grimberg }
9213f2304f8SSagi Grimberg 
9223f2304f8SSagi Grimberg static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
9233f2304f8SSagi Grimberg {
9243f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
9253f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
9263f2304f8SSagi Grimberg 	bool inline_data = nvme_tcp_has_inline_data(req);
9273f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
9283f2304f8SSagi Grimberg 	int len = sizeof(*pdu) + hdgst - req->offset;
9295bb052d7SSagi Grimberg 	int flags = MSG_DONTWAIT;
9303f2304f8SSagi Grimberg 	int ret;
9313f2304f8SSagi Grimberg 
9325bb052d7SSagi Grimberg 	if (inline_data)
9335bb052d7SSagi Grimberg 		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
9345bb052d7SSagi Grimberg 	else
9355bb052d7SSagi Grimberg 		flags |= MSG_EOR;
9365bb052d7SSagi Grimberg 
9373f2304f8SSagi Grimberg 	if (queue->hdr_digest && !req->offset)
9383f2304f8SSagi Grimberg 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
9393f2304f8SSagi Grimberg 
9403f2304f8SSagi Grimberg 	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
9413f2304f8SSagi Grimberg 			offset_in_page(pdu) + req->offset, len,  flags);
9423f2304f8SSagi Grimberg 	if (unlikely(ret <= 0))
9433f2304f8SSagi Grimberg 		return ret;
9443f2304f8SSagi Grimberg 
9453f2304f8SSagi Grimberg 	len -= ret;
9463f2304f8SSagi Grimberg 	if (!len) {
9473f2304f8SSagi Grimberg 		if (inline_data) {
9483f2304f8SSagi Grimberg 			req->state = NVME_TCP_SEND_DATA;
9493f2304f8SSagi Grimberg 			if (queue->data_digest)
9503f2304f8SSagi Grimberg 				crypto_ahash_init(queue->snd_hash);
9513f2304f8SSagi Grimberg 			nvme_tcp_init_iter(req, WRITE);
9523f2304f8SSagi Grimberg 		} else {
9533f2304f8SSagi Grimberg 			nvme_tcp_done_send_req(queue);
9543f2304f8SSagi Grimberg 		}
9553f2304f8SSagi Grimberg 		return 1;
9563f2304f8SSagi Grimberg 	}
9573f2304f8SSagi Grimberg 	req->offset += ret;
9583f2304f8SSagi Grimberg 
9593f2304f8SSagi Grimberg 	return -EAGAIN;
9603f2304f8SSagi Grimberg }
9613f2304f8SSagi Grimberg 
9623f2304f8SSagi Grimberg static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
9633f2304f8SSagi Grimberg {
9643f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
9653f2304f8SSagi Grimberg 	struct nvme_tcp_data_pdu *pdu = req->pdu;
9663f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
9673f2304f8SSagi Grimberg 	int len = sizeof(*pdu) - req->offset + hdgst;
9683f2304f8SSagi Grimberg 	int ret;
9693f2304f8SSagi Grimberg 
9703f2304f8SSagi Grimberg 	if (queue->hdr_digest && !req->offset)
9713f2304f8SSagi Grimberg 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
9723f2304f8SSagi Grimberg 
9733f2304f8SSagi Grimberg 	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
9743f2304f8SSagi Grimberg 			offset_in_page(pdu) + req->offset, len,
9755bb052d7SSagi Grimberg 			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
9763f2304f8SSagi Grimberg 	if (unlikely(ret <= 0))
9773f2304f8SSagi Grimberg 		return ret;
9783f2304f8SSagi Grimberg 
9793f2304f8SSagi Grimberg 	len -= ret;
9803f2304f8SSagi Grimberg 	if (!len) {
9813f2304f8SSagi Grimberg 		req->state = NVME_TCP_SEND_DATA;
9823f2304f8SSagi Grimberg 		if (queue->data_digest)
9833f2304f8SSagi Grimberg 			crypto_ahash_init(queue->snd_hash);
9843f2304f8SSagi Grimberg 		if (!req->data_sent)
9853f2304f8SSagi Grimberg 			nvme_tcp_init_iter(req, WRITE);
9863f2304f8SSagi Grimberg 		return 1;
9873f2304f8SSagi Grimberg 	}
9883f2304f8SSagi Grimberg 	req->offset += ret;
9893f2304f8SSagi Grimberg 
9903f2304f8SSagi Grimberg 	return -EAGAIN;
9913f2304f8SSagi Grimberg }
9923f2304f8SSagi Grimberg 
9933f2304f8SSagi Grimberg static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
9943f2304f8SSagi Grimberg {
9953f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
9963f2304f8SSagi Grimberg 	int ret;
9973f2304f8SSagi Grimberg 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
9983f2304f8SSagi Grimberg 	struct kvec iov = {
9993f2304f8SSagi Grimberg 		.iov_base = &req->ddgst + req->offset,
10003f2304f8SSagi Grimberg 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
10013f2304f8SSagi Grimberg 	};
10023f2304f8SSagi Grimberg 
10033f2304f8SSagi Grimberg 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
10043f2304f8SSagi Grimberg 	if (unlikely(ret <= 0))
10053f2304f8SSagi Grimberg 		return ret;
10063f2304f8SSagi Grimberg 
10073f2304f8SSagi Grimberg 	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
10083f2304f8SSagi Grimberg 		nvme_tcp_done_send_req(queue);
10093f2304f8SSagi Grimberg 		return 1;
10103f2304f8SSagi Grimberg 	}
10113f2304f8SSagi Grimberg 
10123f2304f8SSagi Grimberg 	req->offset += ret;
10133f2304f8SSagi Grimberg 	return -EAGAIN;
10143f2304f8SSagi Grimberg }
10153f2304f8SSagi Grimberg 
10163f2304f8SSagi Grimberg static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
10173f2304f8SSagi Grimberg {
10183f2304f8SSagi Grimberg 	struct nvme_tcp_request *req;
10193f2304f8SSagi Grimberg 	int ret = 1;
10203f2304f8SSagi Grimberg 
10213f2304f8SSagi Grimberg 	if (!queue->request) {
10223f2304f8SSagi Grimberg 		queue->request = nvme_tcp_fetch_request(queue);
10233f2304f8SSagi Grimberg 		if (!queue->request)
10243f2304f8SSagi Grimberg 			return 0;
10253f2304f8SSagi Grimberg 	}
10263f2304f8SSagi Grimberg 	req = queue->request;
10273f2304f8SSagi Grimberg 
10283f2304f8SSagi Grimberg 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
10293f2304f8SSagi Grimberg 		ret = nvme_tcp_try_send_cmd_pdu(req);
10303f2304f8SSagi Grimberg 		if (ret <= 0)
10313f2304f8SSagi Grimberg 			goto done;
10323f2304f8SSagi Grimberg 		if (!nvme_tcp_has_inline_data(req))
10333f2304f8SSagi Grimberg 			return ret;
10343f2304f8SSagi Grimberg 	}
10353f2304f8SSagi Grimberg 
10363f2304f8SSagi Grimberg 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
10373f2304f8SSagi Grimberg 		ret = nvme_tcp_try_send_data_pdu(req);
10383f2304f8SSagi Grimberg 		if (ret <= 0)
10393f2304f8SSagi Grimberg 			goto done;
10403f2304f8SSagi Grimberg 	}
10413f2304f8SSagi Grimberg 
10423f2304f8SSagi Grimberg 	if (req->state == NVME_TCP_SEND_DATA) {
10433f2304f8SSagi Grimberg 		ret = nvme_tcp_try_send_data(req);
10443f2304f8SSagi Grimberg 		if (ret <= 0)
10453f2304f8SSagi Grimberg 			goto done;
10463f2304f8SSagi Grimberg 	}
10473f2304f8SSagi Grimberg 
10483f2304f8SSagi Grimberg 	if (req->state == NVME_TCP_SEND_DDGST)
10493f2304f8SSagi Grimberg 		ret = nvme_tcp_try_send_ddgst(req);
10503f2304f8SSagi Grimberg done:
10515ff4e112SSagi Grimberg 	if (ret == -EAGAIN) {
10523f2304f8SSagi Grimberg 		ret = 0;
10535ff4e112SSagi Grimberg 	} else if (ret < 0) {
10545ff4e112SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
10555ff4e112SSagi Grimberg 			"failed to send request %d\n", ret);
10565ff4e112SSagi Grimberg 		if (ret != -EPIPE && ret != -ECONNRESET)
10575ff4e112SSagi Grimberg 			nvme_tcp_fail_request(queue->request);
10585ff4e112SSagi Grimberg 		nvme_tcp_done_send_req(queue);
10595ff4e112SSagi Grimberg 	}
10603f2304f8SSagi Grimberg 	return ret;
10613f2304f8SSagi Grimberg }
10623f2304f8SSagi Grimberg 
10633f2304f8SSagi Grimberg static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
10643f2304f8SSagi Grimberg {
106510407ec9SPotnuri Bharat Teja 	struct socket *sock = queue->sock;
106610407ec9SPotnuri Bharat Teja 	struct sock *sk = sock->sk;
10673f2304f8SSagi Grimberg 	read_descriptor_t rd_desc;
10683f2304f8SSagi Grimberg 	int consumed;
10693f2304f8SSagi Grimberg 
10703f2304f8SSagi Grimberg 	rd_desc.arg.data = queue;
10713f2304f8SSagi Grimberg 	rd_desc.count = 1;
10723f2304f8SSagi Grimberg 	lock_sock(sk);
10731a9460ceSSagi Grimberg 	queue->nr_cqe = 0;
107410407ec9SPotnuri Bharat Teja 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
10753f2304f8SSagi Grimberg 	release_sock(sk);
10763f2304f8SSagi Grimberg 	return consumed;
10773f2304f8SSagi Grimberg }
10783f2304f8SSagi Grimberg 
10793f2304f8SSagi Grimberg static void nvme_tcp_io_work(struct work_struct *w)
10803f2304f8SSagi Grimberg {
10813f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue =
10823f2304f8SSagi Grimberg 		container_of(w, struct nvme_tcp_queue, io_work);
1083ddef2957SWunderlich, Mark 	unsigned long deadline = jiffies + msecs_to_jiffies(1);
10843f2304f8SSagi Grimberg 
10853f2304f8SSagi Grimberg 	do {
10863f2304f8SSagi Grimberg 		bool pending = false;
10873f2304f8SSagi Grimberg 		int result;
10883f2304f8SSagi Grimberg 
1089db5ad6b7SSagi Grimberg 		if (mutex_trylock(&queue->send_mutex)) {
10903f2304f8SSagi Grimberg 			result = nvme_tcp_try_send(queue);
1091db5ad6b7SSagi Grimberg 			mutex_unlock(&queue->send_mutex);
10925ff4e112SSagi Grimberg 			if (result > 0)
10933f2304f8SSagi Grimberg 				pending = true;
10945ff4e112SSagi Grimberg 			else if (unlikely(result < 0))
10955ff4e112SSagi Grimberg 				break;
1096db5ad6b7SSagi Grimberg 		}
10973f2304f8SSagi Grimberg 
10983f2304f8SSagi Grimberg 		result = nvme_tcp_try_recv(queue);
10993f2304f8SSagi Grimberg 		if (result > 0)
11003f2304f8SSagi Grimberg 			pending = true;
1101761ad26cSSagi Grimberg 		else if (unlikely(result < 0))
110239d06079SSagi Grimberg 			return;
11033f2304f8SSagi Grimberg 
11043f2304f8SSagi Grimberg 		if (!pending)
11053f2304f8SSagi Grimberg 			return;
11063f2304f8SSagi Grimberg 
1107ddef2957SWunderlich, Mark 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
11083f2304f8SSagi Grimberg 
11093f2304f8SSagi Grimberg 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
11103f2304f8SSagi Grimberg }
11113f2304f8SSagi Grimberg 
11123f2304f8SSagi Grimberg static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
11133f2304f8SSagi Grimberg {
11143f2304f8SSagi Grimberg 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
11153f2304f8SSagi Grimberg 
11163f2304f8SSagi Grimberg 	ahash_request_free(queue->rcv_hash);
11173f2304f8SSagi Grimberg 	ahash_request_free(queue->snd_hash);
11183f2304f8SSagi Grimberg 	crypto_free_ahash(tfm);
11193f2304f8SSagi Grimberg }
11203f2304f8SSagi Grimberg 
11213f2304f8SSagi Grimberg static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
11223f2304f8SSagi Grimberg {
11233f2304f8SSagi Grimberg 	struct crypto_ahash *tfm;
11243f2304f8SSagi Grimberg 
11253f2304f8SSagi Grimberg 	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
11263f2304f8SSagi Grimberg 	if (IS_ERR(tfm))
11273f2304f8SSagi Grimberg 		return PTR_ERR(tfm);
11283f2304f8SSagi Grimberg 
11293f2304f8SSagi Grimberg 	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
11303f2304f8SSagi Grimberg 	if (!queue->snd_hash)
11313f2304f8SSagi Grimberg 		goto free_tfm;
11323f2304f8SSagi Grimberg 	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
11333f2304f8SSagi Grimberg 
11343f2304f8SSagi Grimberg 	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
11353f2304f8SSagi Grimberg 	if (!queue->rcv_hash)
11363f2304f8SSagi Grimberg 		goto free_snd_hash;
11373f2304f8SSagi Grimberg 	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
11383f2304f8SSagi Grimberg 
11393f2304f8SSagi Grimberg 	return 0;
11403f2304f8SSagi Grimberg free_snd_hash:
11413f2304f8SSagi Grimberg 	ahash_request_free(queue->snd_hash);
11423f2304f8SSagi Grimberg free_tfm:
11433f2304f8SSagi Grimberg 	crypto_free_ahash(tfm);
11443f2304f8SSagi Grimberg 	return -ENOMEM;
11453f2304f8SSagi Grimberg }
11463f2304f8SSagi Grimberg 
11473f2304f8SSagi Grimberg static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
11483f2304f8SSagi Grimberg {
11493f2304f8SSagi Grimberg 	struct nvme_tcp_request *async = &ctrl->async_req;
11503f2304f8SSagi Grimberg 
11513f2304f8SSagi Grimberg 	page_frag_free(async->pdu);
11523f2304f8SSagi Grimberg }
11533f2304f8SSagi Grimberg 
11543f2304f8SSagi Grimberg static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
11553f2304f8SSagi Grimberg {
11563f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
11573f2304f8SSagi Grimberg 	struct nvme_tcp_request *async = &ctrl->async_req;
11583f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
11593f2304f8SSagi Grimberg 
11603f2304f8SSagi Grimberg 	async->pdu = page_frag_alloc(&queue->pf_cache,
11613f2304f8SSagi Grimberg 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
11623f2304f8SSagi Grimberg 		GFP_KERNEL | __GFP_ZERO);
11633f2304f8SSagi Grimberg 	if (!async->pdu)
11643f2304f8SSagi Grimberg 		return -ENOMEM;
11653f2304f8SSagi Grimberg 
11663f2304f8SSagi Grimberg 	async->queue = &ctrl->queues[0];
11673f2304f8SSagi Grimberg 	return 0;
11683f2304f8SSagi Grimberg }
11693f2304f8SSagi Grimberg 
11703f2304f8SSagi Grimberg static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
11713f2304f8SSagi Grimberg {
11723f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
11733f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
11743f2304f8SSagi Grimberg 
11753f2304f8SSagi Grimberg 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
11763f2304f8SSagi Grimberg 		return;
11773f2304f8SSagi Grimberg 
11783f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest)
11793f2304f8SSagi Grimberg 		nvme_tcp_free_crypto(queue);
11803f2304f8SSagi Grimberg 
11813f2304f8SSagi Grimberg 	sock_release(queue->sock);
11823f2304f8SSagi Grimberg 	kfree(queue->pdu);
11833f2304f8SSagi Grimberg }
11843f2304f8SSagi Grimberg 
11853f2304f8SSagi Grimberg static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
11863f2304f8SSagi Grimberg {
11873f2304f8SSagi Grimberg 	struct nvme_tcp_icreq_pdu *icreq;
11883f2304f8SSagi Grimberg 	struct nvme_tcp_icresp_pdu *icresp;
11893f2304f8SSagi Grimberg 	struct msghdr msg = {};
11903f2304f8SSagi Grimberg 	struct kvec iov;
11913f2304f8SSagi Grimberg 	bool ctrl_hdgst, ctrl_ddgst;
11923f2304f8SSagi Grimberg 	int ret;
11933f2304f8SSagi Grimberg 
11943f2304f8SSagi Grimberg 	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
11953f2304f8SSagi Grimberg 	if (!icreq)
11963f2304f8SSagi Grimberg 		return -ENOMEM;
11973f2304f8SSagi Grimberg 
11983f2304f8SSagi Grimberg 	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
11993f2304f8SSagi Grimberg 	if (!icresp) {
12003f2304f8SSagi Grimberg 		ret = -ENOMEM;
12013f2304f8SSagi Grimberg 		goto free_icreq;
12023f2304f8SSagi Grimberg 	}
12033f2304f8SSagi Grimberg 
12043f2304f8SSagi Grimberg 	icreq->hdr.type = nvme_tcp_icreq;
12053f2304f8SSagi Grimberg 	icreq->hdr.hlen = sizeof(*icreq);
12063f2304f8SSagi Grimberg 	icreq->hdr.pdo = 0;
12073f2304f8SSagi Grimberg 	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
12083f2304f8SSagi Grimberg 	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
12093f2304f8SSagi Grimberg 	icreq->maxr2t = 0; /* single inflight r2t supported */
12103f2304f8SSagi Grimberg 	icreq->hpda = 0; /* no alignment constraint */
12113f2304f8SSagi Grimberg 	if (queue->hdr_digest)
12123f2304f8SSagi Grimberg 		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
12133f2304f8SSagi Grimberg 	if (queue->data_digest)
12143f2304f8SSagi Grimberg 		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
12153f2304f8SSagi Grimberg 
12163f2304f8SSagi Grimberg 	iov.iov_base = icreq;
12173f2304f8SSagi Grimberg 	iov.iov_len = sizeof(*icreq);
12183f2304f8SSagi Grimberg 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
12193f2304f8SSagi Grimberg 	if (ret < 0)
12203f2304f8SSagi Grimberg 		goto free_icresp;
12213f2304f8SSagi Grimberg 
12223f2304f8SSagi Grimberg 	memset(&msg, 0, sizeof(msg));
12233f2304f8SSagi Grimberg 	iov.iov_base = icresp;
12243f2304f8SSagi Grimberg 	iov.iov_len = sizeof(*icresp);
12253f2304f8SSagi Grimberg 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
12263f2304f8SSagi Grimberg 			iov.iov_len, msg.msg_flags);
12273f2304f8SSagi Grimberg 	if (ret < 0)
12283f2304f8SSagi Grimberg 		goto free_icresp;
12293f2304f8SSagi Grimberg 
12303f2304f8SSagi Grimberg 	ret = -EINVAL;
12313f2304f8SSagi Grimberg 	if (icresp->hdr.type != nvme_tcp_icresp) {
12323f2304f8SSagi Grimberg 		pr_err("queue %d: bad type returned %d\n",
12333f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->hdr.type);
12343f2304f8SSagi Grimberg 		goto free_icresp;
12353f2304f8SSagi Grimberg 	}
12363f2304f8SSagi Grimberg 
12373f2304f8SSagi Grimberg 	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
12383f2304f8SSagi Grimberg 		pr_err("queue %d: bad pdu length returned %d\n",
12393f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->hdr.plen);
12403f2304f8SSagi Grimberg 		goto free_icresp;
12413f2304f8SSagi Grimberg 	}
12423f2304f8SSagi Grimberg 
12433f2304f8SSagi Grimberg 	if (icresp->pfv != NVME_TCP_PFV_1_0) {
12443f2304f8SSagi Grimberg 		pr_err("queue %d: bad pfv returned %d\n",
12453f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->pfv);
12463f2304f8SSagi Grimberg 		goto free_icresp;
12473f2304f8SSagi Grimberg 	}
12483f2304f8SSagi Grimberg 
12493f2304f8SSagi Grimberg 	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
12503f2304f8SSagi Grimberg 	if ((queue->data_digest && !ctrl_ddgst) ||
12513f2304f8SSagi Grimberg 	    (!queue->data_digest && ctrl_ddgst)) {
12523f2304f8SSagi Grimberg 		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
12533f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue),
12543f2304f8SSagi Grimberg 			queue->data_digest ? "enabled" : "disabled",
12553f2304f8SSagi Grimberg 			ctrl_ddgst ? "enabled" : "disabled");
12563f2304f8SSagi Grimberg 		goto free_icresp;
12573f2304f8SSagi Grimberg 	}
12583f2304f8SSagi Grimberg 
12593f2304f8SSagi Grimberg 	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
12603f2304f8SSagi Grimberg 	if ((queue->hdr_digest && !ctrl_hdgst) ||
12613f2304f8SSagi Grimberg 	    (!queue->hdr_digest && ctrl_hdgst)) {
12623f2304f8SSagi Grimberg 		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
12633f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue),
12643f2304f8SSagi Grimberg 			queue->hdr_digest ? "enabled" : "disabled",
12653f2304f8SSagi Grimberg 			ctrl_hdgst ? "enabled" : "disabled");
12663f2304f8SSagi Grimberg 		goto free_icresp;
12673f2304f8SSagi Grimberg 	}
12683f2304f8SSagi Grimberg 
12693f2304f8SSagi Grimberg 	if (icresp->cpda != 0) {
12703f2304f8SSagi Grimberg 		pr_err("queue %d: unsupported cpda returned %d\n",
12713f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue), icresp->cpda);
12723f2304f8SSagi Grimberg 		goto free_icresp;
12733f2304f8SSagi Grimberg 	}
12743f2304f8SSagi Grimberg 
12753f2304f8SSagi Grimberg 	ret = 0;
12763f2304f8SSagi Grimberg free_icresp:
12773f2304f8SSagi Grimberg 	kfree(icresp);
12783f2304f8SSagi Grimberg free_icreq:
12793f2304f8SSagi Grimberg 	kfree(icreq);
12803f2304f8SSagi Grimberg 	return ret;
12813f2304f8SSagi Grimberg }
12823f2304f8SSagi Grimberg 
128340510a63SSagi Grimberg static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
128440510a63SSagi Grimberg {
128540510a63SSagi Grimberg 	return nvme_tcp_queue_id(queue) == 0;
128640510a63SSagi Grimberg }
128740510a63SSagi Grimberg 
128840510a63SSagi Grimberg static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
128940510a63SSagi Grimberg {
129040510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
129140510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
129240510a63SSagi Grimberg 
129340510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
129440510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
129540510a63SSagi Grimberg }
129640510a63SSagi Grimberg 
129740510a63SSagi Grimberg static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
129840510a63SSagi Grimberg {
129940510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
130040510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
130140510a63SSagi Grimberg 
130240510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
130340510a63SSagi Grimberg 		!nvme_tcp_default_queue(queue) &&
130440510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
130540510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_READ];
130640510a63SSagi Grimberg }
130740510a63SSagi Grimberg 
130840510a63SSagi Grimberg static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
130940510a63SSagi Grimberg {
131040510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
131140510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
131240510a63SSagi Grimberg 
131340510a63SSagi Grimberg 	return !nvme_tcp_admin_queue(queue) &&
131440510a63SSagi Grimberg 		!nvme_tcp_default_queue(queue) &&
131540510a63SSagi Grimberg 		!nvme_tcp_read_queue(queue) &&
131640510a63SSagi Grimberg 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
131740510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_READ] +
131840510a63SSagi Grimberg 			  ctrl->io_queues[HCTX_TYPE_POLL];
131940510a63SSagi Grimberg }
132040510a63SSagi Grimberg 
132140510a63SSagi Grimberg static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
132240510a63SSagi Grimberg {
132340510a63SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
132440510a63SSagi Grimberg 	int qid = nvme_tcp_queue_id(queue);
132540510a63SSagi Grimberg 	int n = 0;
132640510a63SSagi Grimberg 
132740510a63SSagi Grimberg 	if (nvme_tcp_default_queue(queue))
132840510a63SSagi Grimberg 		n = qid - 1;
132940510a63SSagi Grimberg 	else if (nvme_tcp_read_queue(queue))
133040510a63SSagi Grimberg 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
133140510a63SSagi Grimberg 	else if (nvme_tcp_poll_queue(queue))
133240510a63SSagi Grimberg 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
133340510a63SSagi Grimberg 				ctrl->io_queues[HCTX_TYPE_READ] - 1;
133440510a63SSagi Grimberg 	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
133540510a63SSagi Grimberg }
133640510a63SSagi Grimberg 
13373f2304f8SSagi Grimberg static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
13383f2304f8SSagi Grimberg 		int qid, size_t queue_size)
13393f2304f8SSagi Grimberg {
13403f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
13413f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
13426ebf71baSChristoph Hellwig 	int ret, rcv_pdu_size;
13433f2304f8SSagi Grimberg 
13443f2304f8SSagi Grimberg 	queue->ctrl = ctrl;
13453f2304f8SSagi Grimberg 	INIT_LIST_HEAD(&queue->send_list);
13463f2304f8SSagi Grimberg 	spin_lock_init(&queue->lock);
1347db5ad6b7SSagi Grimberg 	mutex_init(&queue->send_mutex);
13483f2304f8SSagi Grimberg 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
13493f2304f8SSagi Grimberg 	queue->queue_size = queue_size;
13503f2304f8SSagi Grimberg 
13513f2304f8SSagi Grimberg 	if (qid > 0)
13529924b030SIsrael Rukshin 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
13533f2304f8SSagi Grimberg 	else
13543f2304f8SSagi Grimberg 		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
13553f2304f8SSagi Grimberg 						NVME_TCP_ADMIN_CCSZ;
13563f2304f8SSagi Grimberg 
13573f2304f8SSagi Grimberg 	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
13583f2304f8SSagi Grimberg 			IPPROTO_TCP, &queue->sock);
13593f2304f8SSagi Grimberg 	if (ret) {
13609924b030SIsrael Rukshin 		dev_err(nctrl->device,
13613f2304f8SSagi Grimberg 			"failed to create socket: %d\n", ret);
13623f2304f8SSagi Grimberg 		return ret;
13633f2304f8SSagi Grimberg 	}
13643f2304f8SSagi Grimberg 
13653f2304f8SSagi Grimberg 	/* Single syn retry */
1366557eadfcSChristoph Hellwig 	tcp_sock_set_syncnt(queue->sock->sk, 1);
13673f2304f8SSagi Grimberg 
13683f2304f8SSagi Grimberg 	/* Set TCP no delay */
136912abc5eeSChristoph Hellwig 	tcp_sock_set_nodelay(queue->sock->sk);
13703f2304f8SSagi Grimberg 
13713f2304f8SSagi Grimberg 	/*
13723f2304f8SSagi Grimberg 	 * Cleanup whatever is sitting in the TCP transmit queue on socket
13733f2304f8SSagi Grimberg 	 * close. This is done to prevent stale data from being sent should
13743f2304f8SSagi Grimberg 	 * the network connection be restored before TCP times out.
13753f2304f8SSagi Grimberg 	 */
1376c433594cSChristoph Hellwig 	sock_no_linger(queue->sock->sk);
13773f2304f8SSagi Grimberg 
13786e434967SChristoph Hellwig 	if (so_priority > 0)
13796e434967SChristoph Hellwig 		sock_set_priority(queue->sock->sk, so_priority);
13809912ade3SWunderlich, Mark 
1381bb13985dSIsrael Rukshin 	/* Set socket type of service */
13826ebf71baSChristoph Hellwig 	if (nctrl->opts->tos >= 0)
13836ebf71baSChristoph Hellwig 		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1384bb13985dSIsrael Rukshin 
1385adc99fd3SSagi Grimberg 	/* Set 10 seconds timeout for icresp recvmsg */
1386adc99fd3SSagi Grimberg 	queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1387adc99fd3SSagi Grimberg 
13883f2304f8SSagi Grimberg 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
138940510a63SSagi Grimberg 	nvme_tcp_set_queue_io_cpu(queue);
13903f2304f8SSagi Grimberg 	queue->request = NULL;
13913f2304f8SSagi Grimberg 	queue->data_remaining = 0;
13923f2304f8SSagi Grimberg 	queue->ddgst_remaining = 0;
13933f2304f8SSagi Grimberg 	queue->pdu_remaining = 0;
13943f2304f8SSagi Grimberg 	queue->pdu_offset = 0;
13953f2304f8SSagi Grimberg 	sk_set_memalloc(queue->sock->sk);
13963f2304f8SSagi Grimberg 
13979924b030SIsrael Rukshin 	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
13983f2304f8SSagi Grimberg 		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
13993f2304f8SSagi Grimberg 			sizeof(ctrl->src_addr));
14003f2304f8SSagi Grimberg 		if (ret) {
14019924b030SIsrael Rukshin 			dev_err(nctrl->device,
14023f2304f8SSagi Grimberg 				"failed to bind queue %d socket %d\n",
14033f2304f8SSagi Grimberg 				qid, ret);
14043f2304f8SSagi Grimberg 			goto err_sock;
14053f2304f8SSagi Grimberg 		}
14063f2304f8SSagi Grimberg 	}
14073f2304f8SSagi Grimberg 
14083f2304f8SSagi Grimberg 	queue->hdr_digest = nctrl->opts->hdr_digest;
14093f2304f8SSagi Grimberg 	queue->data_digest = nctrl->opts->data_digest;
14103f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest) {
14113f2304f8SSagi Grimberg 		ret = nvme_tcp_alloc_crypto(queue);
14123f2304f8SSagi Grimberg 		if (ret) {
14139924b030SIsrael Rukshin 			dev_err(nctrl->device,
14143f2304f8SSagi Grimberg 				"failed to allocate queue %d crypto\n", qid);
14153f2304f8SSagi Grimberg 			goto err_sock;
14163f2304f8SSagi Grimberg 		}
14173f2304f8SSagi Grimberg 	}
14183f2304f8SSagi Grimberg 
14193f2304f8SSagi Grimberg 	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
14203f2304f8SSagi Grimberg 			nvme_tcp_hdgst_len(queue);
14213f2304f8SSagi Grimberg 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
14223f2304f8SSagi Grimberg 	if (!queue->pdu) {
14233f2304f8SSagi Grimberg 		ret = -ENOMEM;
14243f2304f8SSagi Grimberg 		goto err_crypto;
14253f2304f8SSagi Grimberg 	}
14263f2304f8SSagi Grimberg 
14279924b030SIsrael Rukshin 	dev_dbg(nctrl->device, "connecting queue %d\n",
14283f2304f8SSagi Grimberg 			nvme_tcp_queue_id(queue));
14293f2304f8SSagi Grimberg 
14303f2304f8SSagi Grimberg 	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
14313f2304f8SSagi Grimberg 		sizeof(ctrl->addr), 0);
14323f2304f8SSagi Grimberg 	if (ret) {
14339924b030SIsrael Rukshin 		dev_err(nctrl->device,
14343f2304f8SSagi Grimberg 			"failed to connect socket: %d\n", ret);
14353f2304f8SSagi Grimberg 		goto err_rcv_pdu;
14363f2304f8SSagi Grimberg 	}
14373f2304f8SSagi Grimberg 
14383f2304f8SSagi Grimberg 	ret = nvme_tcp_init_connection(queue);
14393f2304f8SSagi Grimberg 	if (ret)
14403f2304f8SSagi Grimberg 		goto err_init_connect;
14413f2304f8SSagi Grimberg 
14423f2304f8SSagi Grimberg 	queue->rd_enabled = true;
14433f2304f8SSagi Grimberg 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
14443f2304f8SSagi Grimberg 	nvme_tcp_init_recv_ctx(queue);
14453f2304f8SSagi Grimberg 
14463f2304f8SSagi Grimberg 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
14473f2304f8SSagi Grimberg 	queue->sock->sk->sk_user_data = queue;
14483f2304f8SSagi Grimberg 	queue->state_change = queue->sock->sk->sk_state_change;
14493f2304f8SSagi Grimberg 	queue->data_ready = queue->sock->sk->sk_data_ready;
14503f2304f8SSagi Grimberg 	queue->write_space = queue->sock->sk->sk_write_space;
14513f2304f8SSagi Grimberg 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
14523f2304f8SSagi Grimberg 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
14533f2304f8SSagi Grimberg 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1454ac1c4e18SSebastian Andrzej Siewior #ifdef CONFIG_NET_RX_BUSY_POLL
14551a9460ceSSagi Grimberg 	queue->sock->sk->sk_ll_usec = 1;
1456ac1c4e18SSebastian Andrzej Siewior #endif
14573f2304f8SSagi Grimberg 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
14583f2304f8SSagi Grimberg 
14593f2304f8SSagi Grimberg 	return 0;
14603f2304f8SSagi Grimberg 
14613f2304f8SSagi Grimberg err_init_connect:
14623f2304f8SSagi Grimberg 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
14633f2304f8SSagi Grimberg err_rcv_pdu:
14643f2304f8SSagi Grimberg 	kfree(queue->pdu);
14653f2304f8SSagi Grimberg err_crypto:
14663f2304f8SSagi Grimberg 	if (queue->hdr_digest || queue->data_digest)
14673f2304f8SSagi Grimberg 		nvme_tcp_free_crypto(queue);
14683f2304f8SSagi Grimberg err_sock:
14693f2304f8SSagi Grimberg 	sock_release(queue->sock);
14703f2304f8SSagi Grimberg 	queue->sock = NULL;
14713f2304f8SSagi Grimberg 	return ret;
14723f2304f8SSagi Grimberg }
14733f2304f8SSagi Grimberg 
14743f2304f8SSagi Grimberg static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
14753f2304f8SSagi Grimberg {
14763f2304f8SSagi Grimberg 	struct socket *sock = queue->sock;
14773f2304f8SSagi Grimberg 
14783f2304f8SSagi Grimberg 	write_lock_bh(&sock->sk->sk_callback_lock);
14793f2304f8SSagi Grimberg 	sock->sk->sk_user_data  = NULL;
14803f2304f8SSagi Grimberg 	sock->sk->sk_data_ready = queue->data_ready;
14813f2304f8SSagi Grimberg 	sock->sk->sk_state_change = queue->state_change;
14823f2304f8SSagi Grimberg 	sock->sk->sk_write_space  = queue->write_space;
14833f2304f8SSagi Grimberg 	write_unlock_bh(&sock->sk->sk_callback_lock);
14843f2304f8SSagi Grimberg }
14853f2304f8SSagi Grimberg 
14863f2304f8SSagi Grimberg static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
14873f2304f8SSagi Grimberg {
14883f2304f8SSagi Grimberg 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
14893f2304f8SSagi Grimberg 	nvme_tcp_restore_sock_calls(queue);
14903f2304f8SSagi Grimberg 	cancel_work_sync(&queue->io_work);
14913f2304f8SSagi Grimberg }
14923f2304f8SSagi Grimberg 
14933f2304f8SSagi Grimberg static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
14943f2304f8SSagi Grimberg {
14953f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
14963f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
14973f2304f8SSagi Grimberg 
14983f2304f8SSagi Grimberg 	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
14993f2304f8SSagi Grimberg 		return;
15003f2304f8SSagi Grimberg 
15013f2304f8SSagi Grimberg 	__nvme_tcp_stop_queue(queue);
15023f2304f8SSagi Grimberg }
15033f2304f8SSagi Grimberg 
15043f2304f8SSagi Grimberg static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
15053f2304f8SSagi Grimberg {
15063f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
15073f2304f8SSagi Grimberg 	int ret;
15083f2304f8SSagi Grimberg 
15093f2304f8SSagi Grimberg 	if (idx)
151026c68227SSagi Grimberg 		ret = nvmf_connect_io_queue(nctrl, idx, false);
15113f2304f8SSagi Grimberg 	else
15123f2304f8SSagi Grimberg 		ret = nvmf_connect_admin_queue(nctrl);
15133f2304f8SSagi Grimberg 
15143f2304f8SSagi Grimberg 	if (!ret) {
15153f2304f8SSagi Grimberg 		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
15163f2304f8SSagi Grimberg 	} else {
1517f34e2589SSagi Grimberg 		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
15183f2304f8SSagi Grimberg 			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
15193f2304f8SSagi Grimberg 		dev_err(nctrl->device,
15203f2304f8SSagi Grimberg 			"failed to connect queue: %d ret=%d\n", idx, ret);
15213f2304f8SSagi Grimberg 	}
15223f2304f8SSagi Grimberg 	return ret;
15233f2304f8SSagi Grimberg }
15243f2304f8SSagi Grimberg 
15253f2304f8SSagi Grimberg static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
15263f2304f8SSagi Grimberg 		bool admin)
15273f2304f8SSagi Grimberg {
15283f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
15293f2304f8SSagi Grimberg 	struct blk_mq_tag_set *set;
15303f2304f8SSagi Grimberg 	int ret;
15313f2304f8SSagi Grimberg 
15323f2304f8SSagi Grimberg 	if (admin) {
15333f2304f8SSagi Grimberg 		set = &ctrl->admin_tag_set;
15343f2304f8SSagi Grimberg 		memset(set, 0, sizeof(*set));
15353f2304f8SSagi Grimberg 		set->ops = &nvme_tcp_admin_mq_ops;
15363f2304f8SSagi Grimberg 		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
15373f2304f8SSagi Grimberg 		set->reserved_tags = 2; /* connect + keep-alive */
1538610c8235SMax Gurtovoy 		set->numa_node = nctrl->numa_node;
1539db5ad6b7SSagi Grimberg 		set->flags = BLK_MQ_F_BLOCKING;
15403f2304f8SSagi Grimberg 		set->cmd_size = sizeof(struct nvme_tcp_request);
15413f2304f8SSagi Grimberg 		set->driver_data = ctrl;
15423f2304f8SSagi Grimberg 		set->nr_hw_queues = 1;
15433f2304f8SSagi Grimberg 		set->timeout = ADMIN_TIMEOUT;
15443f2304f8SSagi Grimberg 	} else {
15453f2304f8SSagi Grimberg 		set = &ctrl->tag_set;
15463f2304f8SSagi Grimberg 		memset(set, 0, sizeof(*set));
15473f2304f8SSagi Grimberg 		set->ops = &nvme_tcp_mq_ops;
15483f2304f8SSagi Grimberg 		set->queue_depth = nctrl->sqsize + 1;
15493f2304f8SSagi Grimberg 		set->reserved_tags = 1; /* fabric connect */
1550610c8235SMax Gurtovoy 		set->numa_node = nctrl->numa_node;
1551db5ad6b7SSagi Grimberg 		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
15523f2304f8SSagi Grimberg 		set->cmd_size = sizeof(struct nvme_tcp_request);
15533f2304f8SSagi Grimberg 		set->driver_data = ctrl;
15543f2304f8SSagi Grimberg 		set->nr_hw_queues = nctrl->queue_count - 1;
15553f2304f8SSagi Grimberg 		set->timeout = NVME_IO_TIMEOUT;
15561a9460ceSSagi Grimberg 		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
15573f2304f8SSagi Grimberg 	}
15583f2304f8SSagi Grimberg 
15593f2304f8SSagi Grimberg 	ret = blk_mq_alloc_tag_set(set);
15603f2304f8SSagi Grimberg 	if (ret)
15613f2304f8SSagi Grimberg 		return ERR_PTR(ret);
15623f2304f8SSagi Grimberg 
15633f2304f8SSagi Grimberg 	return set;
15643f2304f8SSagi Grimberg }
15653f2304f8SSagi Grimberg 
15663f2304f8SSagi Grimberg static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
15673f2304f8SSagi Grimberg {
15683f2304f8SSagi Grimberg 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
15693f2304f8SSagi Grimberg 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
15703f2304f8SSagi Grimberg 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
15713f2304f8SSagi Grimberg 	}
15723f2304f8SSagi Grimberg 
15733f2304f8SSagi Grimberg 	nvme_tcp_free_queue(ctrl, 0);
15743f2304f8SSagi Grimberg }
15753f2304f8SSagi Grimberg 
15763f2304f8SSagi Grimberg static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
15773f2304f8SSagi Grimberg {
15783f2304f8SSagi Grimberg 	int i;
15793f2304f8SSagi Grimberg 
15803f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++)
15813f2304f8SSagi Grimberg 		nvme_tcp_free_queue(ctrl, i);
15823f2304f8SSagi Grimberg }
15833f2304f8SSagi Grimberg 
15843f2304f8SSagi Grimberg static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
15853f2304f8SSagi Grimberg {
15863f2304f8SSagi Grimberg 	int i;
15873f2304f8SSagi Grimberg 
15883f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++)
15893f2304f8SSagi Grimberg 		nvme_tcp_stop_queue(ctrl, i);
15903f2304f8SSagi Grimberg }
15913f2304f8SSagi Grimberg 
15923f2304f8SSagi Grimberg static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
15933f2304f8SSagi Grimberg {
15943f2304f8SSagi Grimberg 	int i, ret = 0;
15953f2304f8SSagi Grimberg 
15963f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++) {
15973f2304f8SSagi Grimberg 		ret = nvme_tcp_start_queue(ctrl, i);
15983f2304f8SSagi Grimberg 		if (ret)
15993f2304f8SSagi Grimberg 			goto out_stop_queues;
16003f2304f8SSagi Grimberg 	}
16013f2304f8SSagi Grimberg 
16023f2304f8SSagi Grimberg 	return 0;
16033f2304f8SSagi Grimberg 
16043f2304f8SSagi Grimberg out_stop_queues:
16053f2304f8SSagi Grimberg 	for (i--; i >= 1; i--)
16063f2304f8SSagi Grimberg 		nvme_tcp_stop_queue(ctrl, i);
16073f2304f8SSagi Grimberg 	return ret;
16083f2304f8SSagi Grimberg }
16093f2304f8SSagi Grimberg 
16103f2304f8SSagi Grimberg static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
16113f2304f8SSagi Grimberg {
16123f2304f8SSagi Grimberg 	int ret;
16133f2304f8SSagi Grimberg 
16143f2304f8SSagi Grimberg 	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
16153f2304f8SSagi Grimberg 	if (ret)
16163f2304f8SSagi Grimberg 		return ret;
16173f2304f8SSagi Grimberg 
16183f2304f8SSagi Grimberg 	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
16193f2304f8SSagi Grimberg 	if (ret)
16203f2304f8SSagi Grimberg 		goto out_free_queue;
16213f2304f8SSagi Grimberg 
16223f2304f8SSagi Grimberg 	return 0;
16233f2304f8SSagi Grimberg 
16243f2304f8SSagi Grimberg out_free_queue:
16253f2304f8SSagi Grimberg 	nvme_tcp_free_queue(ctrl, 0);
16263f2304f8SSagi Grimberg 	return ret;
16273f2304f8SSagi Grimberg }
16283f2304f8SSagi Grimberg 
1629efb973b1SSagi Grimberg static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
16303f2304f8SSagi Grimberg {
16313f2304f8SSagi Grimberg 	int i, ret;
16323f2304f8SSagi Grimberg 
16333f2304f8SSagi Grimberg 	for (i = 1; i < ctrl->queue_count; i++) {
16343f2304f8SSagi Grimberg 		ret = nvme_tcp_alloc_queue(ctrl, i,
16353f2304f8SSagi Grimberg 				ctrl->sqsize + 1);
16363f2304f8SSagi Grimberg 		if (ret)
16373f2304f8SSagi Grimberg 			goto out_free_queues;
16383f2304f8SSagi Grimberg 	}
16393f2304f8SSagi Grimberg 
16403f2304f8SSagi Grimberg 	return 0;
16413f2304f8SSagi Grimberg 
16423f2304f8SSagi Grimberg out_free_queues:
16433f2304f8SSagi Grimberg 	for (i--; i >= 1; i--)
16443f2304f8SSagi Grimberg 		nvme_tcp_free_queue(ctrl, i);
16453f2304f8SSagi Grimberg 
16463f2304f8SSagi Grimberg 	return ret;
16473f2304f8SSagi Grimberg }
16483f2304f8SSagi Grimberg 
16493f2304f8SSagi Grimberg static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
16503f2304f8SSagi Grimberg {
1651873946f4SSagi Grimberg 	unsigned int nr_io_queues;
1652873946f4SSagi Grimberg 
1653873946f4SSagi Grimberg 	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1654873946f4SSagi Grimberg 	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
16551a9460ceSSagi Grimberg 	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1656873946f4SSagi Grimberg 
1657873946f4SSagi Grimberg 	return nr_io_queues;
16583f2304f8SSagi Grimberg }
16593f2304f8SSagi Grimberg 
166064861993SSagi Grimberg static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
166164861993SSagi Grimberg 		unsigned int nr_io_queues)
166264861993SSagi Grimberg {
166364861993SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
166464861993SSagi Grimberg 	struct nvmf_ctrl_options *opts = nctrl->opts;
166564861993SSagi Grimberg 
166664861993SSagi Grimberg 	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
166764861993SSagi Grimberg 		/*
166864861993SSagi Grimberg 		 * separate read/write queues
166964861993SSagi Grimberg 		 * hand out dedicated default queues only after we have
167064861993SSagi Grimberg 		 * sufficient read queues.
167164861993SSagi Grimberg 		 */
167264861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
167364861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
167464861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
167564861993SSagi Grimberg 			min(opts->nr_write_queues, nr_io_queues);
167664861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
167764861993SSagi Grimberg 	} else {
167864861993SSagi Grimberg 		/*
167964861993SSagi Grimberg 		 * shared read/write queues
168064861993SSagi Grimberg 		 * either no write queues were requested, or we don't have
168164861993SSagi Grimberg 		 * sufficient queue count to have dedicated default queues.
168264861993SSagi Grimberg 		 */
168364861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
168464861993SSagi Grimberg 			min(opts->nr_io_queues, nr_io_queues);
168564861993SSagi Grimberg 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
168664861993SSagi Grimberg 	}
16871a9460ceSSagi Grimberg 
16881a9460ceSSagi Grimberg 	if (opts->nr_poll_queues && nr_io_queues) {
16891a9460ceSSagi Grimberg 		/* map dedicated poll queues only if we have queues left */
16901a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_POLL] =
16911a9460ceSSagi Grimberg 			min(opts->nr_poll_queues, nr_io_queues);
16921a9460ceSSagi Grimberg 	}
169364861993SSagi Grimberg }
169464861993SSagi Grimberg 
1695efb973b1SSagi Grimberg static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
16963f2304f8SSagi Grimberg {
16973f2304f8SSagi Grimberg 	unsigned int nr_io_queues;
16983f2304f8SSagi Grimberg 	int ret;
16993f2304f8SSagi Grimberg 
17003f2304f8SSagi Grimberg 	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
17013f2304f8SSagi Grimberg 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
17023f2304f8SSagi Grimberg 	if (ret)
17033f2304f8SSagi Grimberg 		return ret;
17043f2304f8SSagi Grimberg 
17053f2304f8SSagi Grimberg 	ctrl->queue_count = nr_io_queues + 1;
17063f2304f8SSagi Grimberg 	if (ctrl->queue_count < 2)
17073f2304f8SSagi Grimberg 		return 0;
17083f2304f8SSagi Grimberg 
17093f2304f8SSagi Grimberg 	dev_info(ctrl->device,
17103f2304f8SSagi Grimberg 		"creating %d I/O queues.\n", nr_io_queues);
17113f2304f8SSagi Grimberg 
171264861993SSagi Grimberg 	nvme_tcp_set_io_queues(ctrl, nr_io_queues);
171364861993SSagi Grimberg 
1714efb973b1SSagi Grimberg 	return __nvme_tcp_alloc_io_queues(ctrl);
17153f2304f8SSagi Grimberg }
17163f2304f8SSagi Grimberg 
17173f2304f8SSagi Grimberg static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
17183f2304f8SSagi Grimberg {
17193f2304f8SSagi Grimberg 	nvme_tcp_stop_io_queues(ctrl);
17203f2304f8SSagi Grimberg 	if (remove) {
17213f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->connect_q);
17223f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->tagset);
17233f2304f8SSagi Grimberg 	}
17243f2304f8SSagi Grimberg 	nvme_tcp_free_io_queues(ctrl);
17253f2304f8SSagi Grimberg }
17263f2304f8SSagi Grimberg 
17273f2304f8SSagi Grimberg static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
17283f2304f8SSagi Grimberg {
17293f2304f8SSagi Grimberg 	int ret;
17303f2304f8SSagi Grimberg 
1731efb973b1SSagi Grimberg 	ret = nvme_tcp_alloc_io_queues(ctrl);
17323f2304f8SSagi Grimberg 	if (ret)
17333f2304f8SSagi Grimberg 		return ret;
17343f2304f8SSagi Grimberg 
17353f2304f8SSagi Grimberg 	if (new) {
17363f2304f8SSagi Grimberg 		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
17373f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->tagset)) {
17383f2304f8SSagi Grimberg 			ret = PTR_ERR(ctrl->tagset);
17393f2304f8SSagi Grimberg 			goto out_free_io_queues;
17403f2304f8SSagi Grimberg 		}
17413f2304f8SSagi Grimberg 
17423f2304f8SSagi Grimberg 		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
17433f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->connect_q)) {
17443f2304f8SSagi Grimberg 			ret = PTR_ERR(ctrl->connect_q);
17453f2304f8SSagi Grimberg 			goto out_free_tag_set;
17463f2304f8SSagi Grimberg 		}
17473f2304f8SSagi Grimberg 	} else {
17483f2304f8SSagi Grimberg 		blk_mq_update_nr_hw_queues(ctrl->tagset,
17493f2304f8SSagi Grimberg 			ctrl->queue_count - 1);
17503f2304f8SSagi Grimberg 	}
17513f2304f8SSagi Grimberg 
17523f2304f8SSagi Grimberg 	ret = nvme_tcp_start_io_queues(ctrl);
17533f2304f8SSagi Grimberg 	if (ret)
17543f2304f8SSagi Grimberg 		goto out_cleanup_connect_q;
17553f2304f8SSagi Grimberg 
17563f2304f8SSagi Grimberg 	return 0;
17573f2304f8SSagi Grimberg 
17583f2304f8SSagi Grimberg out_cleanup_connect_q:
1759e85037a2SSagi Grimberg 	if (new)
17603f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->connect_q);
17613f2304f8SSagi Grimberg out_free_tag_set:
17623f2304f8SSagi Grimberg 	if (new)
17633f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->tagset);
17643f2304f8SSagi Grimberg out_free_io_queues:
17653f2304f8SSagi Grimberg 	nvme_tcp_free_io_queues(ctrl);
17663f2304f8SSagi Grimberg 	return ret;
17673f2304f8SSagi Grimberg }
17683f2304f8SSagi Grimberg 
17693f2304f8SSagi Grimberg static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
17703f2304f8SSagi Grimberg {
17713f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
17723f2304f8SSagi Grimberg 	if (remove) {
17733f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->admin_q);
1774e7832cb4SSagi Grimberg 		blk_cleanup_queue(ctrl->fabrics_q);
17753f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->admin_tagset);
17763f2304f8SSagi Grimberg 	}
17773f2304f8SSagi Grimberg 	nvme_tcp_free_admin_queue(ctrl);
17783f2304f8SSagi Grimberg }
17793f2304f8SSagi Grimberg 
17803f2304f8SSagi Grimberg static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
17813f2304f8SSagi Grimberg {
17823f2304f8SSagi Grimberg 	int error;
17833f2304f8SSagi Grimberg 
17843f2304f8SSagi Grimberg 	error = nvme_tcp_alloc_admin_queue(ctrl);
17853f2304f8SSagi Grimberg 	if (error)
17863f2304f8SSagi Grimberg 		return error;
17873f2304f8SSagi Grimberg 
17883f2304f8SSagi Grimberg 	if (new) {
17893f2304f8SSagi Grimberg 		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
17903f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->admin_tagset)) {
17913f2304f8SSagi Grimberg 			error = PTR_ERR(ctrl->admin_tagset);
17923f2304f8SSagi Grimberg 			goto out_free_queue;
17933f2304f8SSagi Grimberg 		}
17943f2304f8SSagi Grimberg 
1795e7832cb4SSagi Grimberg 		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1796e7832cb4SSagi Grimberg 		if (IS_ERR(ctrl->fabrics_q)) {
1797e7832cb4SSagi Grimberg 			error = PTR_ERR(ctrl->fabrics_q);
1798e7832cb4SSagi Grimberg 			goto out_free_tagset;
1799e7832cb4SSagi Grimberg 		}
1800e7832cb4SSagi Grimberg 
18013f2304f8SSagi Grimberg 		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
18023f2304f8SSagi Grimberg 		if (IS_ERR(ctrl->admin_q)) {
18033f2304f8SSagi Grimberg 			error = PTR_ERR(ctrl->admin_q);
1804e7832cb4SSagi Grimberg 			goto out_cleanup_fabrics_q;
18053f2304f8SSagi Grimberg 		}
18063f2304f8SSagi Grimberg 	}
18073f2304f8SSagi Grimberg 
18083f2304f8SSagi Grimberg 	error = nvme_tcp_start_queue(ctrl, 0);
18093f2304f8SSagi Grimberg 	if (error)
18103f2304f8SSagi Grimberg 		goto out_cleanup_queue;
18113f2304f8SSagi Grimberg 
1812c0f2f45bSSagi Grimberg 	error = nvme_enable_ctrl(ctrl);
18133f2304f8SSagi Grimberg 	if (error)
18143f2304f8SSagi Grimberg 		goto out_stop_queue;
18153f2304f8SSagi Grimberg 
1816e7832cb4SSagi Grimberg 	blk_mq_unquiesce_queue(ctrl->admin_q);
1817e7832cb4SSagi Grimberg 
18183f2304f8SSagi Grimberg 	error = nvme_init_identify(ctrl);
18193f2304f8SSagi Grimberg 	if (error)
18203f2304f8SSagi Grimberg 		goto out_stop_queue;
18213f2304f8SSagi Grimberg 
18223f2304f8SSagi Grimberg 	return 0;
18233f2304f8SSagi Grimberg 
18243f2304f8SSagi Grimberg out_stop_queue:
18253f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
18263f2304f8SSagi Grimberg out_cleanup_queue:
18273f2304f8SSagi Grimberg 	if (new)
18283f2304f8SSagi Grimberg 		blk_cleanup_queue(ctrl->admin_q);
1829e7832cb4SSagi Grimberg out_cleanup_fabrics_q:
1830e7832cb4SSagi Grimberg 	if (new)
1831e7832cb4SSagi Grimberg 		blk_cleanup_queue(ctrl->fabrics_q);
18323f2304f8SSagi Grimberg out_free_tagset:
18333f2304f8SSagi Grimberg 	if (new)
18343f2304f8SSagi Grimberg 		blk_mq_free_tag_set(ctrl->admin_tagset);
18353f2304f8SSagi Grimberg out_free_queue:
18363f2304f8SSagi Grimberg 	nvme_tcp_free_admin_queue(ctrl);
18373f2304f8SSagi Grimberg 	return error;
18383f2304f8SSagi Grimberg }
18393f2304f8SSagi Grimberg 
18403f2304f8SSagi Grimberg static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
18413f2304f8SSagi Grimberg 		bool remove)
18423f2304f8SSagi Grimberg {
18433f2304f8SSagi Grimberg 	blk_mq_quiesce_queue(ctrl->admin_q);
18443f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
1845622b8b68SMing Lei 	if (ctrl->admin_tagset) {
18467a425896SSagi Grimberg 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
18477a425896SSagi Grimberg 			nvme_cancel_request, ctrl);
1848622b8b68SMing Lei 		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1849622b8b68SMing Lei 	}
1850e7832cb4SSagi Grimberg 	if (remove)
18513f2304f8SSagi Grimberg 		blk_mq_unquiesce_queue(ctrl->admin_q);
18523f2304f8SSagi Grimberg 	nvme_tcp_destroy_admin_queue(ctrl, remove);
18533f2304f8SSagi Grimberg }
18543f2304f8SSagi Grimberg 
18553f2304f8SSagi Grimberg static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
18563f2304f8SSagi Grimberg 		bool remove)
18573f2304f8SSagi Grimberg {
18583f2304f8SSagi Grimberg 	if (ctrl->queue_count <= 1)
18593f2304f8SSagi Grimberg 		return;
18603f2304f8SSagi Grimberg 	nvme_stop_queues(ctrl);
18613f2304f8SSagi Grimberg 	nvme_tcp_stop_io_queues(ctrl);
1862622b8b68SMing Lei 	if (ctrl->tagset) {
18637a425896SSagi Grimberg 		blk_mq_tagset_busy_iter(ctrl->tagset,
18647a425896SSagi Grimberg 			nvme_cancel_request, ctrl);
1865622b8b68SMing Lei 		blk_mq_tagset_wait_completed_request(ctrl->tagset);
1866622b8b68SMing Lei 	}
18673f2304f8SSagi Grimberg 	if (remove)
18683f2304f8SSagi Grimberg 		nvme_start_queues(ctrl);
18693f2304f8SSagi Grimberg 	nvme_tcp_destroy_io_queues(ctrl, remove);
18703f2304f8SSagi Grimberg }
18713f2304f8SSagi Grimberg 
18723f2304f8SSagi Grimberg static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
18733f2304f8SSagi Grimberg {
18743f2304f8SSagi Grimberg 	/* If we are resetting/deleting then do nothing */
18753f2304f8SSagi Grimberg 	if (ctrl->state != NVME_CTRL_CONNECTING) {
18763f2304f8SSagi Grimberg 		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
18773f2304f8SSagi Grimberg 			ctrl->state == NVME_CTRL_LIVE);
18783f2304f8SSagi Grimberg 		return;
18793f2304f8SSagi Grimberg 	}
18803f2304f8SSagi Grimberg 
18813f2304f8SSagi Grimberg 	if (nvmf_should_reconnect(ctrl)) {
18823f2304f8SSagi Grimberg 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
18833f2304f8SSagi Grimberg 			ctrl->opts->reconnect_delay);
18843f2304f8SSagi Grimberg 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
18853f2304f8SSagi Grimberg 				ctrl->opts->reconnect_delay * HZ);
18863f2304f8SSagi Grimberg 	} else {
18873f2304f8SSagi Grimberg 		dev_info(ctrl->device, "Removing controller...\n");
18883f2304f8SSagi Grimberg 		nvme_delete_ctrl(ctrl);
18893f2304f8SSagi Grimberg 	}
18903f2304f8SSagi Grimberg }
18913f2304f8SSagi Grimberg 
18923f2304f8SSagi Grimberg static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
18933f2304f8SSagi Grimberg {
18943f2304f8SSagi Grimberg 	struct nvmf_ctrl_options *opts = ctrl->opts;
1895312910f4SColin Ian King 	int ret;
18963f2304f8SSagi Grimberg 
18973f2304f8SSagi Grimberg 	ret = nvme_tcp_configure_admin_queue(ctrl, new);
18983f2304f8SSagi Grimberg 	if (ret)
18993f2304f8SSagi Grimberg 		return ret;
19003f2304f8SSagi Grimberg 
19013f2304f8SSagi Grimberg 	if (ctrl->icdoff) {
19023f2304f8SSagi Grimberg 		dev_err(ctrl->device, "icdoff is not supported!\n");
19033f2304f8SSagi Grimberg 		goto destroy_admin;
19043f2304f8SSagi Grimberg 	}
19053f2304f8SSagi Grimberg 
19063f2304f8SSagi Grimberg 	if (opts->queue_size > ctrl->sqsize + 1)
19073f2304f8SSagi Grimberg 		dev_warn(ctrl->device,
19083f2304f8SSagi Grimberg 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
19093f2304f8SSagi Grimberg 			opts->queue_size, ctrl->sqsize + 1);
19103f2304f8SSagi Grimberg 
19113f2304f8SSagi Grimberg 	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
19123f2304f8SSagi Grimberg 		dev_warn(ctrl->device,
19133f2304f8SSagi Grimberg 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
19143f2304f8SSagi Grimberg 			ctrl->sqsize + 1, ctrl->maxcmd);
19153f2304f8SSagi Grimberg 		ctrl->sqsize = ctrl->maxcmd - 1;
19163f2304f8SSagi Grimberg 	}
19173f2304f8SSagi Grimberg 
19183f2304f8SSagi Grimberg 	if (ctrl->queue_count > 1) {
19193f2304f8SSagi Grimberg 		ret = nvme_tcp_configure_io_queues(ctrl, new);
19203f2304f8SSagi Grimberg 		if (ret)
19213f2304f8SSagi Grimberg 			goto destroy_admin;
19223f2304f8SSagi Grimberg 	}
19233f2304f8SSagi Grimberg 
19243f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1925bea54ef5SIsrael Rukshin 		/*
1926bea54ef5SIsrael Rukshin 		 * state change failure is ok if we're in DELETING state,
1927bea54ef5SIsrael Rukshin 		 * unless we're during creation of a new controller to
1928bea54ef5SIsrael Rukshin 		 * avoid races with teardown flow.
1929bea54ef5SIsrael Rukshin 		 */
19303f2304f8SSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1931bea54ef5SIsrael Rukshin 		WARN_ON_ONCE(new);
19323f2304f8SSagi Grimberg 		ret = -EINVAL;
19333f2304f8SSagi Grimberg 		goto destroy_io;
19343f2304f8SSagi Grimberg 	}
19353f2304f8SSagi Grimberg 
19363f2304f8SSagi Grimberg 	nvme_start_ctrl(ctrl);
19373f2304f8SSagi Grimberg 	return 0;
19383f2304f8SSagi Grimberg 
19393f2304f8SSagi Grimberg destroy_io:
19403f2304f8SSagi Grimberg 	if (ctrl->queue_count > 1)
19413f2304f8SSagi Grimberg 		nvme_tcp_destroy_io_queues(ctrl, new);
19423f2304f8SSagi Grimberg destroy_admin:
19433f2304f8SSagi Grimberg 	nvme_tcp_stop_queue(ctrl, 0);
19443f2304f8SSagi Grimberg 	nvme_tcp_destroy_admin_queue(ctrl, new);
19453f2304f8SSagi Grimberg 	return ret;
19463f2304f8SSagi Grimberg }
19473f2304f8SSagi Grimberg 
19483f2304f8SSagi Grimberg static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
19493f2304f8SSagi Grimberg {
19503f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
19513f2304f8SSagi Grimberg 			struct nvme_tcp_ctrl, connect_work);
19523f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
19533f2304f8SSagi Grimberg 
19543f2304f8SSagi Grimberg 	++ctrl->nr_reconnects;
19553f2304f8SSagi Grimberg 
19563f2304f8SSagi Grimberg 	if (nvme_tcp_setup_ctrl(ctrl, false))
19573f2304f8SSagi Grimberg 		goto requeue;
19583f2304f8SSagi Grimberg 
195956a77d26SColin Ian King 	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
19603f2304f8SSagi Grimberg 			ctrl->nr_reconnects);
19613f2304f8SSagi Grimberg 
19623f2304f8SSagi Grimberg 	ctrl->nr_reconnects = 0;
19633f2304f8SSagi Grimberg 
19643f2304f8SSagi Grimberg 	return;
19653f2304f8SSagi Grimberg 
19663f2304f8SSagi Grimberg requeue:
19673f2304f8SSagi Grimberg 	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
19683f2304f8SSagi Grimberg 			ctrl->nr_reconnects);
19693f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
19703f2304f8SSagi Grimberg }
19713f2304f8SSagi Grimberg 
19723f2304f8SSagi Grimberg static void nvme_tcp_error_recovery_work(struct work_struct *work)
19733f2304f8SSagi Grimberg {
19743f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
19753f2304f8SSagi Grimberg 				struct nvme_tcp_ctrl, err_work);
19763f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
19773f2304f8SSagi Grimberg 
19783f2304f8SSagi Grimberg 	nvme_stop_keep_alive(ctrl);
19793f2304f8SSagi Grimberg 	nvme_tcp_teardown_io_queues(ctrl, false);
19803f2304f8SSagi Grimberg 	/* unquiesce to fail fast pending requests */
19813f2304f8SSagi Grimberg 	nvme_start_queues(ctrl);
19823f2304f8SSagi Grimberg 	nvme_tcp_teardown_admin_queue(ctrl, false);
1983e7832cb4SSagi Grimberg 	blk_mq_unquiesce_queue(ctrl->admin_q);
19843f2304f8SSagi Grimberg 
19853f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
19863f2304f8SSagi Grimberg 		/* state change failure is ok if we're in DELETING state */
19873f2304f8SSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
19883f2304f8SSagi Grimberg 		return;
19893f2304f8SSagi Grimberg 	}
19903f2304f8SSagi Grimberg 
19913f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
19923f2304f8SSagi Grimberg }
19933f2304f8SSagi Grimberg 
19943f2304f8SSagi Grimberg static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
19953f2304f8SSagi Grimberg {
1996794a4cb3SSagi Grimberg 	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1997794a4cb3SSagi Grimberg 	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1998794a4cb3SSagi Grimberg 
19993f2304f8SSagi Grimberg 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
2000e7832cb4SSagi Grimberg 	blk_mq_quiesce_queue(ctrl->admin_q);
20013f2304f8SSagi Grimberg 	if (shutdown)
20023f2304f8SSagi Grimberg 		nvme_shutdown_ctrl(ctrl);
20033f2304f8SSagi Grimberg 	else
2004b5b05048SSagi Grimberg 		nvme_disable_ctrl(ctrl);
20053f2304f8SSagi Grimberg 	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
20063f2304f8SSagi Grimberg }
20073f2304f8SSagi Grimberg 
20083f2304f8SSagi Grimberg static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
20093f2304f8SSagi Grimberg {
20103f2304f8SSagi Grimberg 	nvme_tcp_teardown_ctrl(ctrl, true);
20113f2304f8SSagi Grimberg }
20123f2304f8SSagi Grimberg 
20133f2304f8SSagi Grimberg static void nvme_reset_ctrl_work(struct work_struct *work)
20143f2304f8SSagi Grimberg {
20153f2304f8SSagi Grimberg 	struct nvme_ctrl *ctrl =
20163f2304f8SSagi Grimberg 		container_of(work, struct nvme_ctrl, reset_work);
20173f2304f8SSagi Grimberg 
20183f2304f8SSagi Grimberg 	nvme_stop_ctrl(ctrl);
20193f2304f8SSagi Grimberg 	nvme_tcp_teardown_ctrl(ctrl, false);
20203f2304f8SSagi Grimberg 
20213f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
20223f2304f8SSagi Grimberg 		/* state change failure is ok if we're in DELETING state */
20233f2304f8SSagi Grimberg 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
20243f2304f8SSagi Grimberg 		return;
20253f2304f8SSagi Grimberg 	}
20263f2304f8SSagi Grimberg 
20273f2304f8SSagi Grimberg 	if (nvme_tcp_setup_ctrl(ctrl, false))
20283f2304f8SSagi Grimberg 		goto out_fail;
20293f2304f8SSagi Grimberg 
20303f2304f8SSagi Grimberg 	return;
20313f2304f8SSagi Grimberg 
20323f2304f8SSagi Grimberg out_fail:
20333f2304f8SSagi Grimberg 	++ctrl->nr_reconnects;
20343f2304f8SSagi Grimberg 	nvme_tcp_reconnect_or_remove(ctrl);
20353f2304f8SSagi Grimberg }
20363f2304f8SSagi Grimberg 
20373f2304f8SSagi Grimberg static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
20383f2304f8SSagi Grimberg {
20393f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
20403f2304f8SSagi Grimberg 
20413f2304f8SSagi Grimberg 	if (list_empty(&ctrl->list))
20423f2304f8SSagi Grimberg 		goto free_ctrl;
20433f2304f8SSagi Grimberg 
20443f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
20453f2304f8SSagi Grimberg 	list_del(&ctrl->list);
20463f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
20473f2304f8SSagi Grimberg 
20483f2304f8SSagi Grimberg 	nvmf_free_options(nctrl->opts);
20493f2304f8SSagi Grimberg free_ctrl:
20503f2304f8SSagi Grimberg 	kfree(ctrl->queues);
20513f2304f8SSagi Grimberg 	kfree(ctrl);
20523f2304f8SSagi Grimberg }
20533f2304f8SSagi Grimberg 
20543f2304f8SSagi Grimberg static void nvme_tcp_set_sg_null(struct nvme_command *c)
20553f2304f8SSagi Grimberg {
20563f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
20573f2304f8SSagi Grimberg 
20583f2304f8SSagi Grimberg 	sg->addr = 0;
20593f2304f8SSagi Grimberg 	sg->length = 0;
20603f2304f8SSagi Grimberg 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
20613f2304f8SSagi Grimberg 			NVME_SGL_FMT_TRANSPORT_A;
20623f2304f8SSagi Grimberg }
20633f2304f8SSagi Grimberg 
20643f2304f8SSagi Grimberg static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
20653f2304f8SSagi Grimberg 		struct nvme_command *c, u32 data_len)
20663f2304f8SSagi Grimberg {
20673f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
20683f2304f8SSagi Grimberg 
20693f2304f8SSagi Grimberg 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
20703f2304f8SSagi Grimberg 	sg->length = cpu_to_le32(data_len);
20713f2304f8SSagi Grimberg 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
20723f2304f8SSagi Grimberg }
20733f2304f8SSagi Grimberg 
20743f2304f8SSagi Grimberg static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
20753f2304f8SSagi Grimberg 		u32 data_len)
20763f2304f8SSagi Grimberg {
20773f2304f8SSagi Grimberg 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
20783f2304f8SSagi Grimberg 
20793f2304f8SSagi Grimberg 	sg->addr = 0;
20803f2304f8SSagi Grimberg 	sg->length = cpu_to_le32(data_len);
20813f2304f8SSagi Grimberg 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
20823f2304f8SSagi Grimberg 			NVME_SGL_FMT_TRANSPORT_A;
20833f2304f8SSagi Grimberg }
20843f2304f8SSagi Grimberg 
20853f2304f8SSagi Grimberg static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
20863f2304f8SSagi Grimberg {
20873f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
20883f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
20893f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
20903f2304f8SSagi Grimberg 	struct nvme_command *cmd = &pdu->cmd;
20913f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue);
20923f2304f8SSagi Grimberg 
20933f2304f8SSagi Grimberg 	memset(pdu, 0, sizeof(*pdu));
20943f2304f8SSagi Grimberg 	pdu->hdr.type = nvme_tcp_cmd;
20953f2304f8SSagi Grimberg 	if (queue->hdr_digest)
20963f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
20973f2304f8SSagi Grimberg 	pdu->hdr.hlen = sizeof(*pdu);
20983f2304f8SSagi Grimberg 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
20993f2304f8SSagi Grimberg 
21003f2304f8SSagi Grimberg 	cmd->common.opcode = nvme_admin_async_event;
21013f2304f8SSagi Grimberg 	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
21023f2304f8SSagi Grimberg 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
21033f2304f8SSagi Grimberg 	nvme_tcp_set_sg_null(cmd);
21043f2304f8SSagi Grimberg 
21053f2304f8SSagi Grimberg 	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
21063f2304f8SSagi Grimberg 	ctrl->async_req.offset = 0;
21073f2304f8SSagi Grimberg 	ctrl->async_req.curr_bio = NULL;
21083f2304f8SSagi Grimberg 	ctrl->async_req.data_len = 0;
21093f2304f8SSagi Grimberg 
2110db5ad6b7SSagi Grimberg 	nvme_tcp_queue_request(&ctrl->async_req, true);
21113f2304f8SSagi Grimberg }
21123f2304f8SSagi Grimberg 
21133f2304f8SSagi Grimberg static enum blk_eh_timer_return
21143f2304f8SSagi Grimberg nvme_tcp_timeout(struct request *rq, bool reserved)
21153f2304f8SSagi Grimberg {
21163f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
21173f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
21183f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
21193f2304f8SSagi Grimberg 
212092b98e88SKeith Busch 	/*
212192b98e88SKeith Busch 	 * Restart the timer if a controller reset is already scheduled. Any
212292b98e88SKeith Busch 	 * timed out commands would be handled before entering the connecting
212392b98e88SKeith Busch 	 * state.
212492b98e88SKeith Busch 	 */
212592b98e88SKeith Busch 	if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
212692b98e88SKeith Busch 		return BLK_EH_RESET_TIMER;
212792b98e88SKeith Busch 
212839d57757SSagi Grimberg 	dev_warn(ctrl->ctrl.device,
21293f2304f8SSagi Grimberg 		"queue %d: timeout request %#x type %d\n",
213039d57757SSagi Grimberg 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
21313f2304f8SSagi Grimberg 
21323f2304f8SSagi Grimberg 	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
213339d57757SSagi Grimberg 		/*
213439d57757SSagi Grimberg 		 * Teardown immediately if controller times out while starting
213539d57757SSagi Grimberg 		 * or we are already started error recovery. all outstanding
213639d57757SSagi Grimberg 		 * requests are completed on shutdown, so we return BLK_EH_DONE.
213739d57757SSagi Grimberg 		 */
213839d57757SSagi Grimberg 		flush_work(&ctrl->err_work);
213939d57757SSagi Grimberg 		nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
214039d57757SSagi Grimberg 		nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
21413f2304f8SSagi Grimberg 		return BLK_EH_DONE;
21423f2304f8SSagi Grimberg 	}
21433f2304f8SSagi Grimberg 
214439d57757SSagi Grimberg 	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
21453f2304f8SSagi Grimberg 	nvme_tcp_error_recovery(&ctrl->ctrl);
21463f2304f8SSagi Grimberg 
21473f2304f8SSagi Grimberg 	return BLK_EH_RESET_TIMER;
21483f2304f8SSagi Grimberg }
21493f2304f8SSagi Grimberg 
21503f2304f8SSagi Grimberg static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
21513f2304f8SSagi Grimberg 			struct request *rq)
21523f2304f8SSagi Grimberg {
21533f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
21543f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
21553f2304f8SSagi Grimberg 	struct nvme_command *c = &pdu->cmd;
21563f2304f8SSagi Grimberg 
21573f2304f8SSagi Grimberg 	c->common.flags |= NVME_CMD_SGL_METABUF;
21583f2304f8SSagi Grimberg 
215925e5cb78SSagi Grimberg 	if (!blk_rq_nr_phys_segments(rq))
216025e5cb78SSagi Grimberg 		nvme_tcp_set_sg_null(c);
216125e5cb78SSagi Grimberg 	else if (rq_data_dir(rq) == WRITE &&
21623f2304f8SSagi Grimberg 	    req->data_len <= nvme_tcp_inline_data_size(queue))
21633f2304f8SSagi Grimberg 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
21643f2304f8SSagi Grimberg 	else
21653f2304f8SSagi Grimberg 		nvme_tcp_set_sg_host_data(c, req->data_len);
21663f2304f8SSagi Grimberg 
21673f2304f8SSagi Grimberg 	return 0;
21683f2304f8SSagi Grimberg }
21693f2304f8SSagi Grimberg 
21703f2304f8SSagi Grimberg static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
21713f2304f8SSagi Grimberg 		struct request *rq)
21723f2304f8SSagi Grimberg {
21733f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
21743f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
21753f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = req->queue;
21763f2304f8SSagi Grimberg 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
21773f2304f8SSagi Grimberg 	blk_status_t ret;
21783f2304f8SSagi Grimberg 
21793f2304f8SSagi Grimberg 	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
21803f2304f8SSagi Grimberg 	if (ret)
21813f2304f8SSagi Grimberg 		return ret;
21823f2304f8SSagi Grimberg 
21833f2304f8SSagi Grimberg 	req->state = NVME_TCP_SEND_CMD_PDU;
21843f2304f8SSagi Grimberg 	req->offset = 0;
21853f2304f8SSagi Grimberg 	req->data_sent = 0;
21863f2304f8SSagi Grimberg 	req->pdu_len = 0;
21873f2304f8SSagi Grimberg 	req->pdu_sent = 0;
218825e5cb78SSagi Grimberg 	req->data_len = blk_rq_nr_phys_segments(rq) ?
218925e5cb78SSagi Grimberg 				blk_rq_payload_bytes(rq) : 0;
21903f2304f8SSagi Grimberg 	req->curr_bio = rq->bio;
21913f2304f8SSagi Grimberg 
21923f2304f8SSagi Grimberg 	if (rq_data_dir(rq) == WRITE &&
21933f2304f8SSagi Grimberg 	    req->data_len <= nvme_tcp_inline_data_size(queue))
21943f2304f8SSagi Grimberg 		req->pdu_len = req->data_len;
21953f2304f8SSagi Grimberg 	else if (req->curr_bio)
21963f2304f8SSagi Grimberg 		nvme_tcp_init_iter(req, READ);
21973f2304f8SSagi Grimberg 
21983f2304f8SSagi Grimberg 	pdu->hdr.type = nvme_tcp_cmd;
21993f2304f8SSagi Grimberg 	pdu->hdr.flags = 0;
22003f2304f8SSagi Grimberg 	if (queue->hdr_digest)
22013f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
22023f2304f8SSagi Grimberg 	if (queue->data_digest && req->pdu_len) {
22033f2304f8SSagi Grimberg 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
22043f2304f8SSagi Grimberg 		ddgst = nvme_tcp_ddgst_len(queue);
22053f2304f8SSagi Grimberg 	}
22063f2304f8SSagi Grimberg 	pdu->hdr.hlen = sizeof(*pdu);
22073f2304f8SSagi Grimberg 	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
22083f2304f8SSagi Grimberg 	pdu->hdr.plen =
22093f2304f8SSagi Grimberg 		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
22103f2304f8SSagi Grimberg 
22113f2304f8SSagi Grimberg 	ret = nvme_tcp_map_data(queue, rq);
22123f2304f8SSagi Grimberg 	if (unlikely(ret)) {
221328a4cac4SMax Gurtovoy 		nvme_cleanup_cmd(rq);
22143f2304f8SSagi Grimberg 		dev_err(queue->ctrl->ctrl.device,
22153f2304f8SSagi Grimberg 			"Failed to map data (%d)\n", ret);
22163f2304f8SSagi Grimberg 		return ret;
22173f2304f8SSagi Grimberg 	}
22183f2304f8SSagi Grimberg 
22193f2304f8SSagi Grimberg 	return 0;
22203f2304f8SSagi Grimberg }
22213f2304f8SSagi Grimberg 
22223f2304f8SSagi Grimberg static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
22233f2304f8SSagi Grimberg 		const struct blk_mq_queue_data *bd)
22243f2304f8SSagi Grimberg {
22253f2304f8SSagi Grimberg 	struct nvme_ns *ns = hctx->queue->queuedata;
22263f2304f8SSagi Grimberg 	struct nvme_tcp_queue *queue = hctx->driver_data;
22273f2304f8SSagi Grimberg 	struct request *rq = bd->rq;
22283f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
22293f2304f8SSagi Grimberg 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
22303f2304f8SSagi Grimberg 	blk_status_t ret;
22313f2304f8SSagi Grimberg 
22323f2304f8SSagi Grimberg 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
22333f2304f8SSagi Grimberg 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
22343f2304f8SSagi Grimberg 
22353f2304f8SSagi Grimberg 	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
22363f2304f8SSagi Grimberg 	if (unlikely(ret))
22373f2304f8SSagi Grimberg 		return ret;
22383f2304f8SSagi Grimberg 
22393f2304f8SSagi Grimberg 	blk_mq_start_request(rq);
22403f2304f8SSagi Grimberg 
2241db5ad6b7SSagi Grimberg 	nvme_tcp_queue_request(req, true);
22423f2304f8SSagi Grimberg 
22433f2304f8SSagi Grimberg 	return BLK_STS_OK;
22443f2304f8SSagi Grimberg }
22453f2304f8SSagi Grimberg 
2246873946f4SSagi Grimberg static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2247873946f4SSagi Grimberg {
2248873946f4SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
224964861993SSagi Grimberg 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2250873946f4SSagi Grimberg 
225164861993SSagi Grimberg 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2252873946f4SSagi Grimberg 		/* separate read/write queues */
2253873946f4SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
225464861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
225564861993SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
225664861993SSagi Grimberg 		set->map[HCTX_TYPE_READ].nr_queues =
225764861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_READ];
2258873946f4SSagi Grimberg 		set->map[HCTX_TYPE_READ].queue_offset =
225964861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2260873946f4SSagi Grimberg 	} else {
226164861993SSagi Grimberg 		/* shared read/write queues */
2262873946f4SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
226364861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
226464861993SSagi Grimberg 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
226564861993SSagi Grimberg 		set->map[HCTX_TYPE_READ].nr_queues =
226664861993SSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
2267873946f4SSagi Grimberg 		set->map[HCTX_TYPE_READ].queue_offset = 0;
2268873946f4SSagi Grimberg 	}
2269873946f4SSagi Grimberg 	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2270873946f4SSagi Grimberg 	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
227164861993SSagi Grimberg 
22721a9460ceSSagi Grimberg 	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
22731a9460ceSSagi Grimberg 		/* map dedicated poll queues only if we have queues left */
22741a9460ceSSagi Grimberg 		set->map[HCTX_TYPE_POLL].nr_queues =
22751a9460ceSSagi Grimberg 				ctrl->io_queues[HCTX_TYPE_POLL];
22761a9460ceSSagi Grimberg 		set->map[HCTX_TYPE_POLL].queue_offset =
22771a9460ceSSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
22781a9460ceSSagi Grimberg 			ctrl->io_queues[HCTX_TYPE_READ];
22791a9460ceSSagi Grimberg 		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
22801a9460ceSSagi Grimberg 	}
22811a9460ceSSagi Grimberg 
228264861993SSagi Grimberg 	dev_info(ctrl->ctrl.device,
22831a9460ceSSagi Grimberg 		"mapped %d/%d/%d default/read/poll queues.\n",
228464861993SSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_DEFAULT],
22851a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_READ],
22861a9460ceSSagi Grimberg 		ctrl->io_queues[HCTX_TYPE_POLL]);
228764861993SSagi Grimberg 
2288873946f4SSagi Grimberg 	return 0;
2289873946f4SSagi Grimberg }
2290873946f4SSagi Grimberg 
22911a9460ceSSagi Grimberg static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
22921a9460ceSSagi Grimberg {
22931a9460ceSSagi Grimberg 	struct nvme_tcp_queue *queue = hctx->driver_data;
22941a9460ceSSagi Grimberg 	struct sock *sk = queue->sock->sk;
22951a9460ceSSagi Grimberg 
2296f86e5bf8SSagi Grimberg 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2297f86e5bf8SSagi Grimberg 		return 0;
2298f86e5bf8SSagi Grimberg 
229972e5d757SSagi Grimberg 	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
23003f926af3SEric Dumazet 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
23011a9460ceSSagi Grimberg 		sk_busy_loop(sk, true);
23021a9460ceSSagi Grimberg 	nvme_tcp_try_recv(queue);
230372e5d757SSagi Grimberg 	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
23041a9460ceSSagi Grimberg 	return queue->nr_cqe;
23051a9460ceSSagi Grimberg }
23061a9460ceSSagi Grimberg 
23076acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_mq_ops = {
23083f2304f8SSagi Grimberg 	.queue_rq	= nvme_tcp_queue_rq,
23093f2304f8SSagi Grimberg 	.complete	= nvme_complete_rq,
23103f2304f8SSagi Grimberg 	.init_request	= nvme_tcp_init_request,
23113f2304f8SSagi Grimberg 	.exit_request	= nvme_tcp_exit_request,
23123f2304f8SSagi Grimberg 	.init_hctx	= nvme_tcp_init_hctx,
23133f2304f8SSagi Grimberg 	.timeout	= nvme_tcp_timeout,
2314873946f4SSagi Grimberg 	.map_queues	= nvme_tcp_map_queues,
23151a9460ceSSagi Grimberg 	.poll		= nvme_tcp_poll,
23163f2304f8SSagi Grimberg };
23173f2304f8SSagi Grimberg 
23186acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
23193f2304f8SSagi Grimberg 	.queue_rq	= nvme_tcp_queue_rq,
23203f2304f8SSagi Grimberg 	.complete	= nvme_complete_rq,
23213f2304f8SSagi Grimberg 	.init_request	= nvme_tcp_init_request,
23223f2304f8SSagi Grimberg 	.exit_request	= nvme_tcp_exit_request,
23233f2304f8SSagi Grimberg 	.init_hctx	= nvme_tcp_init_admin_hctx,
23243f2304f8SSagi Grimberg 	.timeout	= nvme_tcp_timeout,
23253f2304f8SSagi Grimberg };
23263f2304f8SSagi Grimberg 
23273f2304f8SSagi Grimberg static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
23283f2304f8SSagi Grimberg 	.name			= "tcp",
23293f2304f8SSagi Grimberg 	.module			= THIS_MODULE,
23303f2304f8SSagi Grimberg 	.flags			= NVME_F_FABRICS,
23313f2304f8SSagi Grimberg 	.reg_read32		= nvmf_reg_read32,
23323f2304f8SSagi Grimberg 	.reg_read64		= nvmf_reg_read64,
23333f2304f8SSagi Grimberg 	.reg_write32		= nvmf_reg_write32,
23343f2304f8SSagi Grimberg 	.free_ctrl		= nvme_tcp_free_ctrl,
23353f2304f8SSagi Grimberg 	.submit_async_event	= nvme_tcp_submit_async_event,
23363f2304f8SSagi Grimberg 	.delete_ctrl		= nvme_tcp_delete_ctrl,
23373f2304f8SSagi Grimberg 	.get_address		= nvmf_get_address,
23383f2304f8SSagi Grimberg };
23393f2304f8SSagi Grimberg 
23403f2304f8SSagi Grimberg static bool
23413f2304f8SSagi Grimberg nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
23423f2304f8SSagi Grimberg {
23433f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
23443f2304f8SSagi Grimberg 	bool found = false;
23453f2304f8SSagi Grimberg 
23463f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
23473f2304f8SSagi Grimberg 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
23483f2304f8SSagi Grimberg 		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
23493f2304f8SSagi Grimberg 		if (found)
23503f2304f8SSagi Grimberg 			break;
23513f2304f8SSagi Grimberg 	}
23523f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
23533f2304f8SSagi Grimberg 
23543f2304f8SSagi Grimberg 	return found;
23553f2304f8SSagi Grimberg }
23563f2304f8SSagi Grimberg 
23573f2304f8SSagi Grimberg static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
23583f2304f8SSagi Grimberg 		struct nvmf_ctrl_options *opts)
23593f2304f8SSagi Grimberg {
23603f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
23613f2304f8SSagi Grimberg 	int ret;
23623f2304f8SSagi Grimberg 
23633f2304f8SSagi Grimberg 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
23643f2304f8SSagi Grimberg 	if (!ctrl)
23653f2304f8SSagi Grimberg 		return ERR_PTR(-ENOMEM);
23663f2304f8SSagi Grimberg 
23673f2304f8SSagi Grimberg 	INIT_LIST_HEAD(&ctrl->list);
23683f2304f8SSagi Grimberg 	ctrl->ctrl.opts = opts;
23691a9460ceSSagi Grimberg 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
23701a9460ceSSagi Grimberg 				opts->nr_poll_queues + 1;
23713f2304f8SSagi Grimberg 	ctrl->ctrl.sqsize = opts->queue_size - 1;
23723f2304f8SSagi Grimberg 	ctrl->ctrl.kato = opts->kato;
23733f2304f8SSagi Grimberg 
23743f2304f8SSagi Grimberg 	INIT_DELAYED_WORK(&ctrl->connect_work,
23753f2304f8SSagi Grimberg 			nvme_tcp_reconnect_ctrl_work);
23763f2304f8SSagi Grimberg 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
23773f2304f8SSagi Grimberg 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
23783f2304f8SSagi Grimberg 
23793f2304f8SSagi Grimberg 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
23803f2304f8SSagi Grimberg 		opts->trsvcid =
23813f2304f8SSagi Grimberg 			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
23823f2304f8SSagi Grimberg 		if (!opts->trsvcid) {
23833f2304f8SSagi Grimberg 			ret = -ENOMEM;
23843f2304f8SSagi Grimberg 			goto out_free_ctrl;
23853f2304f8SSagi Grimberg 		}
23863f2304f8SSagi Grimberg 		opts->mask |= NVMF_OPT_TRSVCID;
23873f2304f8SSagi Grimberg 	}
23883f2304f8SSagi Grimberg 
23893f2304f8SSagi Grimberg 	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
23903f2304f8SSagi Grimberg 			opts->traddr, opts->trsvcid, &ctrl->addr);
23913f2304f8SSagi Grimberg 	if (ret) {
23923f2304f8SSagi Grimberg 		pr_err("malformed address passed: %s:%s\n",
23933f2304f8SSagi Grimberg 			opts->traddr, opts->trsvcid);
23943f2304f8SSagi Grimberg 		goto out_free_ctrl;
23953f2304f8SSagi Grimberg 	}
23963f2304f8SSagi Grimberg 
23973f2304f8SSagi Grimberg 	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
23983f2304f8SSagi Grimberg 		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
23993f2304f8SSagi Grimberg 			opts->host_traddr, NULL, &ctrl->src_addr);
24003f2304f8SSagi Grimberg 		if (ret) {
24013f2304f8SSagi Grimberg 			pr_err("malformed src address passed: %s\n",
24023f2304f8SSagi Grimberg 			       opts->host_traddr);
24033f2304f8SSagi Grimberg 			goto out_free_ctrl;
24043f2304f8SSagi Grimberg 		}
24053f2304f8SSagi Grimberg 	}
24063f2304f8SSagi Grimberg 
24073f2304f8SSagi Grimberg 	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
24083f2304f8SSagi Grimberg 		ret = -EALREADY;
24093f2304f8SSagi Grimberg 		goto out_free_ctrl;
24103f2304f8SSagi Grimberg 	}
24113f2304f8SSagi Grimberg 
2412873946f4SSagi Grimberg 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
24133f2304f8SSagi Grimberg 				GFP_KERNEL);
24143f2304f8SSagi Grimberg 	if (!ctrl->queues) {
24153f2304f8SSagi Grimberg 		ret = -ENOMEM;
24163f2304f8SSagi Grimberg 		goto out_free_ctrl;
24173f2304f8SSagi Grimberg 	}
24183f2304f8SSagi Grimberg 
24193f2304f8SSagi Grimberg 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
24203f2304f8SSagi Grimberg 	if (ret)
24213f2304f8SSagi Grimberg 		goto out_kfree_queues;
24223f2304f8SSagi Grimberg 
24233f2304f8SSagi Grimberg 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
24243f2304f8SSagi Grimberg 		WARN_ON_ONCE(1);
24253f2304f8SSagi Grimberg 		ret = -EINTR;
24263f2304f8SSagi Grimberg 		goto out_uninit_ctrl;
24273f2304f8SSagi Grimberg 	}
24283f2304f8SSagi Grimberg 
24293f2304f8SSagi Grimberg 	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
24303f2304f8SSagi Grimberg 	if (ret)
24313f2304f8SSagi Grimberg 		goto out_uninit_ctrl;
24323f2304f8SSagi Grimberg 
24333f2304f8SSagi Grimberg 	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
24343f2304f8SSagi Grimberg 		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
24353f2304f8SSagi Grimberg 
24363f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
24373f2304f8SSagi Grimberg 	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
24383f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
24393f2304f8SSagi Grimberg 
24403f2304f8SSagi Grimberg 	return &ctrl->ctrl;
24413f2304f8SSagi Grimberg 
24423f2304f8SSagi Grimberg out_uninit_ctrl:
24433f2304f8SSagi Grimberg 	nvme_uninit_ctrl(&ctrl->ctrl);
24443f2304f8SSagi Grimberg 	nvme_put_ctrl(&ctrl->ctrl);
24453f2304f8SSagi Grimberg 	if (ret > 0)
24463f2304f8SSagi Grimberg 		ret = -EIO;
24473f2304f8SSagi Grimberg 	return ERR_PTR(ret);
24483f2304f8SSagi Grimberg out_kfree_queues:
24493f2304f8SSagi Grimberg 	kfree(ctrl->queues);
24503f2304f8SSagi Grimberg out_free_ctrl:
24513f2304f8SSagi Grimberg 	kfree(ctrl);
24523f2304f8SSagi Grimberg 	return ERR_PTR(ret);
24533f2304f8SSagi Grimberg }
24543f2304f8SSagi Grimberg 
24553f2304f8SSagi Grimberg static struct nvmf_transport_ops nvme_tcp_transport = {
24563f2304f8SSagi Grimberg 	.name		= "tcp",
24573f2304f8SSagi Grimberg 	.module		= THIS_MODULE,
24583f2304f8SSagi Grimberg 	.required_opts	= NVMF_OPT_TRADDR,
24593f2304f8SSagi Grimberg 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
24603f2304f8SSagi Grimberg 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2461873946f4SSagi Grimberg 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2462bb13985dSIsrael Rukshin 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2463bb13985dSIsrael Rukshin 			  NVMF_OPT_TOS,
24643f2304f8SSagi Grimberg 	.create_ctrl	= nvme_tcp_create_ctrl,
24653f2304f8SSagi Grimberg };
24663f2304f8SSagi Grimberg 
24673f2304f8SSagi Grimberg static int __init nvme_tcp_init_module(void)
24683f2304f8SSagi Grimberg {
24693f2304f8SSagi Grimberg 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
24703f2304f8SSagi Grimberg 			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
24713f2304f8SSagi Grimberg 	if (!nvme_tcp_wq)
24723f2304f8SSagi Grimberg 		return -ENOMEM;
24733f2304f8SSagi Grimberg 
24743f2304f8SSagi Grimberg 	nvmf_register_transport(&nvme_tcp_transport);
24753f2304f8SSagi Grimberg 	return 0;
24763f2304f8SSagi Grimberg }
24773f2304f8SSagi Grimberg 
24783f2304f8SSagi Grimberg static void __exit nvme_tcp_cleanup_module(void)
24793f2304f8SSagi Grimberg {
24803f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
24813f2304f8SSagi Grimberg 
24823f2304f8SSagi Grimberg 	nvmf_unregister_transport(&nvme_tcp_transport);
24833f2304f8SSagi Grimberg 
24843f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
24853f2304f8SSagi Grimberg 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
24863f2304f8SSagi Grimberg 		nvme_delete_ctrl(&ctrl->ctrl);
24873f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
24883f2304f8SSagi Grimberg 	flush_workqueue(nvme_delete_wq);
24893f2304f8SSagi Grimberg 
24903f2304f8SSagi Grimberg 	destroy_workqueue(nvme_tcp_wq);
24913f2304f8SSagi Grimberg }
24923f2304f8SSagi Grimberg 
24933f2304f8SSagi Grimberg module_init(nvme_tcp_init_module);
24943f2304f8SSagi Grimberg module_exit(nvme_tcp_cleanup_module);
24953f2304f8SSagi Grimberg 
24963f2304f8SSagi Grimberg MODULE_LICENSE("GPL v2");
2497