// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value being sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	spinlock_t		lock;
	struct list_head	send_list;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static struct blk_mq_ops nvme_tcp_mq_ops;
static struct blk_mq_ops nvme_tcp_admin_mq_ops;

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);
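	/*
	 * Queue 0 is the admin queue and uses the admin tag set; I/O queues
	 * index into the I/O tag set shifted down by one.
	 */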

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
	return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nsegs;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nsegs = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nsegs = bio_segments(bio);
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	spin_lock(&queue->lock);
	list_add_tail(&req->entry, &queue->send_list);
	spin_unlock(&queue->lock);

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	spin_lock(&queue->lock);
	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (req)
		list_del(&req->entry);
	spin_unlock(&queue->lock);

	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ?
			digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
			GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ?
			NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag 0x%x not found\n",
			nvme_tcp_queue_id(queue), cqe->command_id);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	nvme_end_request(rq, cqe->status, cqe->result);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = rq->tag;
	data->data_offset = cpu_to_le32(req->data_sent);
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
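	/*
	 * R2T: the controller is ready to receive more host data for this
	 * command, so build and queue the corresponding H2C data PDU.
	 */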
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	nvme_end_request(rq, cpu_to_le16(status << 1), res);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that controller
			 * sent more data than we requested, hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
				&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
						pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		/* fallthrough */
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

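	/* always chain to the socket's original state_change callback */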
	queue->state_change(sk);
done:
	read_unlock(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}

static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest)
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE;

		/* can't zcopy slab pages */
		if (unlikely(PageSlab(page))) {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		nvme_tcp_advance_req(req, ret);
		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/* fully successful last write */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	int flags = MSG_DONTWAIT | (inline_data ?
			MSG_MORE : MSG_EOR);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
			nvme_tcp_init_iter(req, WRITE);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		if (!req->data_sent)
			nvme_tcp_init_iter(req, WRITE);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
	struct kvec iov = {
		.iov_base = &req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

static void nvme_tcp_io_work(struct work_struct *w)
{
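	/*
	 * Alternate between sending and receiving on the socket for up to
	 * about a millisecond; if the budget expires while work is still
	 * pending, requeue ourselves on the queue's pinned CPU.
	 */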
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		result = nvme_tcp_try_send(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			break;

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
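	/*
	 * Asynchronous event requests have no block-layer request behind
	 * them, so preallocate their command PDU from the admin queue's
	 * page-frag cache.
	 */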
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	sock_release(queue->sock);
	kfree(queue->pdu);
}

static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
Grimberg iov.iov_len = sizeof(*icresp); 12013f2304f8SSagi Grimberg ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, 12023f2304f8SSagi Grimberg iov.iov_len, msg.msg_flags); 12033f2304f8SSagi Grimberg if (ret < 0) 12043f2304f8SSagi Grimberg goto free_icresp; 12053f2304f8SSagi Grimberg 12063f2304f8SSagi Grimberg ret = -EINVAL; 12073f2304f8SSagi Grimberg if (icresp->hdr.type != nvme_tcp_icresp) { 12083f2304f8SSagi Grimberg pr_err("queue %d: bad type returned %d\n", 12093f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), icresp->hdr.type); 12103f2304f8SSagi Grimberg goto free_icresp; 12113f2304f8SSagi Grimberg } 12123f2304f8SSagi Grimberg 12133f2304f8SSagi Grimberg if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { 12143f2304f8SSagi Grimberg pr_err("queue %d: bad pdu length returned %d\n", 12153f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), icresp->hdr.plen); 12163f2304f8SSagi Grimberg goto free_icresp; 12173f2304f8SSagi Grimberg } 12183f2304f8SSagi Grimberg 12193f2304f8SSagi Grimberg if (icresp->pfv != NVME_TCP_PFV_1_0) { 12203f2304f8SSagi Grimberg pr_err("queue %d: bad pfv returned %d\n", 12213f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), icresp->pfv); 12223f2304f8SSagi Grimberg goto free_icresp; 12233f2304f8SSagi Grimberg } 12243f2304f8SSagi Grimberg 12253f2304f8SSagi Grimberg ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); 12263f2304f8SSagi Grimberg if ((queue->data_digest && !ctrl_ddgst) || 12273f2304f8SSagi Grimberg (!queue->data_digest && ctrl_ddgst)) { 12283f2304f8SSagi Grimberg pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", 12293f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), 12303f2304f8SSagi Grimberg queue->data_digest ? "enabled" : "disabled", 12313f2304f8SSagi Grimberg ctrl_ddgst ? "enabled" : "disabled"); 12323f2304f8SSagi Grimberg goto free_icresp; 12333f2304f8SSagi Grimberg } 12343f2304f8SSagi Grimberg 12353f2304f8SSagi Grimberg ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); 12363f2304f8SSagi Grimberg if ((queue->hdr_digest && !ctrl_hdgst) || 12373f2304f8SSagi Grimberg (!queue->hdr_digest && ctrl_hdgst)) { 12383f2304f8SSagi Grimberg pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", 12393f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), 12403f2304f8SSagi Grimberg queue->hdr_digest ? "enabled" : "disabled", 12413f2304f8SSagi Grimberg ctrl_hdgst ? 
"enabled" : "disabled"); 12423f2304f8SSagi Grimberg goto free_icresp; 12433f2304f8SSagi Grimberg } 12443f2304f8SSagi Grimberg 12453f2304f8SSagi Grimberg if (icresp->cpda != 0) { 12463f2304f8SSagi Grimberg pr_err("queue %d: unsupported cpda returned %d\n", 12473f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), icresp->cpda); 12483f2304f8SSagi Grimberg goto free_icresp; 12493f2304f8SSagi Grimberg } 12503f2304f8SSagi Grimberg 12513f2304f8SSagi Grimberg ret = 0; 12523f2304f8SSagi Grimberg free_icresp: 12533f2304f8SSagi Grimberg kfree(icresp); 12543f2304f8SSagi Grimberg free_icreq: 12553f2304f8SSagi Grimberg kfree(icreq); 12563f2304f8SSagi Grimberg return ret; 12573f2304f8SSagi Grimberg } 12583f2304f8SSagi Grimberg 125940510a63SSagi Grimberg static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) 126040510a63SSagi Grimberg { 126140510a63SSagi Grimberg return nvme_tcp_queue_id(queue) == 0; 126240510a63SSagi Grimberg } 126340510a63SSagi Grimberg 126440510a63SSagi Grimberg static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) 126540510a63SSagi Grimberg { 126640510a63SSagi Grimberg struct nvme_tcp_ctrl *ctrl = queue->ctrl; 126740510a63SSagi Grimberg int qid = nvme_tcp_queue_id(queue); 126840510a63SSagi Grimberg 126940510a63SSagi Grimberg return !nvme_tcp_admin_queue(queue) && 127040510a63SSagi Grimberg qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; 127140510a63SSagi Grimberg } 127240510a63SSagi Grimberg 127340510a63SSagi Grimberg static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) 127440510a63SSagi Grimberg { 127540510a63SSagi Grimberg struct nvme_tcp_ctrl *ctrl = queue->ctrl; 127640510a63SSagi Grimberg int qid = nvme_tcp_queue_id(queue); 127740510a63SSagi Grimberg 127840510a63SSagi Grimberg return !nvme_tcp_admin_queue(queue) && 127940510a63SSagi Grimberg !nvme_tcp_default_queue(queue) && 128040510a63SSagi Grimberg qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + 128140510a63SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ]; 128240510a63SSagi Grimberg } 128340510a63SSagi Grimberg 128440510a63SSagi Grimberg static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) 128540510a63SSagi Grimberg { 128640510a63SSagi Grimberg struct nvme_tcp_ctrl *ctrl = queue->ctrl; 128740510a63SSagi Grimberg int qid = nvme_tcp_queue_id(queue); 128840510a63SSagi Grimberg 128940510a63SSagi Grimberg return !nvme_tcp_admin_queue(queue) && 129040510a63SSagi Grimberg !nvme_tcp_default_queue(queue) && 129140510a63SSagi Grimberg !nvme_tcp_read_queue(queue) && 129240510a63SSagi Grimberg qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + 129340510a63SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ] + 129440510a63SSagi Grimberg ctrl->io_queues[HCTX_TYPE_POLL]; 129540510a63SSagi Grimberg } 129640510a63SSagi Grimberg 129740510a63SSagi Grimberg static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) 129840510a63SSagi Grimberg { 129940510a63SSagi Grimberg struct nvme_tcp_ctrl *ctrl = queue->ctrl; 130040510a63SSagi Grimberg int qid = nvme_tcp_queue_id(queue); 130140510a63SSagi Grimberg int n = 0; 130240510a63SSagi Grimberg 130340510a63SSagi Grimberg if (nvme_tcp_default_queue(queue)) 130440510a63SSagi Grimberg n = qid - 1; 130540510a63SSagi Grimberg else if (nvme_tcp_read_queue(queue)) 130640510a63SSagi Grimberg n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; 130740510a63SSagi Grimberg else if (nvme_tcp_poll_queue(queue)) 130840510a63SSagi Grimberg n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 130940510a63SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ] - 1; 131040510a63SSagi Grimberg queue->io_cpu = 
cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); 131140510a63SSagi Grimberg } 131240510a63SSagi Grimberg 13133f2304f8SSagi Grimberg static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, 13143f2304f8SSagi Grimberg int qid, size_t queue_size) 13153f2304f8SSagi Grimberg { 13163f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 13173f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 13183f2304f8SSagi Grimberg struct linger sol = { .l_onoff = 1, .l_linger = 0 }; 131940510a63SSagi Grimberg int ret, opt, rcv_pdu_size; 13203f2304f8SSagi Grimberg 13213f2304f8SSagi Grimberg queue->ctrl = ctrl; 13223f2304f8SSagi Grimberg INIT_LIST_HEAD(&queue->send_list); 13233f2304f8SSagi Grimberg spin_lock_init(&queue->lock); 13243f2304f8SSagi Grimberg INIT_WORK(&queue->io_work, nvme_tcp_io_work); 13253f2304f8SSagi Grimberg queue->queue_size = queue_size; 13263f2304f8SSagi Grimberg 13273f2304f8SSagi Grimberg if (qid > 0) 13289924b030SIsrael Rukshin queue->cmnd_capsule_len = nctrl->ioccsz * 16; 13293f2304f8SSagi Grimberg else 13303f2304f8SSagi Grimberg queue->cmnd_capsule_len = sizeof(struct nvme_command) + 13313f2304f8SSagi Grimberg NVME_TCP_ADMIN_CCSZ; 13323f2304f8SSagi Grimberg 13333f2304f8SSagi Grimberg ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, 13343f2304f8SSagi Grimberg IPPROTO_TCP, &queue->sock); 13353f2304f8SSagi Grimberg if (ret) { 13369924b030SIsrael Rukshin dev_err(nctrl->device, 13373f2304f8SSagi Grimberg "failed to create socket: %d\n", ret); 13383f2304f8SSagi Grimberg return ret; 13393f2304f8SSagi Grimberg } 13403f2304f8SSagi Grimberg 13413f2304f8SSagi Grimberg /* Single syn retry */ 13423f2304f8SSagi Grimberg opt = 1; 13433f2304f8SSagi Grimberg ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT, 13443f2304f8SSagi Grimberg (char *)&opt, sizeof(opt)); 13453f2304f8SSagi Grimberg if (ret) { 13469924b030SIsrael Rukshin dev_err(nctrl->device, 13473f2304f8SSagi Grimberg "failed to set TCP_SYNCNT sock opt %d\n", ret); 13483f2304f8SSagi Grimberg goto err_sock; 13493f2304f8SSagi Grimberg } 13503f2304f8SSagi Grimberg 13513f2304f8SSagi Grimberg /* Set TCP no delay */ 13523f2304f8SSagi Grimberg opt = 1; 13533f2304f8SSagi Grimberg ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, 13543f2304f8SSagi Grimberg TCP_NODELAY, (char *)&opt, sizeof(opt)); 13553f2304f8SSagi Grimberg if (ret) { 13569924b030SIsrael Rukshin dev_err(nctrl->device, 13573f2304f8SSagi Grimberg "failed to set TCP_NODELAY sock opt %d\n", ret); 13583f2304f8SSagi Grimberg goto err_sock; 13593f2304f8SSagi Grimberg } 13603f2304f8SSagi Grimberg 13613f2304f8SSagi Grimberg /* 13623f2304f8SSagi Grimberg * Cleanup whatever is sitting in the TCP transmit queue on socket 13633f2304f8SSagi Grimberg * close. This is done to prevent stale data from being sent should 13643f2304f8SSagi Grimberg * the network connection be restored before TCP times out. 
13653f2304f8SSagi Grimberg */ 13663f2304f8SSagi Grimberg ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER, 13673f2304f8SSagi Grimberg (char *)&sol, sizeof(sol)); 13683f2304f8SSagi Grimberg if (ret) { 13699924b030SIsrael Rukshin dev_err(nctrl->device, 13703f2304f8SSagi Grimberg "failed to set SO_LINGER sock opt %d\n", ret); 13713f2304f8SSagi Grimberg goto err_sock; 13723f2304f8SSagi Grimberg } 13733f2304f8SSagi Grimberg 13749912ade3SWunderlich, Mark if (so_priority > 0) { 13759912ade3SWunderlich, Mark ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY, 13769912ade3SWunderlich, Mark (char *)&so_priority, sizeof(so_priority)); 13779912ade3SWunderlich, Mark if (ret) { 13789912ade3SWunderlich, Mark dev_err(ctrl->ctrl.device, 13799912ade3SWunderlich, Mark "failed to set SO_PRIORITY sock opt, ret %d\n", 13809912ade3SWunderlich, Mark ret); 13819912ade3SWunderlich, Mark goto err_sock; 13829912ade3SWunderlich, Mark } 13839912ade3SWunderlich, Mark } 13849912ade3SWunderlich, Mark 1385bb13985dSIsrael Rukshin /* Set socket type of service */ 1386bb13985dSIsrael Rukshin if (nctrl->opts->tos >= 0) { 1387bb13985dSIsrael Rukshin opt = nctrl->opts->tos; 1388bb13985dSIsrael Rukshin ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS, 1389bb13985dSIsrael Rukshin (char *)&opt, sizeof(opt)); 1390bb13985dSIsrael Rukshin if (ret) { 1391bb13985dSIsrael Rukshin dev_err(nctrl->device, 1392bb13985dSIsrael Rukshin "failed to set IP_TOS sock opt %d\n", ret); 1393bb13985dSIsrael Rukshin goto err_sock; 1394bb13985dSIsrael Rukshin } 1395bb13985dSIsrael Rukshin } 1396bb13985dSIsrael Rukshin 13973f2304f8SSagi Grimberg queue->sock->sk->sk_allocation = GFP_ATOMIC; 139840510a63SSagi Grimberg nvme_tcp_set_queue_io_cpu(queue); 13993f2304f8SSagi Grimberg queue->request = NULL; 14003f2304f8SSagi Grimberg queue->data_remaining = 0; 14013f2304f8SSagi Grimberg queue->ddgst_remaining = 0; 14023f2304f8SSagi Grimberg queue->pdu_remaining = 0; 14033f2304f8SSagi Grimberg queue->pdu_offset = 0; 14043f2304f8SSagi Grimberg sk_set_memalloc(queue->sock->sk); 14053f2304f8SSagi Grimberg 14069924b030SIsrael Rukshin if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { 14073f2304f8SSagi Grimberg ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, 14083f2304f8SSagi Grimberg sizeof(ctrl->src_addr)); 14093f2304f8SSagi Grimberg if (ret) { 14109924b030SIsrael Rukshin dev_err(nctrl->device, 14113f2304f8SSagi Grimberg "failed to bind queue %d socket %d\n", 14123f2304f8SSagi Grimberg qid, ret); 14133f2304f8SSagi Grimberg goto err_sock; 14143f2304f8SSagi Grimberg } 14153f2304f8SSagi Grimberg } 14163f2304f8SSagi Grimberg 14173f2304f8SSagi Grimberg queue->hdr_digest = nctrl->opts->hdr_digest; 14183f2304f8SSagi Grimberg queue->data_digest = nctrl->opts->data_digest; 14193f2304f8SSagi Grimberg if (queue->hdr_digest || queue->data_digest) { 14203f2304f8SSagi Grimberg ret = nvme_tcp_alloc_crypto(queue); 14213f2304f8SSagi Grimberg if (ret) { 14229924b030SIsrael Rukshin dev_err(nctrl->device, 14233f2304f8SSagi Grimberg "failed to allocate queue %d crypto\n", qid); 14243f2304f8SSagi Grimberg goto err_sock; 14253f2304f8SSagi Grimberg } 14263f2304f8SSagi Grimberg } 14273f2304f8SSagi Grimberg 14283f2304f8SSagi Grimberg rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) + 14293f2304f8SSagi Grimberg nvme_tcp_hdgst_len(queue); 14303f2304f8SSagi Grimberg queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); 14313f2304f8SSagi Grimberg if (!queue->pdu) { 14323f2304f8SSagi Grimberg ret = -ENOMEM; 14333f2304f8SSagi Grimberg goto err_crypto; 14343f2304f8SSagi 
Grimberg } 14353f2304f8SSagi Grimberg 14369924b030SIsrael Rukshin dev_dbg(nctrl->device, "connecting queue %d\n", 14373f2304f8SSagi Grimberg nvme_tcp_queue_id(queue)); 14383f2304f8SSagi Grimberg 14393f2304f8SSagi Grimberg ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, 14403f2304f8SSagi Grimberg sizeof(ctrl->addr), 0); 14413f2304f8SSagi Grimberg if (ret) { 14429924b030SIsrael Rukshin dev_err(nctrl->device, 14433f2304f8SSagi Grimberg "failed to connect socket: %d\n", ret); 14443f2304f8SSagi Grimberg goto err_rcv_pdu; 14453f2304f8SSagi Grimberg } 14463f2304f8SSagi Grimberg 14473f2304f8SSagi Grimberg ret = nvme_tcp_init_connection(queue); 14483f2304f8SSagi Grimberg if (ret) 14493f2304f8SSagi Grimberg goto err_init_connect; 14503f2304f8SSagi Grimberg 14513f2304f8SSagi Grimberg queue->rd_enabled = true; 14523f2304f8SSagi Grimberg set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); 14533f2304f8SSagi Grimberg nvme_tcp_init_recv_ctx(queue); 14543f2304f8SSagi Grimberg 14553f2304f8SSagi Grimberg write_lock_bh(&queue->sock->sk->sk_callback_lock); 14563f2304f8SSagi Grimberg queue->sock->sk->sk_user_data = queue; 14573f2304f8SSagi Grimberg queue->state_change = queue->sock->sk->sk_state_change; 14583f2304f8SSagi Grimberg queue->data_ready = queue->sock->sk->sk_data_ready; 14593f2304f8SSagi Grimberg queue->write_space = queue->sock->sk->sk_write_space; 14603f2304f8SSagi Grimberg queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; 14613f2304f8SSagi Grimberg queue->sock->sk->sk_state_change = nvme_tcp_state_change; 14623f2304f8SSagi Grimberg queue->sock->sk->sk_write_space = nvme_tcp_write_space; 1463ac1c4e18SSebastian Andrzej Siewior #ifdef CONFIG_NET_RX_BUSY_POLL 14641a9460ceSSagi Grimberg queue->sock->sk->sk_ll_usec = 1; 1465ac1c4e18SSebastian Andrzej Siewior #endif 14663f2304f8SSagi Grimberg write_unlock_bh(&queue->sock->sk->sk_callback_lock); 14673f2304f8SSagi Grimberg 14683f2304f8SSagi Grimberg return 0; 14693f2304f8SSagi Grimberg 14703f2304f8SSagi Grimberg err_init_connect: 14713f2304f8SSagi Grimberg kernel_sock_shutdown(queue->sock, SHUT_RDWR); 14723f2304f8SSagi Grimberg err_rcv_pdu: 14733f2304f8SSagi Grimberg kfree(queue->pdu); 14743f2304f8SSagi Grimberg err_crypto: 14753f2304f8SSagi Grimberg if (queue->hdr_digest || queue->data_digest) 14763f2304f8SSagi Grimberg nvme_tcp_free_crypto(queue); 14773f2304f8SSagi Grimberg err_sock: 14783f2304f8SSagi Grimberg sock_release(queue->sock); 14793f2304f8SSagi Grimberg queue->sock = NULL; 14803f2304f8SSagi Grimberg return ret; 14813f2304f8SSagi Grimberg } 14823f2304f8SSagi Grimberg 14833f2304f8SSagi Grimberg static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue) 14843f2304f8SSagi Grimberg { 14853f2304f8SSagi Grimberg struct socket *sock = queue->sock; 14863f2304f8SSagi Grimberg 14873f2304f8SSagi Grimberg write_lock_bh(&sock->sk->sk_callback_lock); 14883f2304f8SSagi Grimberg sock->sk->sk_user_data = NULL; 14893f2304f8SSagi Grimberg sock->sk->sk_data_ready = queue->data_ready; 14903f2304f8SSagi Grimberg sock->sk->sk_state_change = queue->state_change; 14913f2304f8SSagi Grimberg sock->sk->sk_write_space = queue->write_space; 14923f2304f8SSagi Grimberg write_unlock_bh(&sock->sk->sk_callback_lock); 14933f2304f8SSagi Grimberg } 14943f2304f8SSagi Grimberg 14953f2304f8SSagi Grimberg static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) 14963f2304f8SSagi Grimberg { 14973f2304f8SSagi Grimberg kernel_sock_shutdown(queue->sock, SHUT_RDWR); 14983f2304f8SSagi Grimberg nvme_tcp_restore_sock_calls(queue); 14993f2304f8SSagi Grimberg 
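The block just above saves the socket's original state_change/data_ready/write_space callbacks and installs the driver's own under sk_callback_lock, and nvme_tcp_restore_sock_calls() undoes that before the queue is shut down. The following is a hedged, generic sketch of the same save/override pattern; the my_ctx and my_install_data_ready names are hypothetical and do not appear in this file.

#include <net/sock.h>

struct my_ctx {
	void (*saved_data_ready)(struct sock *);
};

static void my_install_data_ready(struct sock *sk, struct my_ctx *ctx,
		void (*hook)(struct sock *))
{
	write_lock_bh(&sk->sk_callback_lock);
	ctx->saved_data_ready = sk->sk_data_ready;	/* remember the original */
	sk->sk_user_data = ctx;				/* hook can look up ctx */
	sk->sk_data_ready = hook;			/* install the override */
	write_unlock_bh(&sk->sk_callback_lock);
}

Restoring simply reverses the assignments under the same lock, which is exactly what nvme_tcp_restore_sock_calls() does above.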
cancel_work_sync(&queue->io_work); 15003f2304f8SSagi Grimberg } 15013f2304f8SSagi Grimberg 15023f2304f8SSagi Grimberg static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) 15033f2304f8SSagi Grimberg { 15043f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 15053f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 15063f2304f8SSagi Grimberg 15073f2304f8SSagi Grimberg if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) 15083f2304f8SSagi Grimberg return; 15093f2304f8SSagi Grimberg 15103f2304f8SSagi Grimberg __nvme_tcp_stop_queue(queue); 15113f2304f8SSagi Grimberg } 15123f2304f8SSagi Grimberg 15133f2304f8SSagi Grimberg static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) 15143f2304f8SSagi Grimberg { 15153f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 15163f2304f8SSagi Grimberg int ret; 15173f2304f8SSagi Grimberg 15183f2304f8SSagi Grimberg if (idx) 151926c68227SSagi Grimberg ret = nvmf_connect_io_queue(nctrl, idx, false); 15203f2304f8SSagi Grimberg else 15213f2304f8SSagi Grimberg ret = nvmf_connect_admin_queue(nctrl); 15223f2304f8SSagi Grimberg 15233f2304f8SSagi Grimberg if (!ret) { 15243f2304f8SSagi Grimberg set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags); 15253f2304f8SSagi Grimberg } else { 1526f34e2589SSagi Grimberg if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags)) 15273f2304f8SSagi Grimberg __nvme_tcp_stop_queue(&ctrl->queues[idx]); 15283f2304f8SSagi Grimberg dev_err(nctrl->device, 15293f2304f8SSagi Grimberg "failed to connect queue: %d ret=%d\n", idx, ret); 15303f2304f8SSagi Grimberg } 15313f2304f8SSagi Grimberg return ret; 15323f2304f8SSagi Grimberg } 15333f2304f8SSagi Grimberg 15343f2304f8SSagi Grimberg static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, 15353f2304f8SSagi Grimberg bool admin) 15363f2304f8SSagi Grimberg { 15373f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 15383f2304f8SSagi Grimberg struct blk_mq_tag_set *set; 15393f2304f8SSagi Grimberg int ret; 15403f2304f8SSagi Grimberg 15413f2304f8SSagi Grimberg if (admin) { 15423f2304f8SSagi Grimberg set = &ctrl->admin_tag_set; 15433f2304f8SSagi Grimberg memset(set, 0, sizeof(*set)); 15443f2304f8SSagi Grimberg set->ops = &nvme_tcp_admin_mq_ops; 15453f2304f8SSagi Grimberg set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 15463f2304f8SSagi Grimberg set->reserved_tags = 2; /* connect + keep-alive */ 15473f2304f8SSagi Grimberg set->numa_node = NUMA_NO_NODE; 15483f2304f8SSagi Grimberg set->cmd_size = sizeof(struct nvme_tcp_request); 15493f2304f8SSagi Grimberg set->driver_data = ctrl; 15503f2304f8SSagi Grimberg set->nr_hw_queues = 1; 15513f2304f8SSagi Grimberg set->timeout = ADMIN_TIMEOUT; 15523f2304f8SSagi Grimberg } else { 15533f2304f8SSagi Grimberg set = &ctrl->tag_set; 15543f2304f8SSagi Grimberg memset(set, 0, sizeof(*set)); 15553f2304f8SSagi Grimberg set->ops = &nvme_tcp_mq_ops; 15563f2304f8SSagi Grimberg set->queue_depth = nctrl->sqsize + 1; 15573f2304f8SSagi Grimberg set->reserved_tags = 1; /* fabric connect */ 15583f2304f8SSagi Grimberg set->numa_node = NUMA_NO_NODE; 15593f2304f8SSagi Grimberg set->flags = BLK_MQ_F_SHOULD_MERGE; 15603f2304f8SSagi Grimberg set->cmd_size = sizeof(struct nvme_tcp_request); 15613f2304f8SSagi Grimberg set->driver_data = ctrl; 15623f2304f8SSagi Grimberg set->nr_hw_queues = nctrl->queue_count - 1; 15633f2304f8SSagi Grimberg set->timeout = NVME_IO_TIMEOUT; 15641a9460ceSSagi Grimberg set->nr_maps = nctrl->opts->nr_poll_queues ? 
HCTX_MAX_TYPES : 2; 15653f2304f8SSagi Grimberg } 15663f2304f8SSagi Grimberg 15673f2304f8SSagi Grimberg ret = blk_mq_alloc_tag_set(set); 15683f2304f8SSagi Grimberg if (ret) 15693f2304f8SSagi Grimberg return ERR_PTR(ret); 15703f2304f8SSagi Grimberg 15713f2304f8SSagi Grimberg return set; 15723f2304f8SSagi Grimberg } 15733f2304f8SSagi Grimberg 15743f2304f8SSagi Grimberg static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) 15753f2304f8SSagi Grimberg { 15763f2304f8SSagi Grimberg if (to_tcp_ctrl(ctrl)->async_req.pdu) { 15773f2304f8SSagi Grimberg nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); 15783f2304f8SSagi Grimberg to_tcp_ctrl(ctrl)->async_req.pdu = NULL; 15793f2304f8SSagi Grimberg } 15803f2304f8SSagi Grimberg 15813f2304f8SSagi Grimberg nvme_tcp_free_queue(ctrl, 0); 15823f2304f8SSagi Grimberg } 15833f2304f8SSagi Grimberg 15843f2304f8SSagi Grimberg static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) 15853f2304f8SSagi Grimberg { 15863f2304f8SSagi Grimberg int i; 15873f2304f8SSagi Grimberg 15883f2304f8SSagi Grimberg for (i = 1; i < ctrl->queue_count; i++) 15893f2304f8SSagi Grimberg nvme_tcp_free_queue(ctrl, i); 15903f2304f8SSagi Grimberg } 15913f2304f8SSagi Grimberg 15923f2304f8SSagi Grimberg static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) 15933f2304f8SSagi Grimberg { 15943f2304f8SSagi Grimberg int i; 15953f2304f8SSagi Grimberg 15963f2304f8SSagi Grimberg for (i = 1; i < ctrl->queue_count; i++) 15973f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, i); 15983f2304f8SSagi Grimberg } 15993f2304f8SSagi Grimberg 16003f2304f8SSagi Grimberg static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl) 16013f2304f8SSagi Grimberg { 16023f2304f8SSagi Grimberg int i, ret = 0; 16033f2304f8SSagi Grimberg 16043f2304f8SSagi Grimberg for (i = 1; i < ctrl->queue_count; i++) { 16053f2304f8SSagi Grimberg ret = nvme_tcp_start_queue(ctrl, i); 16063f2304f8SSagi Grimberg if (ret) 16073f2304f8SSagi Grimberg goto out_stop_queues; 16083f2304f8SSagi Grimberg } 16093f2304f8SSagi Grimberg 16103f2304f8SSagi Grimberg return 0; 16113f2304f8SSagi Grimberg 16123f2304f8SSagi Grimberg out_stop_queues: 16133f2304f8SSagi Grimberg for (i--; i >= 1; i--) 16143f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, i); 16153f2304f8SSagi Grimberg return ret; 16163f2304f8SSagi Grimberg } 16173f2304f8SSagi Grimberg 16183f2304f8SSagi Grimberg static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) 16193f2304f8SSagi Grimberg { 16203f2304f8SSagi Grimberg int ret; 16213f2304f8SSagi Grimberg 16223f2304f8SSagi Grimberg ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); 16233f2304f8SSagi Grimberg if (ret) 16243f2304f8SSagi Grimberg return ret; 16253f2304f8SSagi Grimberg 16263f2304f8SSagi Grimberg ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); 16273f2304f8SSagi Grimberg if (ret) 16283f2304f8SSagi Grimberg goto out_free_queue; 16293f2304f8SSagi Grimberg 16303f2304f8SSagi Grimberg return 0; 16313f2304f8SSagi Grimberg 16323f2304f8SSagi Grimberg out_free_queue: 16333f2304f8SSagi Grimberg nvme_tcp_free_queue(ctrl, 0); 16343f2304f8SSagi Grimberg return ret; 16353f2304f8SSagi Grimberg } 16363f2304f8SSagi Grimberg 1637efb973b1SSagi Grimberg static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) 16383f2304f8SSagi Grimberg { 16393f2304f8SSagi Grimberg int i, ret; 16403f2304f8SSagi Grimberg 16413f2304f8SSagi Grimberg for (i = 1; i < ctrl->queue_count; i++) { 16423f2304f8SSagi Grimberg ret = nvme_tcp_alloc_queue(ctrl, i, 16433f2304f8SSagi Grimberg ctrl->sqsize + 1); 16443f2304f8SSagi Grimberg if (ret) 16453f2304f8SSagi Grimberg 
goto out_free_queues; 16463f2304f8SSagi Grimberg } 16473f2304f8SSagi Grimberg 16483f2304f8SSagi Grimberg return 0; 16493f2304f8SSagi Grimberg 16503f2304f8SSagi Grimberg out_free_queues: 16513f2304f8SSagi Grimberg for (i--; i >= 1; i--) 16523f2304f8SSagi Grimberg nvme_tcp_free_queue(ctrl, i); 16533f2304f8SSagi Grimberg 16543f2304f8SSagi Grimberg return ret; 16553f2304f8SSagi Grimberg } 16563f2304f8SSagi Grimberg 16573f2304f8SSagi Grimberg static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) 16583f2304f8SSagi Grimberg { 1659873946f4SSagi Grimberg unsigned int nr_io_queues; 1660873946f4SSagi Grimberg 1661873946f4SSagi Grimberg nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); 1662873946f4SSagi Grimberg nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); 16631a9460ceSSagi Grimberg nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); 1664873946f4SSagi Grimberg 1665873946f4SSagi Grimberg return nr_io_queues; 16663f2304f8SSagi Grimberg } 16673f2304f8SSagi Grimberg 166864861993SSagi Grimberg static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl, 166964861993SSagi Grimberg unsigned int nr_io_queues) 167064861993SSagi Grimberg { 167164861993SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 167264861993SSagi Grimberg struct nvmf_ctrl_options *opts = nctrl->opts; 167364861993SSagi Grimberg 167464861993SSagi Grimberg if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { 167564861993SSagi Grimberg /* 167664861993SSagi Grimberg * separate read/write queues 167764861993SSagi Grimberg * hand out dedicated default queues only after we have 167864861993SSagi Grimberg * sufficient read queues. 167964861993SSagi Grimberg */ 168064861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; 168164861993SSagi Grimberg nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; 168264861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT] = 168364861993SSagi Grimberg min(opts->nr_write_queues, nr_io_queues); 168464861993SSagi Grimberg nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 168564861993SSagi Grimberg } else { 168664861993SSagi Grimberg /* 168764861993SSagi Grimberg * shared read/write queues 168864861993SSagi Grimberg * either no write queues were requested, or we don't have 168964861993SSagi Grimberg * sufficient queue count to have dedicated default queues. 
169064861993SSagi Grimberg */ 169164861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT] = 169264861993SSagi Grimberg min(opts->nr_io_queues, nr_io_queues); 169364861993SSagi Grimberg nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 169464861993SSagi Grimberg } 16951a9460ceSSagi Grimberg 16961a9460ceSSagi Grimberg if (opts->nr_poll_queues && nr_io_queues) { 16971a9460ceSSagi Grimberg /* map dedicated poll queues only if we have queues left */ 16981a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_POLL] = 16991a9460ceSSagi Grimberg min(opts->nr_poll_queues, nr_io_queues); 17001a9460ceSSagi Grimberg } 170164861993SSagi Grimberg } 170264861993SSagi Grimberg 1703efb973b1SSagi Grimberg static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) 17043f2304f8SSagi Grimberg { 17053f2304f8SSagi Grimberg unsigned int nr_io_queues; 17063f2304f8SSagi Grimberg int ret; 17073f2304f8SSagi Grimberg 17083f2304f8SSagi Grimberg nr_io_queues = nvme_tcp_nr_io_queues(ctrl); 17093f2304f8SSagi Grimberg ret = nvme_set_queue_count(ctrl, &nr_io_queues); 17103f2304f8SSagi Grimberg if (ret) 17113f2304f8SSagi Grimberg return ret; 17123f2304f8SSagi Grimberg 17133f2304f8SSagi Grimberg ctrl->queue_count = nr_io_queues + 1; 17143f2304f8SSagi Grimberg if (ctrl->queue_count < 2) 17153f2304f8SSagi Grimberg return 0; 17163f2304f8SSagi Grimberg 17173f2304f8SSagi Grimberg dev_info(ctrl->device, 17183f2304f8SSagi Grimberg "creating %d I/O queues.\n", nr_io_queues); 17193f2304f8SSagi Grimberg 172064861993SSagi Grimberg nvme_tcp_set_io_queues(ctrl, nr_io_queues); 172164861993SSagi Grimberg 1722efb973b1SSagi Grimberg return __nvme_tcp_alloc_io_queues(ctrl); 17233f2304f8SSagi Grimberg } 17243f2304f8SSagi Grimberg 17253f2304f8SSagi Grimberg static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) 17263f2304f8SSagi Grimberg { 17273f2304f8SSagi Grimberg nvme_tcp_stop_io_queues(ctrl); 17283f2304f8SSagi Grimberg if (remove) { 17293f2304f8SSagi Grimberg blk_cleanup_queue(ctrl->connect_q); 17303f2304f8SSagi Grimberg blk_mq_free_tag_set(ctrl->tagset); 17313f2304f8SSagi Grimberg } 17323f2304f8SSagi Grimberg nvme_tcp_free_io_queues(ctrl); 17333f2304f8SSagi Grimberg } 17343f2304f8SSagi Grimberg 17353f2304f8SSagi Grimberg static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) 17363f2304f8SSagi Grimberg { 17373f2304f8SSagi Grimberg int ret; 17383f2304f8SSagi Grimberg 1739efb973b1SSagi Grimberg ret = nvme_tcp_alloc_io_queues(ctrl); 17403f2304f8SSagi Grimberg if (ret) 17413f2304f8SSagi Grimberg return ret; 17423f2304f8SSagi Grimberg 17433f2304f8SSagi Grimberg if (new) { 17443f2304f8SSagi Grimberg ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false); 17453f2304f8SSagi Grimberg if (IS_ERR(ctrl->tagset)) { 17463f2304f8SSagi Grimberg ret = PTR_ERR(ctrl->tagset); 17473f2304f8SSagi Grimberg goto out_free_io_queues; 17483f2304f8SSagi Grimberg } 17493f2304f8SSagi Grimberg 17503f2304f8SSagi Grimberg ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); 17513f2304f8SSagi Grimberg if (IS_ERR(ctrl->connect_q)) { 17523f2304f8SSagi Grimberg ret = PTR_ERR(ctrl->connect_q); 17533f2304f8SSagi Grimberg goto out_free_tag_set; 17543f2304f8SSagi Grimberg } 17553f2304f8SSagi Grimberg } else { 17563f2304f8SSagi Grimberg blk_mq_update_nr_hw_queues(ctrl->tagset, 17573f2304f8SSagi Grimberg ctrl->queue_count - 1); 17583f2304f8SSagi Grimberg } 17593f2304f8SSagi Grimberg 17603f2304f8SSagi Grimberg ret = nvme_tcp_start_io_queues(ctrl); 17613f2304f8SSagi Grimberg if (ret) 17623f2304f8SSagi Grimberg goto out_cleanup_connect_q; 17633f2304f8SSagi 
Grimberg 17643f2304f8SSagi Grimberg return 0; 17653f2304f8SSagi Grimberg 17663f2304f8SSagi Grimberg out_cleanup_connect_q: 1767e85037a2SSagi Grimberg if (new) 17683f2304f8SSagi Grimberg blk_cleanup_queue(ctrl->connect_q); 17693f2304f8SSagi Grimberg out_free_tag_set: 17703f2304f8SSagi Grimberg if (new) 17713f2304f8SSagi Grimberg blk_mq_free_tag_set(ctrl->tagset); 17723f2304f8SSagi Grimberg out_free_io_queues: 17733f2304f8SSagi Grimberg nvme_tcp_free_io_queues(ctrl); 17743f2304f8SSagi Grimberg return ret; 17753f2304f8SSagi Grimberg } 17763f2304f8SSagi Grimberg 17773f2304f8SSagi Grimberg static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) 17783f2304f8SSagi Grimberg { 17793f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, 0); 17803f2304f8SSagi Grimberg if (remove) { 17813f2304f8SSagi Grimberg blk_cleanup_queue(ctrl->admin_q); 1782e7832cb4SSagi Grimberg blk_cleanup_queue(ctrl->fabrics_q); 17833f2304f8SSagi Grimberg blk_mq_free_tag_set(ctrl->admin_tagset); 17843f2304f8SSagi Grimberg } 17853f2304f8SSagi Grimberg nvme_tcp_free_admin_queue(ctrl); 17863f2304f8SSagi Grimberg } 17873f2304f8SSagi Grimberg 17883f2304f8SSagi Grimberg static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) 17893f2304f8SSagi Grimberg { 17903f2304f8SSagi Grimberg int error; 17913f2304f8SSagi Grimberg 17923f2304f8SSagi Grimberg error = nvme_tcp_alloc_admin_queue(ctrl); 17933f2304f8SSagi Grimberg if (error) 17943f2304f8SSagi Grimberg return error; 17953f2304f8SSagi Grimberg 17963f2304f8SSagi Grimberg if (new) { 17973f2304f8SSagi Grimberg ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true); 17983f2304f8SSagi Grimberg if (IS_ERR(ctrl->admin_tagset)) { 17993f2304f8SSagi Grimberg error = PTR_ERR(ctrl->admin_tagset); 18003f2304f8SSagi Grimberg goto out_free_queue; 18013f2304f8SSagi Grimberg } 18023f2304f8SSagi Grimberg 1803e7832cb4SSagi Grimberg ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset); 1804e7832cb4SSagi Grimberg if (IS_ERR(ctrl->fabrics_q)) { 1805e7832cb4SSagi Grimberg error = PTR_ERR(ctrl->fabrics_q); 1806e7832cb4SSagi Grimberg goto out_free_tagset; 1807e7832cb4SSagi Grimberg } 1808e7832cb4SSagi Grimberg 18093f2304f8SSagi Grimberg ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset); 18103f2304f8SSagi Grimberg if (IS_ERR(ctrl->admin_q)) { 18113f2304f8SSagi Grimberg error = PTR_ERR(ctrl->admin_q); 1812e7832cb4SSagi Grimberg goto out_cleanup_fabrics_q; 18133f2304f8SSagi Grimberg } 18143f2304f8SSagi Grimberg } 18153f2304f8SSagi Grimberg 18163f2304f8SSagi Grimberg error = nvme_tcp_start_queue(ctrl, 0); 18173f2304f8SSagi Grimberg if (error) 18183f2304f8SSagi Grimberg goto out_cleanup_queue; 18193f2304f8SSagi Grimberg 1820c0f2f45bSSagi Grimberg error = nvme_enable_ctrl(ctrl); 18213f2304f8SSagi Grimberg if (error) 18223f2304f8SSagi Grimberg goto out_stop_queue; 18233f2304f8SSagi Grimberg 1824e7832cb4SSagi Grimberg blk_mq_unquiesce_queue(ctrl->admin_q); 1825e7832cb4SSagi Grimberg 18263f2304f8SSagi Grimberg error = nvme_init_identify(ctrl); 18273f2304f8SSagi Grimberg if (error) 18283f2304f8SSagi Grimberg goto out_stop_queue; 18293f2304f8SSagi Grimberg 18303f2304f8SSagi Grimberg return 0; 18313f2304f8SSagi Grimberg 18323f2304f8SSagi Grimberg out_stop_queue: 18333f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, 0); 18343f2304f8SSagi Grimberg out_cleanup_queue: 18353f2304f8SSagi Grimberg if (new) 18363f2304f8SSagi Grimberg blk_cleanup_queue(ctrl->admin_q); 1837e7832cb4SSagi Grimberg out_cleanup_fabrics_q: 1838e7832cb4SSagi Grimberg if (new) 1839e7832cb4SSagi Grimberg 
blk_cleanup_queue(ctrl->fabrics_q); 18403f2304f8SSagi Grimberg out_free_tagset: 18413f2304f8SSagi Grimberg if (new) 18423f2304f8SSagi Grimberg blk_mq_free_tag_set(ctrl->admin_tagset); 18433f2304f8SSagi Grimberg out_free_queue: 18443f2304f8SSagi Grimberg nvme_tcp_free_admin_queue(ctrl); 18453f2304f8SSagi Grimberg return error; 18463f2304f8SSagi Grimberg } 18473f2304f8SSagi Grimberg 18483f2304f8SSagi Grimberg static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, 18493f2304f8SSagi Grimberg bool remove) 18503f2304f8SSagi Grimberg { 18513f2304f8SSagi Grimberg blk_mq_quiesce_queue(ctrl->admin_q); 18523f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, 0); 1853622b8b68SMing Lei if (ctrl->admin_tagset) { 18547a425896SSagi Grimberg blk_mq_tagset_busy_iter(ctrl->admin_tagset, 18557a425896SSagi Grimberg nvme_cancel_request, ctrl); 1856622b8b68SMing Lei blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); 1857622b8b68SMing Lei } 1858e7832cb4SSagi Grimberg if (remove) 18593f2304f8SSagi Grimberg blk_mq_unquiesce_queue(ctrl->admin_q); 18603f2304f8SSagi Grimberg nvme_tcp_destroy_admin_queue(ctrl, remove); 18613f2304f8SSagi Grimberg } 18623f2304f8SSagi Grimberg 18633f2304f8SSagi Grimberg static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, 18643f2304f8SSagi Grimberg bool remove) 18653f2304f8SSagi Grimberg { 18663f2304f8SSagi Grimberg if (ctrl->queue_count <= 1) 18673f2304f8SSagi Grimberg return; 18683f2304f8SSagi Grimberg nvme_stop_queues(ctrl); 18693f2304f8SSagi Grimberg nvme_tcp_stop_io_queues(ctrl); 1870622b8b68SMing Lei if (ctrl->tagset) { 18717a425896SSagi Grimberg blk_mq_tagset_busy_iter(ctrl->tagset, 18727a425896SSagi Grimberg nvme_cancel_request, ctrl); 1873622b8b68SMing Lei blk_mq_tagset_wait_completed_request(ctrl->tagset); 1874622b8b68SMing Lei } 18753f2304f8SSagi Grimberg if (remove) 18763f2304f8SSagi Grimberg nvme_start_queues(ctrl); 18773f2304f8SSagi Grimberg nvme_tcp_destroy_io_queues(ctrl, remove); 18783f2304f8SSagi Grimberg } 18793f2304f8SSagi Grimberg 18803f2304f8SSagi Grimberg static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) 18813f2304f8SSagi Grimberg { 18823f2304f8SSagi Grimberg /* If we are resetting/deleting then do nothing */ 18833f2304f8SSagi Grimberg if (ctrl->state != NVME_CTRL_CONNECTING) { 18843f2304f8SSagi Grimberg WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW || 18853f2304f8SSagi Grimberg ctrl->state == NVME_CTRL_LIVE); 18863f2304f8SSagi Grimberg return; 18873f2304f8SSagi Grimberg } 18883f2304f8SSagi Grimberg 18893f2304f8SSagi Grimberg if (nvmf_should_reconnect(ctrl)) { 18903f2304f8SSagi Grimberg dev_info(ctrl->device, "Reconnecting in %d seconds...\n", 18913f2304f8SSagi Grimberg ctrl->opts->reconnect_delay); 18923f2304f8SSagi Grimberg queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, 18933f2304f8SSagi Grimberg ctrl->opts->reconnect_delay * HZ); 18943f2304f8SSagi Grimberg } else { 18953f2304f8SSagi Grimberg dev_info(ctrl->device, "Removing controller...\n"); 18963f2304f8SSagi Grimberg nvme_delete_ctrl(ctrl); 18973f2304f8SSagi Grimberg } 18983f2304f8SSagi Grimberg } 18993f2304f8SSagi Grimberg 19003f2304f8SSagi Grimberg static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) 19013f2304f8SSagi Grimberg { 19023f2304f8SSagi Grimberg struct nvmf_ctrl_options *opts = ctrl->opts; 1903312910f4SColin Ian King int ret; 19043f2304f8SSagi Grimberg 19053f2304f8SSagi Grimberg ret = nvme_tcp_configure_admin_queue(ctrl, new); 19063f2304f8SSagi Grimberg if (ret) 19073f2304f8SSagi Grimberg return ret; 19083f2304f8SSagi Grimberg 
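Both teardown helpers above rely on blk_mq_tagset_busy_iter() to cancel whatever is still outstanding and then wait for those completions. As a hedged illustration of that interface (the example_* names are hypothetical; only the iterator signature mirrors what nvme_cancel_request() uses here):

/*
 * Illustrative sketch only: count still-busy requests in a tag set.
 * The callback returns true to keep iterating.
 */
static bool example_count_busy(struct request *rq, void *data, bool reserved)
{
	unsigned int *count = data;

	(*count)++;
	return true;
}

static unsigned int example_busy_requests(struct blk_mq_tag_set *set)
{
	unsigned int count = 0;

	blk_mq_tagset_busy_iter(set, example_count_busy, &count);
	return count;
}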
19093f2304f8SSagi Grimberg if (ctrl->icdoff) { 19103f2304f8SSagi Grimberg dev_err(ctrl->device, "icdoff is not supported!\n"); 19113f2304f8SSagi Grimberg goto destroy_admin; 19123f2304f8SSagi Grimberg } 19133f2304f8SSagi Grimberg 19143f2304f8SSagi Grimberg if (opts->queue_size > ctrl->sqsize + 1) 19153f2304f8SSagi Grimberg dev_warn(ctrl->device, 19163f2304f8SSagi Grimberg "queue_size %zu > ctrl sqsize %u, clamping down\n", 19173f2304f8SSagi Grimberg opts->queue_size, ctrl->sqsize + 1); 19183f2304f8SSagi Grimberg 19193f2304f8SSagi Grimberg if (ctrl->sqsize + 1 > ctrl->maxcmd) { 19203f2304f8SSagi Grimberg dev_warn(ctrl->device, 19213f2304f8SSagi Grimberg "sqsize %u > ctrl maxcmd %u, clamping down\n", 19223f2304f8SSagi Grimberg ctrl->sqsize + 1, ctrl->maxcmd); 19233f2304f8SSagi Grimberg ctrl->sqsize = ctrl->maxcmd - 1; 19243f2304f8SSagi Grimberg } 19253f2304f8SSagi Grimberg 19263f2304f8SSagi Grimberg if (ctrl->queue_count > 1) { 19273f2304f8SSagi Grimberg ret = nvme_tcp_configure_io_queues(ctrl, new); 19283f2304f8SSagi Grimberg if (ret) 19293f2304f8SSagi Grimberg goto destroy_admin; 19303f2304f8SSagi Grimberg } 19313f2304f8SSagi Grimberg 19323f2304f8SSagi Grimberg if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { 1933bea54ef5SIsrael Rukshin /* 1934bea54ef5SIsrael Rukshin * state change failure is ok if we're in DELETING state, 1935bea54ef5SIsrael Rukshin * unless we're during creation of a new controller to 1936bea54ef5SIsrael Rukshin * avoid races with teardown flow. 1937bea54ef5SIsrael Rukshin */ 19383f2304f8SSagi Grimberg WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING); 1939bea54ef5SIsrael Rukshin WARN_ON_ONCE(new); 19403f2304f8SSagi Grimberg ret = -EINVAL; 19413f2304f8SSagi Grimberg goto destroy_io; 19423f2304f8SSagi Grimberg } 19433f2304f8SSagi Grimberg 19443f2304f8SSagi Grimberg nvme_start_ctrl(ctrl); 19453f2304f8SSagi Grimberg return 0; 19463f2304f8SSagi Grimberg 19473f2304f8SSagi Grimberg destroy_io: 19483f2304f8SSagi Grimberg if (ctrl->queue_count > 1) 19493f2304f8SSagi Grimberg nvme_tcp_destroy_io_queues(ctrl, new); 19503f2304f8SSagi Grimberg destroy_admin: 19513f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, 0); 19523f2304f8SSagi Grimberg nvme_tcp_destroy_admin_queue(ctrl, new); 19533f2304f8SSagi Grimberg return ret; 19543f2304f8SSagi Grimberg } 19553f2304f8SSagi Grimberg 19563f2304f8SSagi Grimberg static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work) 19573f2304f8SSagi Grimberg { 19583f2304f8SSagi Grimberg struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work), 19593f2304f8SSagi Grimberg struct nvme_tcp_ctrl, connect_work); 19603f2304f8SSagi Grimberg struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; 19613f2304f8SSagi Grimberg 19623f2304f8SSagi Grimberg ++ctrl->nr_reconnects; 19633f2304f8SSagi Grimberg 19643f2304f8SSagi Grimberg if (nvme_tcp_setup_ctrl(ctrl, false)) 19653f2304f8SSagi Grimberg goto requeue; 19663f2304f8SSagi Grimberg 196756a77d26SColin Ian King dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", 19683f2304f8SSagi Grimberg ctrl->nr_reconnects); 19693f2304f8SSagi Grimberg 19703f2304f8SSagi Grimberg ctrl->nr_reconnects = 0; 19713f2304f8SSagi Grimberg 19723f2304f8SSagi Grimberg return; 19733f2304f8SSagi Grimberg 19743f2304f8SSagi Grimberg requeue: 19753f2304f8SSagi Grimberg dev_info(ctrl->device, "Failed reconnect attempt %d\n", 19763f2304f8SSagi Grimberg ctrl->nr_reconnects); 19773f2304f8SSagi Grimberg nvme_tcp_reconnect_or_remove(ctrl); 19783f2304f8SSagi Grimberg } 19793f2304f8SSagi Grimberg 19803f2304f8SSagi Grimberg static void 
nvme_tcp_error_recovery_work(struct work_struct *work) 19813f2304f8SSagi Grimberg { 19823f2304f8SSagi Grimberg struct nvme_tcp_ctrl *tcp_ctrl = container_of(work, 19833f2304f8SSagi Grimberg struct nvme_tcp_ctrl, err_work); 19843f2304f8SSagi Grimberg struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; 19853f2304f8SSagi Grimberg 19863f2304f8SSagi Grimberg nvme_stop_keep_alive(ctrl); 19873f2304f8SSagi Grimberg nvme_tcp_teardown_io_queues(ctrl, false); 19883f2304f8SSagi Grimberg /* unquiesce to fail fast pending requests */ 19893f2304f8SSagi Grimberg nvme_start_queues(ctrl); 19903f2304f8SSagi Grimberg nvme_tcp_teardown_admin_queue(ctrl, false); 1991e7832cb4SSagi Grimberg blk_mq_unquiesce_queue(ctrl->admin_q); 19923f2304f8SSagi Grimberg 19933f2304f8SSagi Grimberg if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { 19943f2304f8SSagi Grimberg /* state change failure is ok if we're in DELETING state */ 19953f2304f8SSagi Grimberg WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING); 19963f2304f8SSagi Grimberg return; 19973f2304f8SSagi Grimberg } 19983f2304f8SSagi Grimberg 19993f2304f8SSagi Grimberg nvme_tcp_reconnect_or_remove(ctrl); 20003f2304f8SSagi Grimberg } 20013f2304f8SSagi Grimberg 20023f2304f8SSagi Grimberg static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) 20033f2304f8SSagi Grimberg { 2004794a4cb3SSagi Grimberg cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); 2005794a4cb3SSagi Grimberg cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); 2006794a4cb3SSagi Grimberg 20073f2304f8SSagi Grimberg nvme_tcp_teardown_io_queues(ctrl, shutdown); 2008e7832cb4SSagi Grimberg blk_mq_quiesce_queue(ctrl->admin_q); 20093f2304f8SSagi Grimberg if (shutdown) 20103f2304f8SSagi Grimberg nvme_shutdown_ctrl(ctrl); 20113f2304f8SSagi Grimberg else 2012b5b05048SSagi Grimberg nvme_disable_ctrl(ctrl); 20133f2304f8SSagi Grimberg nvme_tcp_teardown_admin_queue(ctrl, shutdown); 20143f2304f8SSagi Grimberg } 20153f2304f8SSagi Grimberg 20163f2304f8SSagi Grimberg static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) 20173f2304f8SSagi Grimberg { 20183f2304f8SSagi Grimberg nvme_tcp_teardown_ctrl(ctrl, true); 20193f2304f8SSagi Grimberg } 20203f2304f8SSagi Grimberg 20213f2304f8SSagi Grimberg static void nvme_reset_ctrl_work(struct work_struct *work) 20223f2304f8SSagi Grimberg { 20233f2304f8SSagi Grimberg struct nvme_ctrl *ctrl = 20243f2304f8SSagi Grimberg container_of(work, struct nvme_ctrl, reset_work); 20253f2304f8SSagi Grimberg 20263f2304f8SSagi Grimberg nvme_stop_ctrl(ctrl); 20273f2304f8SSagi Grimberg nvme_tcp_teardown_ctrl(ctrl, false); 20283f2304f8SSagi Grimberg 20293f2304f8SSagi Grimberg if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { 20303f2304f8SSagi Grimberg /* state change failure is ok if we're in DELETING state */ 20313f2304f8SSagi Grimberg WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING); 20323f2304f8SSagi Grimberg return; 20333f2304f8SSagi Grimberg } 20343f2304f8SSagi Grimberg 20353f2304f8SSagi Grimberg if (nvme_tcp_setup_ctrl(ctrl, false)) 20363f2304f8SSagi Grimberg goto out_fail; 20373f2304f8SSagi Grimberg 20383f2304f8SSagi Grimberg return; 20393f2304f8SSagi Grimberg 20403f2304f8SSagi Grimberg out_fail: 20413f2304f8SSagi Grimberg ++ctrl->nr_reconnects; 20423f2304f8SSagi Grimberg nvme_tcp_reconnect_or_remove(ctrl); 20433f2304f8SSagi Grimberg } 20443f2304f8SSagi Grimberg 20453f2304f8SSagi Grimberg static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl) 20463f2304f8SSagi Grimberg { 20473f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 20483f2304f8SSagi Grimberg 
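A worked example of the capsule-count clamping performed in nvme_tcp_setup_ctrl() above (the numbers are illustrative, not from the source): if the user requested queue_size 128 but the controller negotiated sqsize 63 (64 entries), the first warning fires and the effective depth stays at 64; if that controller also reports MAXCMD 32, the second warning fires and sqsize is reduced to 31 so no more than 32 commands are ever outstanding on a queue. Since nvme_tcp_alloc_tagset() sizes the I/O tag set as sqsize + 1, doing this clamping before nvme_tcp_configure_io_queues() keeps the block layer from issuing more commands than the controller will accept.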
20493f2304f8SSagi Grimberg if (list_empty(&ctrl->list)) 20503f2304f8SSagi Grimberg goto free_ctrl; 20513f2304f8SSagi Grimberg 20523f2304f8SSagi Grimberg mutex_lock(&nvme_tcp_ctrl_mutex); 20533f2304f8SSagi Grimberg list_del(&ctrl->list); 20543f2304f8SSagi Grimberg mutex_unlock(&nvme_tcp_ctrl_mutex); 20553f2304f8SSagi Grimberg 20563f2304f8SSagi Grimberg nvmf_free_options(nctrl->opts); 20573f2304f8SSagi Grimberg free_ctrl: 20583f2304f8SSagi Grimberg kfree(ctrl->queues); 20593f2304f8SSagi Grimberg kfree(ctrl); 20603f2304f8SSagi Grimberg } 20613f2304f8SSagi Grimberg 20623f2304f8SSagi Grimberg static void nvme_tcp_set_sg_null(struct nvme_command *c) 20633f2304f8SSagi Grimberg { 20643f2304f8SSagi Grimberg struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 20653f2304f8SSagi Grimberg 20663f2304f8SSagi Grimberg sg->addr = 0; 20673f2304f8SSagi Grimberg sg->length = 0; 20683f2304f8SSagi Grimberg sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 20693f2304f8SSagi Grimberg NVME_SGL_FMT_TRANSPORT_A; 20703f2304f8SSagi Grimberg } 20713f2304f8SSagi Grimberg 20723f2304f8SSagi Grimberg static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, 20733f2304f8SSagi Grimberg struct nvme_command *c, u32 data_len) 20743f2304f8SSagi Grimberg { 20753f2304f8SSagi Grimberg struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 20763f2304f8SSagi Grimberg 20773f2304f8SSagi Grimberg sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); 20783f2304f8SSagi Grimberg sg->length = cpu_to_le32(data_len); 20793f2304f8SSagi Grimberg sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; 20803f2304f8SSagi Grimberg } 20813f2304f8SSagi Grimberg 20823f2304f8SSagi Grimberg static void nvme_tcp_set_sg_host_data(struct nvme_command *c, 20833f2304f8SSagi Grimberg u32 data_len) 20843f2304f8SSagi Grimberg { 20853f2304f8SSagi Grimberg struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 20863f2304f8SSagi Grimberg 20873f2304f8SSagi Grimberg sg->addr = 0; 20883f2304f8SSagi Grimberg sg->length = cpu_to_le32(data_len); 20893f2304f8SSagi Grimberg sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 20903f2304f8SSagi Grimberg NVME_SGL_FMT_TRANSPORT_A; 20913f2304f8SSagi Grimberg } 20923f2304f8SSagi Grimberg 20933f2304f8SSagi Grimberg static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) 20943f2304f8SSagi Grimberg { 20953f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); 20963f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = &ctrl->queues[0]; 20973f2304f8SSagi Grimberg struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; 20983f2304f8SSagi Grimberg struct nvme_command *cmd = &pdu->cmd; 20993f2304f8SSagi Grimberg u8 hdgst = nvme_tcp_hdgst_len(queue); 21003f2304f8SSagi Grimberg 21013f2304f8SSagi Grimberg memset(pdu, 0, sizeof(*pdu)); 21023f2304f8SSagi Grimberg pdu->hdr.type = nvme_tcp_cmd; 21033f2304f8SSagi Grimberg if (queue->hdr_digest) 21043f2304f8SSagi Grimberg pdu->hdr.flags |= NVME_TCP_F_HDGST; 21053f2304f8SSagi Grimberg pdu->hdr.hlen = sizeof(*pdu); 21063f2304f8SSagi Grimberg pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); 21073f2304f8SSagi Grimberg 21083f2304f8SSagi Grimberg cmd->common.opcode = nvme_admin_async_event; 21093f2304f8SSagi Grimberg cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; 21103f2304f8SSagi Grimberg cmd->common.flags |= NVME_CMD_SGL_METABUF; 21113f2304f8SSagi Grimberg nvme_tcp_set_sg_null(cmd); 21123f2304f8SSagi Grimberg 21133f2304f8SSagi Grimberg ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; 21143f2304f8SSagi Grimberg ctrl->async_req.offset = 0; 21153f2304f8SSagi Grimberg ctrl->async_req.curr_bio = 
NULL; 21163f2304f8SSagi Grimberg ctrl->async_req.data_len = 0; 21173f2304f8SSagi Grimberg 21183f2304f8SSagi Grimberg nvme_tcp_queue_request(&ctrl->async_req); 21193f2304f8SSagi Grimberg } 21203f2304f8SSagi Grimberg 21213f2304f8SSagi Grimberg static enum blk_eh_timer_return 21223f2304f8SSagi Grimberg nvme_tcp_timeout(struct request *rq, bool reserved) 21233f2304f8SSagi Grimberg { 21243f2304f8SSagi Grimberg struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 21253f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; 21263f2304f8SSagi Grimberg struct nvme_tcp_cmd_pdu *pdu = req->pdu; 21273f2304f8SSagi Grimberg 212892b98e88SKeith Busch /* 212992b98e88SKeith Busch * Restart the timer if a controller reset is already scheduled. Any 213092b98e88SKeith Busch * timed out commands would be handled before entering the connecting 213192b98e88SKeith Busch * state. 213292b98e88SKeith Busch */ 213392b98e88SKeith Busch if (ctrl->ctrl.state == NVME_CTRL_RESETTING) 213492b98e88SKeith Busch return BLK_EH_RESET_TIMER; 213592b98e88SKeith Busch 213639d57757SSagi Grimberg dev_warn(ctrl->ctrl.device, 21373f2304f8SSagi Grimberg "queue %d: timeout request %#x type %d\n", 213839d57757SSagi Grimberg nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); 21393f2304f8SSagi Grimberg 21403f2304f8SSagi Grimberg if (ctrl->ctrl.state != NVME_CTRL_LIVE) { 214139d57757SSagi Grimberg /* 214239d57757SSagi Grimberg * Teardown immediately if controller times out while starting 214339d57757SSagi Grimberg * or we are already started error recovery. all outstanding 214439d57757SSagi Grimberg * requests are completed on shutdown, so we return BLK_EH_DONE. 214539d57757SSagi Grimberg */ 214639d57757SSagi Grimberg flush_work(&ctrl->err_work); 214739d57757SSagi Grimberg nvme_tcp_teardown_io_queues(&ctrl->ctrl, false); 214839d57757SSagi Grimberg nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false); 21493f2304f8SSagi Grimberg return BLK_EH_DONE; 21503f2304f8SSagi Grimberg } 21513f2304f8SSagi Grimberg 215239d57757SSagi Grimberg dev_warn(ctrl->ctrl.device, "starting error recovery\n"); 21533f2304f8SSagi Grimberg nvme_tcp_error_recovery(&ctrl->ctrl); 21543f2304f8SSagi Grimberg 21553f2304f8SSagi Grimberg return BLK_EH_RESET_TIMER; 21563f2304f8SSagi Grimberg } 21573f2304f8SSagi Grimberg 21583f2304f8SSagi Grimberg static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, 21593f2304f8SSagi Grimberg struct request *rq) 21603f2304f8SSagi Grimberg { 21613f2304f8SSagi Grimberg struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 21623f2304f8SSagi Grimberg struct nvme_tcp_cmd_pdu *pdu = req->pdu; 21633f2304f8SSagi Grimberg struct nvme_command *c = &pdu->cmd; 21643f2304f8SSagi Grimberg 21653f2304f8SSagi Grimberg c->common.flags |= NVME_CMD_SGL_METABUF; 21663f2304f8SSagi Grimberg 216725e5cb78SSagi Grimberg if (!blk_rq_nr_phys_segments(rq)) 216825e5cb78SSagi Grimberg nvme_tcp_set_sg_null(c); 216925e5cb78SSagi Grimberg else if (rq_data_dir(rq) == WRITE && 21703f2304f8SSagi Grimberg req->data_len <= nvme_tcp_inline_data_size(queue)) 21713f2304f8SSagi Grimberg nvme_tcp_set_sg_inline(queue, c, req->data_len); 21723f2304f8SSagi Grimberg else 21733f2304f8SSagi Grimberg nvme_tcp_set_sg_host_data(c, req->data_len); 21743f2304f8SSagi Grimberg 21753f2304f8SSagi Grimberg return 0; 21763f2304f8SSagi Grimberg } 21773f2304f8SSagi Grimberg 21783f2304f8SSagi Grimberg static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, 21793f2304f8SSagi Grimberg struct request *rq) 21803f2304f8SSagi Grimberg { 21813f2304f8SSagi Grimberg struct 
nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 21823f2304f8SSagi Grimberg struct nvme_tcp_cmd_pdu *pdu = req->pdu; 21833f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = req->queue; 21843f2304f8SSagi Grimberg u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; 21853f2304f8SSagi Grimberg blk_status_t ret; 21863f2304f8SSagi Grimberg 21873f2304f8SSagi Grimberg ret = nvme_setup_cmd(ns, rq, &pdu->cmd); 21883f2304f8SSagi Grimberg if (ret) 21893f2304f8SSagi Grimberg return ret; 21903f2304f8SSagi Grimberg 21913f2304f8SSagi Grimberg req->state = NVME_TCP_SEND_CMD_PDU; 21923f2304f8SSagi Grimberg req->offset = 0; 21933f2304f8SSagi Grimberg req->data_sent = 0; 21943f2304f8SSagi Grimberg req->pdu_len = 0; 21953f2304f8SSagi Grimberg req->pdu_sent = 0; 219625e5cb78SSagi Grimberg req->data_len = blk_rq_nr_phys_segments(rq) ? 219725e5cb78SSagi Grimberg blk_rq_payload_bytes(rq) : 0; 21983f2304f8SSagi Grimberg req->curr_bio = rq->bio; 21993f2304f8SSagi Grimberg 22003f2304f8SSagi Grimberg if (rq_data_dir(rq) == WRITE && 22013f2304f8SSagi Grimberg req->data_len <= nvme_tcp_inline_data_size(queue)) 22023f2304f8SSagi Grimberg req->pdu_len = req->data_len; 22033f2304f8SSagi Grimberg else if (req->curr_bio) 22043f2304f8SSagi Grimberg nvme_tcp_init_iter(req, READ); 22053f2304f8SSagi Grimberg 22063f2304f8SSagi Grimberg pdu->hdr.type = nvme_tcp_cmd; 22073f2304f8SSagi Grimberg pdu->hdr.flags = 0; 22083f2304f8SSagi Grimberg if (queue->hdr_digest) 22093f2304f8SSagi Grimberg pdu->hdr.flags |= NVME_TCP_F_HDGST; 22103f2304f8SSagi Grimberg if (queue->data_digest && req->pdu_len) { 22113f2304f8SSagi Grimberg pdu->hdr.flags |= NVME_TCP_F_DDGST; 22123f2304f8SSagi Grimberg ddgst = nvme_tcp_ddgst_len(queue); 22133f2304f8SSagi Grimberg } 22143f2304f8SSagi Grimberg pdu->hdr.hlen = sizeof(*pdu); 22153f2304f8SSagi Grimberg pdu->hdr.pdo = req->pdu_len ? 
pdu->hdr.hlen + hdgst : 0; 22163f2304f8SSagi Grimberg pdu->hdr.plen = 22173f2304f8SSagi Grimberg cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); 22183f2304f8SSagi Grimberg 22193f2304f8SSagi Grimberg ret = nvme_tcp_map_data(queue, rq); 22203f2304f8SSagi Grimberg if (unlikely(ret)) { 222128a4cac4SMax Gurtovoy nvme_cleanup_cmd(rq); 22223f2304f8SSagi Grimberg dev_err(queue->ctrl->ctrl.device, 22233f2304f8SSagi Grimberg "Failed to map data (%d)\n", ret); 22243f2304f8SSagi Grimberg return ret; 22253f2304f8SSagi Grimberg } 22263f2304f8SSagi Grimberg 22273f2304f8SSagi Grimberg return 0; 22283f2304f8SSagi Grimberg } 22293f2304f8SSagi Grimberg 22303f2304f8SSagi Grimberg static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, 22313f2304f8SSagi Grimberg const struct blk_mq_queue_data *bd) 22323f2304f8SSagi Grimberg { 22333f2304f8SSagi Grimberg struct nvme_ns *ns = hctx->queue->queuedata; 22343f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = hctx->driver_data; 22353f2304f8SSagi Grimberg struct request *rq = bd->rq; 22363f2304f8SSagi Grimberg struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 22373f2304f8SSagi Grimberg bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); 22383f2304f8SSagi Grimberg blk_status_t ret; 22393f2304f8SSagi Grimberg 22403f2304f8SSagi Grimberg if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 22413f2304f8SSagi Grimberg return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); 22423f2304f8SSagi Grimberg 22433f2304f8SSagi Grimberg ret = nvme_tcp_setup_cmd_pdu(ns, rq); 22443f2304f8SSagi Grimberg if (unlikely(ret)) 22453f2304f8SSagi Grimberg return ret; 22463f2304f8SSagi Grimberg 22473f2304f8SSagi Grimberg blk_mq_start_request(rq); 22483f2304f8SSagi Grimberg 22493f2304f8SSagi Grimberg nvme_tcp_queue_request(req); 22503f2304f8SSagi Grimberg 22513f2304f8SSagi Grimberg return BLK_STS_OK; 22523f2304f8SSagi Grimberg } 22533f2304f8SSagi Grimberg 2254873946f4SSagi Grimberg static int nvme_tcp_map_queues(struct blk_mq_tag_set *set) 2255873946f4SSagi Grimberg { 2256873946f4SSagi Grimberg struct nvme_tcp_ctrl *ctrl = set->driver_data; 225764861993SSagi Grimberg struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2258873946f4SSagi Grimberg 225964861993SSagi Grimberg if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { 2260873946f4SSagi Grimberg /* separate read/write queues */ 2261873946f4SSagi Grimberg set->map[HCTX_TYPE_DEFAULT].nr_queues = 226264861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT]; 226364861993SSagi Grimberg set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; 226464861993SSagi Grimberg set->map[HCTX_TYPE_READ].nr_queues = 226564861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ]; 2266873946f4SSagi Grimberg set->map[HCTX_TYPE_READ].queue_offset = 226764861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT]; 2268873946f4SSagi Grimberg } else { 226964861993SSagi Grimberg /* shared read/write queues */ 2270873946f4SSagi Grimberg set->map[HCTX_TYPE_DEFAULT].nr_queues = 227164861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT]; 227264861993SSagi Grimberg set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; 227364861993SSagi Grimberg set->map[HCTX_TYPE_READ].nr_queues = 227464861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT]; 2275873946f4SSagi Grimberg set->map[HCTX_TYPE_READ].queue_offset = 0; 2276873946f4SSagi Grimberg } 2277873946f4SSagi Grimberg blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 2278873946f4SSagi Grimberg blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); 227964861993SSagi Grimberg 22801a9460ceSSagi Grimberg if 
(opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { 22811a9460ceSSagi Grimberg /* map dedicated poll queues only if we have queues left */ 22821a9460ceSSagi Grimberg set->map[HCTX_TYPE_POLL].nr_queues = 22831a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_POLL]; 22841a9460ceSSagi Grimberg set->map[HCTX_TYPE_POLL].queue_offset = 22851a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT] + 22861a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ]; 22871a9460ceSSagi Grimberg blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); 22881a9460ceSSagi Grimberg } 22891a9460ceSSagi Grimberg 229064861993SSagi Grimberg dev_info(ctrl->ctrl.device, 22911a9460ceSSagi Grimberg "mapped %d/%d/%d default/read/poll queues.\n", 229264861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT], 22931a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ], 22941a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_POLL]); 229564861993SSagi Grimberg 2296873946f4SSagi Grimberg return 0; 2297873946f4SSagi Grimberg } 2298873946f4SSagi Grimberg 22991a9460ceSSagi Grimberg static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx) 23001a9460ceSSagi Grimberg { 23011a9460ceSSagi Grimberg struct nvme_tcp_queue *queue = hctx->driver_data; 23021a9460ceSSagi Grimberg struct sock *sk = queue->sock->sk; 23031a9460ceSSagi Grimberg 2304f86e5bf8SSagi Grimberg if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) 2305f86e5bf8SSagi Grimberg return 0; 2306f86e5bf8SSagi Grimberg 230772e5d757SSagi Grimberg set_bit(NVME_TCP_Q_POLLING, &queue->flags); 23083f926af3SEric Dumazet if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) 23091a9460ceSSagi Grimberg sk_busy_loop(sk, true); 23101a9460ceSSagi Grimberg nvme_tcp_try_recv(queue); 231172e5d757SSagi Grimberg clear_bit(NVME_TCP_Q_POLLING, &queue->flags); 23121a9460ceSSagi Grimberg return queue->nr_cqe; 23131a9460ceSSagi Grimberg } 23141a9460ceSSagi Grimberg 23153f2304f8SSagi Grimberg static struct blk_mq_ops nvme_tcp_mq_ops = { 23163f2304f8SSagi Grimberg .queue_rq = nvme_tcp_queue_rq, 23173f2304f8SSagi Grimberg .complete = nvme_complete_rq, 23183f2304f8SSagi Grimberg .init_request = nvme_tcp_init_request, 23193f2304f8SSagi Grimberg .exit_request = nvme_tcp_exit_request, 23203f2304f8SSagi Grimberg .init_hctx = nvme_tcp_init_hctx, 23213f2304f8SSagi Grimberg .timeout = nvme_tcp_timeout, 2322873946f4SSagi Grimberg .map_queues = nvme_tcp_map_queues, 23231a9460ceSSagi Grimberg .poll = nvme_tcp_poll, 23243f2304f8SSagi Grimberg }; 23253f2304f8SSagi Grimberg 23263f2304f8SSagi Grimberg static struct blk_mq_ops nvme_tcp_admin_mq_ops = { 23273f2304f8SSagi Grimberg .queue_rq = nvme_tcp_queue_rq, 23283f2304f8SSagi Grimberg .complete = nvme_complete_rq, 23293f2304f8SSagi Grimberg .init_request = nvme_tcp_init_request, 23303f2304f8SSagi Grimberg .exit_request = nvme_tcp_exit_request, 23313f2304f8SSagi Grimberg .init_hctx = nvme_tcp_init_admin_hctx, 23323f2304f8SSagi Grimberg .timeout = nvme_tcp_timeout, 23333f2304f8SSagi Grimberg }; 23343f2304f8SSagi Grimberg 23353f2304f8SSagi Grimberg static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { 23363f2304f8SSagi Grimberg .name = "tcp", 23373f2304f8SSagi Grimberg .module = THIS_MODULE, 23383f2304f8SSagi Grimberg .flags = NVME_F_FABRICS, 23393f2304f8SSagi Grimberg .reg_read32 = nvmf_reg_read32, 23403f2304f8SSagi Grimberg .reg_read64 = nvmf_reg_read64, 23413f2304f8SSagi Grimberg .reg_write32 = nvmf_reg_write32, 23423f2304f8SSagi Grimberg .free_ctrl = nvme_tcp_free_ctrl, 23433f2304f8SSagi Grimberg .submit_async_event = nvme_tcp_submit_async_event, 
22991a9460ceSSagi Grimberg static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
23001a9460ceSSagi Grimberg {
23011a9460ceSSagi Grimberg struct nvme_tcp_queue *queue = hctx->driver_data;
23021a9460ceSSagi Grimberg struct sock *sk = queue->sock->sk;
23031a9460ceSSagi Grimberg
2304f86e5bf8SSagi Grimberg if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2305f86e5bf8SSagi Grimberg return 0;
2306f86e5bf8SSagi Grimberg
230772e5d757SSagi Grimberg set_bit(NVME_TCP_Q_POLLING, &queue->flags);
23083f926af3SEric Dumazet if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
23091a9460ceSSagi Grimberg sk_busy_loop(sk, true);
23101a9460ceSSagi Grimberg nvme_tcp_try_recv(queue);
231172e5d757SSagi Grimberg clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
23121a9460ceSSagi Grimberg return queue->nr_cqe;
23131a9460ceSSagi Grimberg }
23141a9460ceSSagi Grimberg
23153f2304f8SSagi Grimberg static struct blk_mq_ops nvme_tcp_mq_ops = {
23163f2304f8SSagi Grimberg .queue_rq = nvme_tcp_queue_rq,
23173f2304f8SSagi Grimberg .complete = nvme_complete_rq,
23183f2304f8SSagi Grimberg .init_request = nvme_tcp_init_request,
23193f2304f8SSagi Grimberg .exit_request = nvme_tcp_exit_request,
23203f2304f8SSagi Grimberg .init_hctx = nvme_tcp_init_hctx,
23213f2304f8SSagi Grimberg .timeout = nvme_tcp_timeout,
2322873946f4SSagi Grimberg .map_queues = nvme_tcp_map_queues,
23231a9460ceSSagi Grimberg .poll = nvme_tcp_poll,
23243f2304f8SSagi Grimberg };
23253f2304f8SSagi Grimberg
23263f2304f8SSagi Grimberg static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
23273f2304f8SSagi Grimberg .queue_rq = nvme_tcp_queue_rq,
23283f2304f8SSagi Grimberg .complete = nvme_complete_rq,
23293f2304f8SSagi Grimberg .init_request = nvme_tcp_init_request,
23303f2304f8SSagi Grimberg .exit_request = nvme_tcp_exit_request,
23313f2304f8SSagi Grimberg .init_hctx = nvme_tcp_init_admin_hctx,
23323f2304f8SSagi Grimberg .timeout = nvme_tcp_timeout,
23333f2304f8SSagi Grimberg };
23343f2304f8SSagi Grimberg
23353f2304f8SSagi Grimberg static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
23363f2304f8SSagi Grimberg .name = "tcp",
23373f2304f8SSagi Grimberg .module = THIS_MODULE,
23383f2304f8SSagi Grimberg .flags = NVME_F_FABRICS,
23393f2304f8SSagi Grimberg .reg_read32 = nvmf_reg_read32,
23403f2304f8SSagi Grimberg .reg_read64 = nvmf_reg_read64,
23413f2304f8SSagi Grimberg .reg_write32 = nvmf_reg_write32,
23423f2304f8SSagi Grimberg .free_ctrl = nvme_tcp_free_ctrl,
23433f2304f8SSagi Grimberg .submit_async_event = nvme_tcp_submit_async_event,
23443f2304f8SSagi Grimberg .delete_ctrl = nvme_tcp_delete_ctrl,
23453f2304f8SSagi Grimberg .get_address = nvmf_get_address,
23463f2304f8SSagi Grimberg };
23473f2304f8SSagi Grimberg
23483f2304f8SSagi Grimberg static bool
23493f2304f8SSagi Grimberg nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
23503f2304f8SSagi Grimberg {
23513f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl;
23523f2304f8SSagi Grimberg bool found = false;
23533f2304f8SSagi Grimberg
23543f2304f8SSagi Grimberg mutex_lock(&nvme_tcp_ctrl_mutex);
23553f2304f8SSagi Grimberg list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
23563f2304f8SSagi Grimberg found = nvmf_ip_options_match(&ctrl->ctrl, opts);
23573f2304f8SSagi Grimberg if (found)
23583f2304f8SSagi Grimberg break;
23593f2304f8SSagi Grimberg }
23603f2304f8SSagi Grimberg mutex_unlock(&nvme_tcp_ctrl_mutex);
23613f2304f8SSagi Grimberg
23623f2304f8SSagi Grimberg return found;
23633f2304f8SSagi Grimberg }
23643f2304f8SSagi Grimberg
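/*
 * Create a new TCP controller on behalf of "nvme connect": allocate and
 * initialize the controller structure, default the transport service id to
 * the discovery port if none was given, resolve the target (and optional
 * host) address, reject duplicate connections unless explicitly allowed,
 * then register with the NVMe core and perform the initial association via
 * nvme_tcp_setup_ctrl().
 */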
23653f2304f8SSagi Grimberg static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
23663f2304f8SSagi Grimberg struct nvmf_ctrl_options *opts)
23673f2304f8SSagi Grimberg {
23683f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl;
23693f2304f8SSagi Grimberg int ret;
23703f2304f8SSagi Grimberg
23713f2304f8SSagi Grimberg ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
23723f2304f8SSagi Grimberg if (!ctrl)
23733f2304f8SSagi Grimberg return ERR_PTR(-ENOMEM);
23743f2304f8SSagi Grimberg
23753f2304f8SSagi Grimberg INIT_LIST_HEAD(&ctrl->list);
23763f2304f8SSagi Grimberg ctrl->ctrl.opts = opts;
23771a9460ceSSagi Grimberg ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
23781a9460ceSSagi Grimberg opts->nr_poll_queues + 1;
23793f2304f8SSagi Grimberg ctrl->ctrl.sqsize = opts->queue_size - 1;
23803f2304f8SSagi Grimberg ctrl->ctrl.kato = opts->kato;
23813f2304f8SSagi Grimberg
23823f2304f8SSagi Grimberg INIT_DELAYED_WORK(&ctrl->connect_work,
23833f2304f8SSagi Grimberg nvme_tcp_reconnect_ctrl_work);
23843f2304f8SSagi Grimberg INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
23853f2304f8SSagi Grimberg INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
23863f2304f8SSagi Grimberg
23873f2304f8SSagi Grimberg if (!(opts->mask & NVMF_OPT_TRSVCID)) {
23883f2304f8SSagi Grimberg opts->trsvcid =
23893f2304f8SSagi Grimberg kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
23903f2304f8SSagi Grimberg if (!opts->trsvcid) {
23913f2304f8SSagi Grimberg ret = -ENOMEM;
23923f2304f8SSagi Grimberg goto out_free_ctrl;
23933f2304f8SSagi Grimberg }
23943f2304f8SSagi Grimberg opts->mask |= NVMF_OPT_TRSVCID;
23953f2304f8SSagi Grimberg }
23963f2304f8SSagi Grimberg
23973f2304f8SSagi Grimberg ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
23983f2304f8SSagi Grimberg opts->traddr, opts->trsvcid, &ctrl->addr);
23993f2304f8SSagi Grimberg if (ret) {
24003f2304f8SSagi Grimberg pr_err("malformed address passed: %s:%s\n",
24013f2304f8SSagi Grimberg opts->traddr, opts->trsvcid);
24023f2304f8SSagi Grimberg goto out_free_ctrl;
24033f2304f8SSagi Grimberg }
24043f2304f8SSagi Grimberg
24053f2304f8SSagi Grimberg if (opts->mask & NVMF_OPT_HOST_TRADDR) {
24063f2304f8SSagi Grimberg ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
24073f2304f8SSagi Grimberg opts->host_traddr, NULL, &ctrl->src_addr);
24083f2304f8SSagi Grimberg if (ret) {
24093f2304f8SSagi Grimberg pr_err("malformed src address passed: %s\n",
24103f2304f8SSagi Grimberg opts->host_traddr);
24113f2304f8SSagi Grimberg goto out_free_ctrl;
24123f2304f8SSagi Grimberg }
24133f2304f8SSagi Grimberg }
24143f2304f8SSagi Grimberg
24153f2304f8SSagi Grimberg if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
24163f2304f8SSagi Grimberg ret = -EALREADY;
24173f2304f8SSagi Grimberg goto out_free_ctrl;
24183f2304f8SSagi Grimberg }
24193f2304f8SSagi Grimberg
2420873946f4SSagi Grimberg ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
24213f2304f8SSagi Grimberg GFP_KERNEL);
24223f2304f8SSagi Grimberg if (!ctrl->queues) {
24233f2304f8SSagi Grimberg ret = -ENOMEM;
24243f2304f8SSagi Grimberg goto out_free_ctrl;
24253f2304f8SSagi Grimberg }
24263f2304f8SSagi Grimberg
24273f2304f8SSagi Grimberg ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
24283f2304f8SSagi Grimberg if (ret)
24293f2304f8SSagi Grimberg goto out_kfree_queues;
24303f2304f8SSagi Grimberg
24313f2304f8SSagi Grimberg if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
24323f2304f8SSagi Grimberg WARN_ON_ONCE(1);
24333f2304f8SSagi Grimberg ret = -EINTR;
24343f2304f8SSagi Grimberg goto out_uninit_ctrl;
24353f2304f8SSagi Grimberg }
24363f2304f8SSagi Grimberg
24373f2304f8SSagi Grimberg ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
24383f2304f8SSagi Grimberg if (ret)
24393f2304f8SSagi Grimberg goto out_uninit_ctrl;
24403f2304f8SSagi Grimberg
24413f2304f8SSagi Grimberg dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
24423f2304f8SSagi Grimberg ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
24433f2304f8SSagi Grimberg
24443f2304f8SSagi Grimberg mutex_lock(&nvme_tcp_ctrl_mutex);
24453f2304f8SSagi Grimberg list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
24463f2304f8SSagi Grimberg mutex_unlock(&nvme_tcp_ctrl_mutex);
24473f2304f8SSagi Grimberg
24483f2304f8SSagi Grimberg return &ctrl->ctrl;
24493f2304f8SSagi Grimberg
24503f2304f8SSagi Grimberg out_uninit_ctrl:
24513f2304f8SSagi Grimberg nvme_uninit_ctrl(&ctrl->ctrl);
24523f2304f8SSagi Grimberg nvme_put_ctrl(&ctrl->ctrl);
24533f2304f8SSagi Grimberg if (ret > 0)
24543f2304f8SSagi Grimberg ret = -EIO;
24553f2304f8SSagi Grimberg return ERR_PTR(ret);
24563f2304f8SSagi Grimberg out_kfree_queues:
24573f2304f8SSagi Grimberg kfree(ctrl->queues);
24583f2304f8SSagi Grimberg out_free_ctrl:
24593f2304f8SSagi Grimberg kfree(ctrl);
24603f2304f8SSagi Grimberg return ERR_PTR(ret);
24613f2304f8SSagi Grimberg }
24623f2304f8SSagi Grimberg
24633f2304f8SSagi Grimberg static struct nvmf_transport_ops nvme_tcp_transport = {
24643f2304f8SSagi Grimberg .name = "tcp",
24653f2304f8SSagi Grimberg .module = THIS_MODULE,
24663f2304f8SSagi Grimberg .required_opts = NVMF_OPT_TRADDR,
24673f2304f8SSagi Grimberg .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
24683f2304f8SSagi Grimberg NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2469873946f4SSagi Grimberg NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2470bb13985dSIsrael Rukshin NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2471bb13985dSIsrael Rukshin NVMF_OPT_TOS,
24723f2304f8SSagi Grimberg .create_ctrl = nvme_tcp_create_ctrl,
24733f2304f8SSagi Grimberg };
24743f2304f8SSagi Grimberg
24753f2304f8SSagi Grimberg static int __init nvme_tcp_init_module(void)
24763f2304f8SSagi Grimberg {
24773f2304f8SSagi Grimberg nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
24783f2304f8SSagi Grimberg WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
24793f2304f8SSagi Grimberg if (!nvme_tcp_wq)
24803f2304f8SSagi Grimberg return -ENOMEM;
24813f2304f8SSagi Grimberg
24823f2304f8SSagi Grimberg nvmf_register_transport(&nvme_tcp_transport);
24833f2304f8SSagi Grimberg return 0;
24843f2304f8SSagi Grimberg }
24853f2304f8SSagi Grimberg
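/*
 * Module unload: unregister the transport first so no new controllers can
 * be created, then request deletion of every remaining controller and wait
 * for those deletions on nvme_delete_wq before destroying the driver's own
 * workqueue.
 */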
24863f2304f8SSagi Grimberg static void __exit nvme_tcp_cleanup_module(void)
24873f2304f8SSagi Grimberg {
24883f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl;
24893f2304f8SSagi Grimberg
24903f2304f8SSagi Grimberg nvmf_unregister_transport(&nvme_tcp_transport);
24913f2304f8SSagi Grimberg
24923f2304f8SSagi Grimberg mutex_lock(&nvme_tcp_ctrl_mutex);
24933f2304f8SSagi Grimberg list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
24943f2304f8SSagi Grimberg nvme_delete_ctrl(&ctrl->ctrl);
24953f2304f8SSagi Grimberg mutex_unlock(&nvme_tcp_ctrl_mutex);
24963f2304f8SSagi Grimberg flush_workqueue(nvme_delete_wq);
24973f2304f8SSagi Grimberg
24983f2304f8SSagi Grimberg destroy_workqueue(nvme_tcp_wq);
24993f2304f8SSagi Grimberg }
25003f2304f8SSagi Grimberg
25013f2304f8SSagi Grimberg module_init(nvme_tcp_init_module);
25023f2304f8SSagi Grimberg module_exit(nvme_tcp_cleanup_module);
25033f2304f8SSagi Grimberg
25043f2304f8SSagi Grimberg MODULE_LICENSE("GPL v2");
2505