// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
static struct lock_class_key nvme_tcp_sk_key[2];
static struct lock_class_key nvme_tcp_slock_key[2];

static void nvme_tcp_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
					      &nvme_tcp_slock_key[0],
					      "sk_lock-AF_INET-NVME",
					      &nvme_tcp_sk_key[0]);
		break;
	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
					      &nvme_tcp_slock_key[1],
					      "sk_lock-AF_INET6-NVME",
					      &nvme_tcp_sk_key[1]);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
#else
static void nvme_tcp_reclassify_socket(struct socket *sock) { }
#endif

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u32			h2cdata_left;
	u32			h2cdata_offset;
	u16			ttag;
	__le16			status;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
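/*
 * Per-queue context: one instance per NVMe queue (admin or I/O), each backed
 * by its own TCP socket.  The recv fields track progress through the PDU
 * currently being received; the send fields track the request currently
 * being transmitted.
 */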
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	u32			maxh2cdata;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}
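/* Queue 0 is the admin queue; I/O queue N maps to tag_set entry N - 1. */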
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
	if (nvme_is_fabrics(req->req.cmd))
		return NVME_TCP_ADMIN_CCSZ;
	return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}
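/*
 * Initialize req->iter over the request payload: either the request's
 * special payload vector (e.g. a discard page) or the bvecs of the current
 * bio, honoring any part of the bio already consumed (bi_bvec_done).
 */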
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}
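/*
 * There is more to transmit if requests are staged on the send_list, queued
 * on the lockless req_list, or a directly-sending context has announced a
 * batch via more_requests.
 */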
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we're the first on the send_list, try to send directly;
	 * otherwise queue io_work. Also, only do that if we are on the
	 * same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}
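/*
 * Digest helpers: header and data digests are both CRC32C, computed via the
 * kernel crypto API (see nvme_tcp_alloc_crypto() below).
 */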
static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}
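/*
 * A received PDU that carries payload must also carry a data digest when
 * data digest is enabled; verify the flag and prime the receive hash for
 * the incoming payload.
 */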
static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu;
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
			GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	pdu = req->pdu;
	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &pdu->cmd;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}
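/*
 * Receive-side state machine: consume the PDU header first (pdu_remaining),
 * then any data payload (data_remaining), then the trailing data digest
 * (ddgst_remaining).
 */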
static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad cqe.command_id %#x on queue %d\n",
			cqe->command_id, nvme_tcp_queue_id(queue));
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	req = blk_mq_rq_to_pdu(rq);
	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
		req->status = cqe->status;

	if (!nvme_try_complete_req(rq, req->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad c2hdata.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}
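/*
 * Build the next H2C data PDU for a write. Transfers larger than the
 * controller's advertised MAXH2CDATA limit (queue->maxh2cdata) are split
 * into multiple PDUs; h2cdata_left/h2cdata_offset carry the remainder
 * across PDUs, and DATA_LAST is only set on the final one.
 */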
static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u32 h2cdata_sent = req->pdu_len;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;
	req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
	req->pdu_sent = 0;
	req->h2cdata_left -= req->pdu_len;
	req->h2cdata_offset += h2cdata_sent;

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	if (!req->h2cdata_left)
		data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = req->ttag;
	data->command_id = nvme_cid(rq);
	data->data_offset = cpu_to_le32(req->h2cdata_offset);
	data->data_length = cpu_to_le32(req->pdu_len);
}
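/*
 * An R2T from the controller grants permission to send r2t_length bytes at
 * r2t_offset. Validate it against the request before staging the first H2C
 * data PDU for transmission.
 */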
static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	u32 r2t_length = le32_to_cpu(pdu->r2t_length);
	u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	if (unlikely(!r2t_length)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, r2t_length);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + r2t_length > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, r2t_length, req->data_len, req->data_sent);
		return -EPROTO;
	}

	if (unlikely(r2t_offset < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, r2t_offset, req->data_sent);
		return -EPROTO;
	}

	req->pdu_len = 0;
	req->h2cdata_left = r2t_length;
	req->h2cdata_offset = r2t_offset;
	req->ttag = pdu->ttag;

	nvme_tcp_setup_h2c_data_pdu(req);
	nvme_tcp_queue_request(req, false, true);

	return 0;
}
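/*
 * Accumulate the PDU header (plus header digest, if any) from the skb,
 * verify digests once it is complete, then dispatch on the PDU type.
 */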
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}
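/*
 * Copy C2H payload from the skb into the request's bio pages, folding the
 * bytes into the data digest on the fly when it is enabled. Completion of
 * a SUCCESS-flagged PDU is deferred to the DDGST state when a data digest
 * trails the payload.
 */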
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means the controller
			 * sent more data than we requested, hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq,
						le16_to_cpu(req->status));
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}
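/*
 * Receive the data digest trailing a C2H data PDU. On mismatch, mark the
 * request with NVME_SC_DATA_XFER_ERROR; the request then completes either
 * here (if the controller set SUCCESS and elides the response capsule) or
 * through the subsequent completion PDU.
 */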
static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);

		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}
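/*
 * Socket upcalls. These run in bottom-half context under sk_callback_lock,
 * so they do minimal work: kick io_work on the queue's CPU and, on terminal
 * TCP states, trigger error recovery before chaining to the saved
 * state_change callback.
 */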
static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	if (nvme_tcp_async_req(req)) {
		union nvme_result res = {};

		nvme_complete_async_event(&req->queue->ctrl->ctrl,
				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
	} else {
		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
				NVME_SC_HOST_PATH_ERROR);
	}
}
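/*
 * Push payload pages to the socket. kernel_sendpage() is used only for
 * pages where zero-copy sendpage is safe (sendpage_ok()); otherwise fall
 * back to sock_no_sendpage(), which copies.
 */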
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int req_data_len = req->data_len;
	u32 h2cdata_left = req->h2cdata_left;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/*
		 * update the request iterator except for the last payload send
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
		if (req_data_sent + ret < req_data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				if (h2cdata_left)
					nvme_tcp_setup_h2c_data_pdu(req);
				else
					nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}
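/*
 * Send the H2C data PDU header. When further H2C chunks remain, the header
 * is sent with sock_no_sendpage() (copied), presumably because the PDU
 * buffer will be rewritten for the next chunk and must not stay referenced
 * zero-copy by the network stack.
 */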
static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	if (!req->h2cdata_left)
		ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
				offset_in_page(pdu) + req->offset, len,
				MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	else
		ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
				offset_in_page(pdu) + req->offset, len,
				MSG_DONTWAIT | MSG_MORE);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	size_t offset = req->offset;
	u32 h2cdata_left = req->h2cdata_left;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		if (h2cdata_left)
			nvme_tcp_setup_h2c_data_pdu(req);
		else
			nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}
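/*
 * Advance the current request through the send states: command PDU, then
 * (for writes) H2C data PDU, data pages, and finally the data digest.
 * Returns 1 if progress was made, 0 if there is nothing to send or the
 * socket is full (-EAGAIN is mapped to 0); other errors fail the request.
 */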
11463f2304f8SSagi Grimberg { 11473f2304f8SSagi Grimberg struct nvme_tcp_request *req; 11483f2304f8SSagi Grimberg int ret = 1; 11493f2304f8SSagi Grimberg 11503f2304f8SSagi Grimberg if (!queue->request) { 11513f2304f8SSagi Grimberg queue->request = nvme_tcp_fetch_request(queue); 11523f2304f8SSagi Grimberg if (!queue->request) 11533f2304f8SSagi Grimberg return 0; 11543f2304f8SSagi Grimberg } 11553f2304f8SSagi Grimberg req = queue->request; 11563f2304f8SSagi Grimberg 11573f2304f8SSagi Grimberg if (req->state == NVME_TCP_SEND_CMD_PDU) { 11583f2304f8SSagi Grimberg ret = nvme_tcp_try_send_cmd_pdu(req); 11593f2304f8SSagi Grimberg if (ret <= 0) 11603f2304f8SSagi Grimberg goto done; 11613f2304f8SSagi Grimberg if (!nvme_tcp_has_inline_data(req)) 11623f2304f8SSagi Grimberg return ret; 11633f2304f8SSagi Grimberg } 11643f2304f8SSagi Grimberg 11653f2304f8SSagi Grimberg if (req->state == NVME_TCP_SEND_H2C_PDU) { 11663f2304f8SSagi Grimberg ret = nvme_tcp_try_send_data_pdu(req); 11673f2304f8SSagi Grimberg if (ret <= 0) 11683f2304f8SSagi Grimberg goto done; 11693f2304f8SSagi Grimberg } 11703f2304f8SSagi Grimberg 11713f2304f8SSagi Grimberg if (req->state == NVME_TCP_SEND_DATA) { 11723f2304f8SSagi Grimberg ret = nvme_tcp_try_send_data(req); 11733f2304f8SSagi Grimberg if (ret <= 0) 11743f2304f8SSagi Grimberg goto done; 11753f2304f8SSagi Grimberg } 11763f2304f8SSagi Grimberg 11773f2304f8SSagi Grimberg if (req->state == NVME_TCP_SEND_DDGST) 11783f2304f8SSagi Grimberg ret = nvme_tcp_try_send_ddgst(req); 11793f2304f8SSagi Grimberg done: 11805ff4e112SSagi Grimberg if (ret == -EAGAIN) { 11813f2304f8SSagi Grimberg ret = 0; 11825ff4e112SSagi Grimberg } else if (ret < 0) { 11835ff4e112SSagi Grimberg dev_err(queue->ctrl->ctrl.device, 11845ff4e112SSagi Grimberg "failed to send request %d\n", ret); 11855ff4e112SSagi Grimberg nvme_tcp_fail_request(queue->request); 11865ff4e112SSagi Grimberg nvme_tcp_done_send_req(queue); 11875ff4e112SSagi Grimberg } 11883f2304f8SSagi Grimberg return ret; 11893f2304f8SSagi Grimberg } 11903f2304f8SSagi Grimberg 11913f2304f8SSagi Grimberg static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) 11923f2304f8SSagi Grimberg { 119310407ec9SPotnuri Bharat Teja struct socket *sock = queue->sock; 119410407ec9SPotnuri Bharat Teja struct sock *sk = sock->sk; 11953f2304f8SSagi Grimberg read_descriptor_t rd_desc; 11963f2304f8SSagi Grimberg int consumed; 11973f2304f8SSagi Grimberg 11983f2304f8SSagi Grimberg rd_desc.arg.data = queue; 11993f2304f8SSagi Grimberg rd_desc.count = 1; 12003f2304f8SSagi Grimberg lock_sock(sk); 12011a9460ceSSagi Grimberg queue->nr_cqe = 0; 120210407ec9SPotnuri Bharat Teja consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); 12033f2304f8SSagi Grimberg release_sock(sk); 12043f2304f8SSagi Grimberg return consumed; 12053f2304f8SSagi Grimberg } 12063f2304f8SSagi Grimberg 12073f2304f8SSagi Grimberg static void nvme_tcp_io_work(struct work_struct *w) 12083f2304f8SSagi Grimberg { 12093f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = 12103f2304f8SSagi Grimberg container_of(w, struct nvme_tcp_queue, io_work); 1211ddef2957SWunderlich, Mark unsigned long deadline = jiffies + msecs_to_jiffies(1); 12123f2304f8SSagi Grimberg 12133f2304f8SSagi Grimberg do { 12143f2304f8SSagi Grimberg bool pending = false; 12153f2304f8SSagi Grimberg int result; 12163f2304f8SSagi Grimberg 1217db5ad6b7SSagi Grimberg if (mutex_trylock(&queue->send_mutex)) { 12183f2304f8SSagi Grimberg result = nvme_tcp_try_send(queue); 1219db5ad6b7SSagi Grimberg mutex_unlock(&queue->send_mutex); 12205ff4e112SSagi 
Grimberg if (result > 0) 12213f2304f8SSagi Grimberg pending = true; 12225ff4e112SSagi Grimberg else if (unlikely(result < 0)) 12235ff4e112SSagi Grimberg break; 122470f437fbSKeith Busch } 12253f2304f8SSagi Grimberg 12263f2304f8SSagi Grimberg result = nvme_tcp_try_recv(queue); 12273f2304f8SSagi Grimberg if (result > 0) 12283f2304f8SSagi Grimberg pending = true; 1229761ad26cSSagi Grimberg else if (unlikely(result < 0)) 123039d06079SSagi Grimberg return; 12313f2304f8SSagi Grimberg 12323f2304f8SSagi Grimberg if (!pending) 12333f2304f8SSagi Grimberg return; 12343f2304f8SSagi Grimberg 1235ddef2957SWunderlich, Mark } while (!time_after(jiffies, deadline)); /* quota is exhausted */ 12363f2304f8SSagi Grimberg 12373f2304f8SSagi Grimberg queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 12383f2304f8SSagi Grimberg } 12393f2304f8SSagi Grimberg 12403f2304f8SSagi Grimberg static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) 12413f2304f8SSagi Grimberg { 12423f2304f8SSagi Grimberg struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); 12433f2304f8SSagi Grimberg 12443f2304f8SSagi Grimberg ahash_request_free(queue->rcv_hash); 12453f2304f8SSagi Grimberg ahash_request_free(queue->snd_hash); 12463f2304f8SSagi Grimberg crypto_free_ahash(tfm); 12473f2304f8SSagi Grimberg } 12483f2304f8SSagi Grimberg 12493f2304f8SSagi Grimberg static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) 12503f2304f8SSagi Grimberg { 12513f2304f8SSagi Grimberg struct crypto_ahash *tfm; 12523f2304f8SSagi Grimberg 12533f2304f8SSagi Grimberg tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); 12543f2304f8SSagi Grimberg if (IS_ERR(tfm)) 12553f2304f8SSagi Grimberg return PTR_ERR(tfm); 12563f2304f8SSagi Grimberg 12573f2304f8SSagi Grimberg queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); 12583f2304f8SSagi Grimberg if (!queue->snd_hash) 12593f2304f8SSagi Grimberg goto free_tfm; 12603f2304f8SSagi Grimberg ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); 12613f2304f8SSagi Grimberg 12623f2304f8SSagi Grimberg queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); 12633f2304f8SSagi Grimberg if (!queue->rcv_hash) 12643f2304f8SSagi Grimberg goto free_snd_hash; 12653f2304f8SSagi Grimberg ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); 12663f2304f8SSagi Grimberg 12673f2304f8SSagi Grimberg return 0; 12683f2304f8SSagi Grimberg free_snd_hash: 12693f2304f8SSagi Grimberg ahash_request_free(queue->snd_hash); 12703f2304f8SSagi Grimberg free_tfm: 12713f2304f8SSagi Grimberg crypto_free_ahash(tfm); 12723f2304f8SSagi Grimberg return -ENOMEM; 12733f2304f8SSagi Grimberg } 12743f2304f8SSagi Grimberg 12753f2304f8SSagi Grimberg static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) 12763f2304f8SSagi Grimberg { 12773f2304f8SSagi Grimberg struct nvme_tcp_request *async = &ctrl->async_req; 12783f2304f8SSagi Grimberg 12793f2304f8SSagi Grimberg page_frag_free(async->pdu); 12803f2304f8SSagi Grimberg } 12813f2304f8SSagi Grimberg 12823f2304f8SSagi Grimberg static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) 12833f2304f8SSagi Grimberg { 12843f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = &ctrl->queues[0]; 12853f2304f8SSagi Grimberg struct nvme_tcp_request *async = &ctrl->async_req; 12863f2304f8SSagi Grimberg u8 hdgst = nvme_tcp_hdgst_len(queue); 12873f2304f8SSagi Grimberg 12883f2304f8SSagi Grimberg async->pdu = page_frag_alloc(&queue->pf_cache, 12893f2304f8SSagi Grimberg sizeof(struct nvme_tcp_cmd_pdu) + hdgst, 12903f2304f8SSagi Grimberg GFP_KERNEL | __GFP_ZERO); 12913f2304f8SSagi 
Grimberg if (!async->pdu) 12923f2304f8SSagi Grimberg return -ENOMEM; 12933f2304f8SSagi Grimberg 12943f2304f8SSagi Grimberg async->queue = &ctrl->queues[0]; 12953f2304f8SSagi Grimberg return 0; 12963f2304f8SSagi Grimberg } 12973f2304f8SSagi Grimberg 12983f2304f8SSagi Grimberg static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) 12993f2304f8SSagi Grimberg { 1300a5053c92SMaurizio Lombardi struct page *page; 13013f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 13023f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 13033f2304f8SSagi Grimberg 13043f2304f8SSagi Grimberg if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) 13053f2304f8SSagi Grimberg return; 13063f2304f8SSagi Grimberg 13073f2304f8SSagi Grimberg if (queue->hdr_digest || queue->data_digest) 13083f2304f8SSagi Grimberg nvme_tcp_free_crypto(queue); 13093f2304f8SSagi Grimberg 1310a5053c92SMaurizio Lombardi if (queue->pf_cache.va) { 1311a5053c92SMaurizio Lombardi page = virt_to_head_page(queue->pf_cache.va); 1312a5053c92SMaurizio Lombardi __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); 1313a5053c92SMaurizio Lombardi queue->pf_cache.va = NULL; 1314a5053c92SMaurizio Lombardi } 13153f2304f8SSagi Grimberg sock_release(queue->sock); 13163f2304f8SSagi Grimberg kfree(queue->pdu); 1317d48f92cdSKeith Busch mutex_destroy(&queue->send_mutex); 13189ebbfe49SChao Leng mutex_destroy(&queue->queue_lock); 13193f2304f8SSagi Grimberg } 13203f2304f8SSagi Grimberg 13213f2304f8SSagi Grimberg static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) 13223f2304f8SSagi Grimberg { 13233f2304f8SSagi Grimberg struct nvme_tcp_icreq_pdu *icreq; 13243f2304f8SSagi Grimberg struct nvme_tcp_icresp_pdu *icresp; 13253f2304f8SSagi Grimberg struct msghdr msg = {}; 13263f2304f8SSagi Grimberg struct kvec iov; 13273f2304f8SSagi Grimberg bool ctrl_hdgst, ctrl_ddgst; 1328c2700d28SVarun Prakash u32 maxh2cdata; 13293f2304f8SSagi Grimberg int ret; 13303f2304f8SSagi Grimberg 13313f2304f8SSagi Grimberg icreq = kzalloc(sizeof(*icreq), GFP_KERNEL); 13323f2304f8SSagi Grimberg if (!icreq) 13333f2304f8SSagi Grimberg return -ENOMEM; 13343f2304f8SSagi Grimberg 13353f2304f8SSagi Grimberg icresp = kzalloc(sizeof(*icresp), GFP_KERNEL); 13363f2304f8SSagi Grimberg if (!icresp) { 13373f2304f8SSagi Grimberg ret = -ENOMEM; 13383f2304f8SSagi Grimberg goto free_icreq; 13393f2304f8SSagi Grimberg } 13403f2304f8SSagi Grimberg 13413f2304f8SSagi Grimberg icreq->hdr.type = nvme_tcp_icreq; 13423f2304f8SSagi Grimberg icreq->hdr.hlen = sizeof(*icreq); 13433f2304f8SSagi Grimberg icreq->hdr.pdo = 0; 13443f2304f8SSagi Grimberg icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); 13453f2304f8SSagi Grimberg icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); 13463f2304f8SSagi Grimberg icreq->maxr2t = 0; /* single inflight r2t supported */ 13473f2304f8SSagi Grimberg icreq->hpda = 0; /* no alignment constraint */ 13483f2304f8SSagi Grimberg if (queue->hdr_digest) 13493f2304f8SSagi Grimberg icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; 13503f2304f8SSagi Grimberg if (queue->data_digest) 13513f2304f8SSagi Grimberg icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; 13523f2304f8SSagi Grimberg 13533f2304f8SSagi Grimberg iov.iov_base = icreq; 13543f2304f8SSagi Grimberg iov.iov_len = sizeof(*icreq); 13553f2304f8SSagi Grimberg ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 13563f2304f8SSagi Grimberg if (ret < 0) 13573f2304f8SSagi Grimberg goto free_icresp; 13583f2304f8SSagi Grimberg 13593f2304f8SSagi Grimberg memset(&msg, 0, sizeof(msg)); 
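/* Receive the controller's ICResp synchronously: msg was zeroed above, so this is a plain blocking recvmsg, bounded by the 10 second sk_rcvtimeo that nvme_tcp_alloc_queue() sets on the socket. */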
13603f2304f8SSagi Grimberg iov.iov_base = icresp; 13613f2304f8SSagi Grimberg iov.iov_len = sizeof(*icresp); 13623f2304f8SSagi Grimberg ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, 13633f2304f8SSagi Grimberg iov.iov_len, msg.msg_flags); 13643f2304f8SSagi Grimberg if (ret < 0) 13653f2304f8SSagi Grimberg goto free_icresp; 13663f2304f8SSagi Grimberg 13673f2304f8SSagi Grimberg ret = -EINVAL; 13683f2304f8SSagi Grimberg if (icresp->hdr.type != nvme_tcp_icresp) { 13693f2304f8SSagi Grimberg pr_err("queue %d: bad type returned %d\n", 13703f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), icresp->hdr.type); 13713f2304f8SSagi Grimberg goto free_icresp; 13723f2304f8SSagi Grimberg } 13733f2304f8SSagi Grimberg 13743f2304f8SSagi Grimberg if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { 13753f2304f8SSagi Grimberg pr_err("queue %d: bad pdu length returned %d\n", 13763f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), icresp->hdr.plen); 13773f2304f8SSagi Grimberg goto free_icresp; 13783f2304f8SSagi Grimberg } 13793f2304f8SSagi Grimberg 13803f2304f8SSagi Grimberg if (icresp->pfv != NVME_TCP_PFV_1_0) { 13813f2304f8SSagi Grimberg pr_err("queue %d: bad pfv returned %d\n", 13823f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), icresp->pfv); 13833f2304f8SSagi Grimberg goto free_icresp; 13843f2304f8SSagi Grimberg } 13853f2304f8SSagi Grimberg 13863f2304f8SSagi Grimberg ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); 13873f2304f8SSagi Grimberg if ((queue->data_digest && !ctrl_ddgst) || 13883f2304f8SSagi Grimberg (!queue->data_digest && ctrl_ddgst)) { 13893f2304f8SSagi Grimberg pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", 13903f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), 13913f2304f8SSagi Grimberg queue->data_digest ? "enabled" : "disabled", 13923f2304f8SSagi Grimberg ctrl_ddgst ? "enabled" : "disabled"); 13933f2304f8SSagi Grimberg goto free_icresp; 13943f2304f8SSagi Grimberg } 13953f2304f8SSagi Grimberg 13963f2304f8SSagi Grimberg ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); 13973f2304f8SSagi Grimberg if ((queue->hdr_digest && !ctrl_hdgst) || 13983f2304f8SSagi Grimberg (!queue->hdr_digest && ctrl_hdgst)) { 13993f2304f8SSagi Grimberg pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", 14003f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), 14013f2304f8SSagi Grimberg queue->hdr_digest ? "enabled" : "disabled", 14023f2304f8SSagi Grimberg ctrl_hdgst ? 
"enabled" : "disabled"); 14033f2304f8SSagi Grimberg goto free_icresp; 14043f2304f8SSagi Grimberg } 14053f2304f8SSagi Grimberg 14063f2304f8SSagi Grimberg if (icresp->cpda != 0) { 14073f2304f8SSagi Grimberg pr_err("queue %d: unsupported cpda returned %d\n", 14083f2304f8SSagi Grimberg nvme_tcp_queue_id(queue), icresp->cpda); 14093f2304f8SSagi Grimberg goto free_icresp; 14103f2304f8SSagi Grimberg } 14113f2304f8SSagi Grimberg 1412c2700d28SVarun Prakash maxh2cdata = le32_to_cpu(icresp->maxdata); 1413c2700d28SVarun Prakash if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) { 1414c2700d28SVarun Prakash pr_err("queue %d: invalid maxh2cdata returned %u\n", 1415c2700d28SVarun Prakash nvme_tcp_queue_id(queue), maxh2cdata); 1416c2700d28SVarun Prakash goto free_icresp; 1417c2700d28SVarun Prakash } 1418c2700d28SVarun Prakash queue->maxh2cdata = maxh2cdata; 1419c2700d28SVarun Prakash 14203f2304f8SSagi Grimberg ret = 0; 14213f2304f8SSagi Grimberg free_icresp: 14223f2304f8SSagi Grimberg kfree(icresp); 14233f2304f8SSagi Grimberg free_icreq: 14243f2304f8SSagi Grimberg kfree(icreq); 14253f2304f8SSagi Grimberg return ret; 14263f2304f8SSagi Grimberg } 14273f2304f8SSagi Grimberg 142840510a63SSagi Grimberg static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) 142940510a63SSagi Grimberg { 143040510a63SSagi Grimberg return nvme_tcp_queue_id(queue) == 0; 143140510a63SSagi Grimberg } 143240510a63SSagi Grimberg 143340510a63SSagi Grimberg static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) 143440510a63SSagi Grimberg { 143540510a63SSagi Grimberg struct nvme_tcp_ctrl *ctrl = queue->ctrl; 143640510a63SSagi Grimberg int qid = nvme_tcp_queue_id(queue); 143740510a63SSagi Grimberg 143840510a63SSagi Grimberg return !nvme_tcp_admin_queue(queue) && 143940510a63SSagi Grimberg qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; 144040510a63SSagi Grimberg } 144140510a63SSagi Grimberg 144240510a63SSagi Grimberg static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) 144340510a63SSagi Grimberg { 144440510a63SSagi Grimberg struct nvme_tcp_ctrl *ctrl = queue->ctrl; 144540510a63SSagi Grimberg int qid = nvme_tcp_queue_id(queue); 144640510a63SSagi Grimberg 144740510a63SSagi Grimberg return !nvme_tcp_admin_queue(queue) && 144840510a63SSagi Grimberg !nvme_tcp_default_queue(queue) && 144940510a63SSagi Grimberg qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + 145040510a63SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ]; 145140510a63SSagi Grimberg } 145240510a63SSagi Grimberg 145340510a63SSagi Grimberg static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) 145440510a63SSagi Grimberg { 145540510a63SSagi Grimberg struct nvme_tcp_ctrl *ctrl = queue->ctrl; 145640510a63SSagi Grimberg int qid = nvme_tcp_queue_id(queue); 145740510a63SSagi Grimberg 145840510a63SSagi Grimberg return !nvme_tcp_admin_queue(queue) && 145940510a63SSagi Grimberg !nvme_tcp_default_queue(queue) && 146040510a63SSagi Grimberg !nvme_tcp_read_queue(queue) && 146140510a63SSagi Grimberg qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + 146240510a63SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ] + 146340510a63SSagi Grimberg ctrl->io_queues[HCTX_TYPE_POLL]; 146440510a63SSagi Grimberg } 146540510a63SSagi Grimberg 146640510a63SSagi Grimberg static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) 146740510a63SSagi Grimberg { 146840510a63SSagi Grimberg struct nvme_tcp_ctrl *ctrl = queue->ctrl; 146940510a63SSagi Grimberg int qid = nvme_tcp_queue_id(queue); 147040510a63SSagi Grimberg int n = 0; 147140510a63SSagi Grimberg 147240510a63SSagi 
Grimberg if (nvme_tcp_default_queue(queue)) 147340510a63SSagi Grimberg n = qid - 1; 147440510a63SSagi Grimberg else if (nvme_tcp_read_queue(queue)) 147540510a63SSagi Grimberg n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; 147640510a63SSagi Grimberg else if (nvme_tcp_poll_queue(queue)) 147740510a63SSagi Grimberg n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 147840510a63SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ] - 1; 147940510a63SSagi Grimberg queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); 148040510a63SSagi Grimberg } 148140510a63SSagi Grimberg 14823f2304f8SSagi Grimberg static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, 14833f2304f8SSagi Grimberg int qid, size_t queue_size) 14843f2304f8SSagi Grimberg { 14853f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 14863f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 14876ebf71baSChristoph Hellwig int ret, rcv_pdu_size; 14883f2304f8SSagi Grimberg 14899ebbfe49SChao Leng mutex_init(&queue->queue_lock); 14903f2304f8SSagi Grimberg queue->ctrl = ctrl; 149115ec928aSSagi Grimberg init_llist_head(&queue->req_list); 14923f2304f8SSagi Grimberg INIT_LIST_HEAD(&queue->send_list); 1493db5ad6b7SSagi Grimberg mutex_init(&queue->send_mutex); 14943f2304f8SSagi Grimberg INIT_WORK(&queue->io_work, nvme_tcp_io_work); 14953f2304f8SSagi Grimberg queue->queue_size = queue_size; 14963f2304f8SSagi Grimberg 14973f2304f8SSagi Grimberg if (qid > 0) 14989924b030SIsrael Rukshin queue->cmnd_capsule_len = nctrl->ioccsz * 16; 14993f2304f8SSagi Grimberg else 15003f2304f8SSagi Grimberg queue->cmnd_capsule_len = sizeof(struct nvme_command) + 15013f2304f8SSagi Grimberg NVME_TCP_ADMIN_CCSZ; 15023f2304f8SSagi Grimberg 15033f2304f8SSagi Grimberg ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, 15043f2304f8SSagi Grimberg IPPROTO_TCP, &queue->sock); 15053f2304f8SSagi Grimberg if (ret) { 15069924b030SIsrael Rukshin dev_err(nctrl->device, 15073f2304f8SSagi Grimberg "failed to create socket: %d\n", ret); 15089ebbfe49SChao Leng goto err_destroy_mutex; 15093f2304f8SSagi Grimberg } 15103f2304f8SSagi Grimberg 1511841aee4dSChris Leech nvme_tcp_reclassify_socket(queue->sock); 1512841aee4dSChris Leech 15133f2304f8SSagi Grimberg /* Single syn retry */ 1514557eadfcSChristoph Hellwig tcp_sock_set_syncnt(queue->sock->sk, 1); 15153f2304f8SSagi Grimberg 15163f2304f8SSagi Grimberg /* Set TCP no delay */ 151712abc5eeSChristoph Hellwig tcp_sock_set_nodelay(queue->sock->sk); 15183f2304f8SSagi Grimberg 15193f2304f8SSagi Grimberg /* 15203f2304f8SSagi Grimberg * Cleanup whatever is sitting in the TCP transmit queue on socket 15213f2304f8SSagi Grimberg * close. This is done to prevent stale data from being sent should 15223f2304f8SSagi Grimberg * the network connection be restored before TCP times out. 
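* Concretely: sock_no_linger() below arms SO_LINGER with a zero timeout, so close() aborts the connection and discards queued transmit data.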
15233f2304f8SSagi Grimberg */ 1524c433594cSChristoph Hellwig sock_no_linger(queue->sock->sk); 15253f2304f8SSagi Grimberg 15266e434967SChristoph Hellwig if (so_priority > 0) 15276e434967SChristoph Hellwig sock_set_priority(queue->sock->sk, so_priority); 15289912ade3SWunderlich, Mark 1529bb13985dSIsrael Rukshin /* Set socket type of service */ 15306ebf71baSChristoph Hellwig if (nctrl->opts->tos >= 0) 15316ebf71baSChristoph Hellwig ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); 1532bb13985dSIsrael Rukshin 1533adc99fd3SSagi Grimberg /* Set 10 seconds timeout for icresp recvmsg */ 1534adc99fd3SSagi Grimberg queue->sock->sk->sk_rcvtimeo = 10 * HZ; 1535adc99fd3SSagi Grimberg 15363f2304f8SSagi Grimberg queue->sock->sk->sk_allocation = GFP_ATOMIC; 153740510a63SSagi Grimberg nvme_tcp_set_queue_io_cpu(queue); 15383f2304f8SSagi Grimberg queue->request = NULL; 15393f2304f8SSagi Grimberg queue->data_remaining = 0; 15403f2304f8SSagi Grimberg queue->ddgst_remaining = 0; 15413f2304f8SSagi Grimberg queue->pdu_remaining = 0; 15423f2304f8SSagi Grimberg queue->pdu_offset = 0; 15433f2304f8SSagi Grimberg sk_set_memalloc(queue->sock->sk); 15443f2304f8SSagi Grimberg 15459924b030SIsrael Rukshin if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { 15463f2304f8SSagi Grimberg ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, 15473f2304f8SSagi Grimberg sizeof(ctrl->src_addr)); 15483f2304f8SSagi Grimberg if (ret) { 15499924b030SIsrael Rukshin dev_err(nctrl->device, 15503f2304f8SSagi Grimberg "failed to bind queue %d socket %d\n", 15513f2304f8SSagi Grimberg qid, ret); 15523f2304f8SSagi Grimberg goto err_sock; 15533f2304f8SSagi Grimberg } 15543f2304f8SSagi Grimberg } 15553f2304f8SSagi Grimberg 15563ede8f72SMartin Belanger if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { 15573ede8f72SMartin Belanger char *iface = nctrl->opts->host_iface; 15583ede8f72SMartin Belanger sockptr_t optval = KERNEL_SOCKPTR(iface); 15593ede8f72SMartin Belanger 15603ede8f72SMartin Belanger ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, 15613ede8f72SMartin Belanger optval, strlen(iface)); 15623ede8f72SMartin Belanger if (ret) { 15633ede8f72SMartin Belanger dev_err(nctrl->device, 15643ede8f72SMartin Belanger "failed to bind to interface %s queue %d err %d\n", 15653ede8f72SMartin Belanger iface, qid, ret); 15663ede8f72SMartin Belanger goto err_sock; 15673ede8f72SMartin Belanger } 15683ede8f72SMartin Belanger } 15693ede8f72SMartin Belanger 15703f2304f8SSagi Grimberg queue->hdr_digest = nctrl->opts->hdr_digest; 15713f2304f8SSagi Grimberg queue->data_digest = nctrl->opts->data_digest; 15723f2304f8SSagi Grimberg if (queue->hdr_digest || queue->data_digest) { 15733f2304f8SSagi Grimberg ret = nvme_tcp_alloc_crypto(queue); 15743f2304f8SSagi Grimberg if (ret) { 15759924b030SIsrael Rukshin dev_err(nctrl->device, 15763f2304f8SSagi Grimberg "failed to allocate queue %d crypto\n", qid); 15773f2304f8SSagi Grimberg goto err_sock; 15783f2304f8SSagi Grimberg } 15793f2304f8SSagi Grimberg } 15803f2304f8SSagi Grimberg 15813f2304f8SSagi Grimberg rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) + 15823f2304f8SSagi Grimberg nvme_tcp_hdgst_len(queue); 15833f2304f8SSagi Grimberg queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); 15843f2304f8SSagi Grimberg if (!queue->pdu) { 15853f2304f8SSagi Grimberg ret = -ENOMEM; 15863f2304f8SSagi Grimberg goto err_crypto; 15873f2304f8SSagi Grimberg } 15883f2304f8SSagi Grimberg 15899924b030SIsrael Rukshin dev_dbg(nctrl->device, "connecting queue %d\n", 15903f2304f8SSagi Grimberg nvme_tcp_queue_id(queue)); 
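/* Establish the TCP connection, then run the NVMe/TCP ICReq/ICResp handshake before the queue is marked allocated. */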
15913f2304f8SSagi Grimberg 15923f2304f8SSagi Grimberg ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, 15933f2304f8SSagi Grimberg sizeof(ctrl->addr), 0); 15943f2304f8SSagi Grimberg if (ret) { 15959924b030SIsrael Rukshin dev_err(nctrl->device, 15963f2304f8SSagi Grimberg "failed to connect socket: %d\n", ret); 15973f2304f8SSagi Grimberg goto err_rcv_pdu; 15983f2304f8SSagi Grimberg } 15993f2304f8SSagi Grimberg 16003f2304f8SSagi Grimberg ret = nvme_tcp_init_connection(queue); 16013f2304f8SSagi Grimberg if (ret) 16023f2304f8SSagi Grimberg goto err_init_connect; 16033f2304f8SSagi Grimberg 16043f2304f8SSagi Grimberg queue->rd_enabled = true; 16053f2304f8SSagi Grimberg set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); 16063f2304f8SSagi Grimberg nvme_tcp_init_recv_ctx(queue); 16073f2304f8SSagi Grimberg 16083f2304f8SSagi Grimberg write_lock_bh(&queue->sock->sk->sk_callback_lock); 16093f2304f8SSagi Grimberg queue->sock->sk->sk_user_data = queue; 16103f2304f8SSagi Grimberg queue->state_change = queue->sock->sk->sk_state_change; 16113f2304f8SSagi Grimberg queue->data_ready = queue->sock->sk->sk_data_ready; 16123f2304f8SSagi Grimberg queue->write_space = queue->sock->sk->sk_write_space; 16133f2304f8SSagi Grimberg queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; 16143f2304f8SSagi Grimberg queue->sock->sk->sk_state_change = nvme_tcp_state_change; 16153f2304f8SSagi Grimberg queue->sock->sk->sk_write_space = nvme_tcp_write_space; 1616ac1c4e18SSebastian Andrzej Siewior #ifdef CONFIG_NET_RX_BUSY_POLL 16171a9460ceSSagi Grimberg queue->sock->sk->sk_ll_usec = 1; 1618ac1c4e18SSebastian Andrzej Siewior #endif 16193f2304f8SSagi Grimberg write_unlock_bh(&queue->sock->sk->sk_callback_lock); 16203f2304f8SSagi Grimberg 16213f2304f8SSagi Grimberg return 0; 16223f2304f8SSagi Grimberg 16233f2304f8SSagi Grimberg err_init_connect: 16243f2304f8SSagi Grimberg kernel_sock_shutdown(queue->sock, SHUT_RDWR); 16253f2304f8SSagi Grimberg err_rcv_pdu: 16263f2304f8SSagi Grimberg kfree(queue->pdu); 16273f2304f8SSagi Grimberg err_crypto: 16283f2304f8SSagi Grimberg if (queue->hdr_digest || queue->data_digest) 16293f2304f8SSagi Grimberg nvme_tcp_free_crypto(queue); 16303f2304f8SSagi Grimberg err_sock: 16313f2304f8SSagi Grimberg sock_release(queue->sock); 16323f2304f8SSagi Grimberg queue->sock = NULL; 16339ebbfe49SChao Leng err_destroy_mutex: 1634d48f92cdSKeith Busch mutex_destroy(&queue->send_mutex); 16359ebbfe49SChao Leng mutex_destroy(&queue->queue_lock); 16363f2304f8SSagi Grimberg return ret; 16373f2304f8SSagi Grimberg } 16383f2304f8SSagi Grimberg 16393f2304f8SSagi Grimberg static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue) 16403f2304f8SSagi Grimberg { 16413f2304f8SSagi Grimberg struct socket *sock = queue->sock; 16423f2304f8SSagi Grimberg 16433f2304f8SSagi Grimberg write_lock_bh(&sock->sk->sk_callback_lock); 16443f2304f8SSagi Grimberg sock->sk->sk_user_data = NULL; 16453f2304f8SSagi Grimberg sock->sk->sk_data_ready = queue->data_ready; 16463f2304f8SSagi Grimberg sock->sk->sk_state_change = queue->state_change; 16473f2304f8SSagi Grimberg sock->sk->sk_write_space = queue->write_space; 16483f2304f8SSagi Grimberg write_unlock_bh(&sock->sk->sk_callback_lock); 16493f2304f8SSagi Grimberg } 16503f2304f8SSagi Grimberg 16513f2304f8SSagi Grimberg static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) 16523f2304f8SSagi Grimberg { 16533f2304f8SSagi Grimberg kernel_sock_shutdown(queue->sock, SHUT_RDWR); 16543f2304f8SSagi Grimberg nvme_tcp_restore_sock_calls(queue); 16553f2304f8SSagi Grimberg 
cancel_work_sync(&queue->io_work); 16563f2304f8SSagi Grimberg } 16573f2304f8SSagi Grimberg 16583f2304f8SSagi Grimberg static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) 16593f2304f8SSagi Grimberg { 16603f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 16613f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 16623f2304f8SSagi Grimberg 1663*2bff487fSMaurizio Lombardi if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) 1664*2bff487fSMaurizio Lombardi return; 1665*2bff487fSMaurizio Lombardi 16669ebbfe49SChao Leng mutex_lock(&queue->queue_lock); 16679ebbfe49SChao Leng if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) 16683f2304f8SSagi Grimberg __nvme_tcp_stop_queue(queue); 16699ebbfe49SChao Leng mutex_unlock(&queue->queue_lock); 16703f2304f8SSagi Grimberg } 16713f2304f8SSagi Grimberg 16723f2304f8SSagi Grimberg static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) 16733f2304f8SSagi Grimberg { 16743f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 16753f2304f8SSagi Grimberg int ret; 16763f2304f8SSagi Grimberg 16773f2304f8SSagi Grimberg if (idx) 1678be42a33bSKeith Busch ret = nvmf_connect_io_queue(nctrl, idx); 16793f2304f8SSagi Grimberg else 16803f2304f8SSagi Grimberg ret = nvmf_connect_admin_queue(nctrl); 16813f2304f8SSagi Grimberg 16823f2304f8SSagi Grimberg if (!ret) { 16833f2304f8SSagi Grimberg set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags); 16843f2304f8SSagi Grimberg } else { 1685f34e2589SSagi Grimberg if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags)) 16863f2304f8SSagi Grimberg __nvme_tcp_stop_queue(&ctrl->queues[idx]); 16873f2304f8SSagi Grimberg dev_err(nctrl->device, 16883f2304f8SSagi Grimberg "failed to connect queue: %d ret=%d\n", idx, ret); 16893f2304f8SSagi Grimberg } 16903f2304f8SSagi Grimberg return ret; 16913f2304f8SSagi Grimberg } 16923f2304f8SSagi Grimberg 16932f7a7e5dSChristoph Hellwig static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl) 16943f2304f8SSagi Grimberg { 16953f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 16962f7a7e5dSChristoph Hellwig struct blk_mq_tag_set *set = &ctrl->admin_tag_set; 16973f2304f8SSagi Grimberg int ret; 16983f2304f8SSagi Grimberg 16993f2304f8SSagi Grimberg memset(set, 0, sizeof(*set)); 17003f2304f8SSagi Grimberg set->ops = &nvme_tcp_admin_mq_ops; 17013f2304f8SSagi Grimberg set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 1702ed01fee2SChristoph Hellwig set->reserved_tags = NVMF_RESERVED_TAGS; 1703610c8235SMax Gurtovoy set->numa_node = nctrl->numa_node; 1704db5ad6b7SSagi Grimberg set->flags = BLK_MQ_F_BLOCKING; 17053f2304f8SSagi Grimberg set->cmd_size = sizeof(struct nvme_tcp_request); 17063f2304f8SSagi Grimberg set->driver_data = ctrl; 17073f2304f8SSagi Grimberg set->nr_hw_queues = 1; 1708dc96f938SChaitanya Kulkarni set->timeout = NVME_ADMIN_TIMEOUT; 17092f7a7e5dSChristoph Hellwig ret = blk_mq_alloc_tag_set(set); 17102f7a7e5dSChristoph Hellwig if (!ret) 17112f7a7e5dSChristoph Hellwig nctrl->admin_tagset = set; 17122f7a7e5dSChristoph Hellwig return ret; 17132f7a7e5dSChristoph Hellwig } 17142f7a7e5dSChristoph Hellwig 17152f7a7e5dSChristoph Hellwig static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl) 17162f7a7e5dSChristoph Hellwig { 17172f7a7e5dSChristoph Hellwig struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 17182f7a7e5dSChristoph Hellwig struct blk_mq_tag_set *set = &ctrl->tag_set; 17192f7a7e5dSChristoph Hellwig int ret; 17202f7a7e5dSChristoph Hellwig 17213f2304f8SSagi Grimberg memset(set, 0, sizeof(*set)); 
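/* I/O tag set: queue_depth mirrors the controller SQ size, and BLK_MQ_F_BLOCKING is set, presumably because ->queue_rq() may sleep while sending on the socket under send_mutex. */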
17223f2304f8SSagi Grimberg set->ops = &nvme_tcp_mq_ops; 17233f2304f8SSagi Grimberg set->queue_depth = nctrl->sqsize + 1; 1724ed01fee2SChristoph Hellwig set->reserved_tags = NVMF_RESERVED_TAGS; 1725610c8235SMax Gurtovoy set->numa_node = nctrl->numa_node; 1726db5ad6b7SSagi Grimberg set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; 17273f2304f8SSagi Grimberg set->cmd_size = sizeof(struct nvme_tcp_request); 17283f2304f8SSagi Grimberg set->driver_data = ctrl; 17293f2304f8SSagi Grimberg set->nr_hw_queues = nctrl->queue_count - 1; 17303f2304f8SSagi Grimberg set->timeout = NVME_IO_TIMEOUT; 17311a9460ceSSagi Grimberg set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; 17323f2304f8SSagi Grimberg ret = blk_mq_alloc_tag_set(set); 17332f7a7e5dSChristoph Hellwig if (!ret) 17342f7a7e5dSChristoph Hellwig nctrl->tagset = set; 17352f7a7e5dSChristoph Hellwig return ret; 17363f2304f8SSagi Grimberg } 17373f2304f8SSagi Grimberg 17383f2304f8SSagi Grimberg static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) 17393f2304f8SSagi Grimberg { 17403f2304f8SSagi Grimberg if (to_tcp_ctrl(ctrl)->async_req.pdu) { 1741ceb1e087SDavid Milburn cancel_work_sync(&ctrl->async_event_work); 17423f2304f8SSagi Grimberg nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); 17433f2304f8SSagi Grimberg to_tcp_ctrl(ctrl)->async_req.pdu = NULL; 17443f2304f8SSagi Grimberg } 17453f2304f8SSagi Grimberg 17463f2304f8SSagi Grimberg nvme_tcp_free_queue(ctrl, 0); 17473f2304f8SSagi Grimberg } 17483f2304f8SSagi Grimberg 17493f2304f8SSagi Grimberg static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) 17503f2304f8SSagi Grimberg { 17513f2304f8SSagi Grimberg int i; 17523f2304f8SSagi Grimberg 17533f2304f8SSagi Grimberg for (i = 1; i < ctrl->queue_count; i++) 17543f2304f8SSagi Grimberg nvme_tcp_free_queue(ctrl, i); 17553f2304f8SSagi Grimberg } 17563f2304f8SSagi Grimberg 17573f2304f8SSagi Grimberg static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) 17583f2304f8SSagi Grimberg { 17593f2304f8SSagi Grimberg int i; 17603f2304f8SSagi Grimberg 17613f2304f8SSagi Grimberg for (i = 1; i < ctrl->queue_count; i++) 17623f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, i); 17633f2304f8SSagi Grimberg } 17643f2304f8SSagi Grimberg 17653f2304f8SSagi Grimberg static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl) 17663f2304f8SSagi Grimberg { 1767462b8b2dSChaitanya Kulkarni int i, ret; 17683f2304f8SSagi Grimberg 17693f2304f8SSagi Grimberg for (i = 1; i < ctrl->queue_count; i++) { 17703f2304f8SSagi Grimberg ret = nvme_tcp_start_queue(ctrl, i); 17713f2304f8SSagi Grimberg if (ret) 17723f2304f8SSagi Grimberg goto out_stop_queues; 17733f2304f8SSagi Grimberg } 17743f2304f8SSagi Grimberg 17753f2304f8SSagi Grimberg return 0; 17763f2304f8SSagi Grimberg 17773f2304f8SSagi Grimberg out_stop_queues: 17783f2304f8SSagi Grimberg for (i--; i >= 1; i--) 17793f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, i); 17803f2304f8SSagi Grimberg return ret; 17813f2304f8SSagi Grimberg } 17823f2304f8SSagi Grimberg 17833f2304f8SSagi Grimberg static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) 17843f2304f8SSagi Grimberg { 17853f2304f8SSagi Grimberg int ret; 17863f2304f8SSagi Grimberg 17873f2304f8SSagi Grimberg ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); 17883f2304f8SSagi Grimberg if (ret) 17893f2304f8SSagi Grimberg return ret; 17903f2304f8SSagi Grimberg 17913f2304f8SSagi Grimberg ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); 17923f2304f8SSagi Grimberg if (ret) 17933f2304f8SSagi Grimberg goto out_free_queue; 17943f2304f8SSagi Grimberg 
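/* Note: the AER request allocated above carries no blk-mq tag; it is identified by command_id NVME_AQ_BLK_MQ_DEPTH when built in nvme_tcp_submit_async_event(). */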
17953f2304f8SSagi Grimberg return 0; 17963f2304f8SSagi Grimberg 17973f2304f8SSagi Grimberg out_free_queue: 17983f2304f8SSagi Grimberg nvme_tcp_free_queue(ctrl, 0); 17993f2304f8SSagi Grimberg return ret; 18003f2304f8SSagi Grimberg } 18013f2304f8SSagi Grimberg 1802efb973b1SSagi Grimberg static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) 18033f2304f8SSagi Grimberg { 18043f2304f8SSagi Grimberg int i, ret; 18053f2304f8SSagi Grimberg 18063f2304f8SSagi Grimberg for (i = 1; i < ctrl->queue_count; i++) { 1807a387935cSChaitanya Kulkarni ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1); 18083f2304f8SSagi Grimberg if (ret) 18093f2304f8SSagi Grimberg goto out_free_queues; 18103f2304f8SSagi Grimberg } 18113f2304f8SSagi Grimberg 18123f2304f8SSagi Grimberg return 0; 18133f2304f8SSagi Grimberg 18143f2304f8SSagi Grimberg out_free_queues: 18153f2304f8SSagi Grimberg for (i--; i >= 1; i--) 18163f2304f8SSagi Grimberg nvme_tcp_free_queue(ctrl, i); 18173f2304f8SSagi Grimberg 18183f2304f8SSagi Grimberg return ret; 18193f2304f8SSagi Grimberg } 18203f2304f8SSagi Grimberg 18213f2304f8SSagi Grimberg static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) 18223f2304f8SSagi Grimberg { 1823873946f4SSagi Grimberg unsigned int nr_io_queues; 1824873946f4SSagi Grimberg 1825873946f4SSagi Grimberg nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); 1826873946f4SSagi Grimberg nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); 18271a9460ceSSagi Grimberg nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); 1828873946f4SSagi Grimberg 1829873946f4SSagi Grimberg return nr_io_queues; 18303f2304f8SSagi Grimberg } 18313f2304f8SSagi Grimberg 183264861993SSagi Grimberg static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl, 183364861993SSagi Grimberg unsigned int nr_io_queues) 183464861993SSagi Grimberg { 183564861993SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 183664861993SSagi Grimberg struct nvmf_ctrl_options *opts = nctrl->opts; 183764861993SSagi Grimberg 183864861993SSagi Grimberg if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { 183964861993SSagi Grimberg /* 184064861993SSagi Grimberg * separate read/write queues 184164861993SSagi Grimberg * hand out dedicated default queues only after we have 184264861993SSagi Grimberg * sufficient read queues. 184364861993SSagi Grimberg */ 184464861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; 184564861993SSagi Grimberg nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; 184664861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT] = 184764861993SSagi Grimberg min(opts->nr_write_queues, nr_io_queues); 184864861993SSagi Grimberg nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 184964861993SSagi Grimberg } else { 185064861993SSagi Grimberg /* 185164861993SSagi Grimberg * shared read/write queues 185264861993SSagi Grimberg * either no write queues were requested, or we don't have 185364861993SSagi Grimberg * sufficient queue count to have dedicated default queues. 
185464861993SSagi Grimberg */ 185564861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT] = 185664861993SSagi Grimberg min(opts->nr_io_queues, nr_io_queues); 185764861993SSagi Grimberg nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 185864861993SSagi Grimberg } 18591a9460ceSSagi Grimberg 18601a9460ceSSagi Grimberg if (opts->nr_poll_queues && nr_io_queues) { 18611a9460ceSSagi Grimberg /* map dedicated poll queues only if we have queues left */ 18621a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_POLL] = 18631a9460ceSSagi Grimberg min(opts->nr_poll_queues, nr_io_queues); 18641a9460ceSSagi Grimberg } 186564861993SSagi Grimberg } 186664861993SSagi Grimberg 1867efb973b1SSagi Grimberg static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) 18683f2304f8SSagi Grimberg { 18693f2304f8SSagi Grimberg unsigned int nr_io_queues; 18703f2304f8SSagi Grimberg int ret; 18713f2304f8SSagi Grimberg 18723f2304f8SSagi Grimberg nr_io_queues = nvme_tcp_nr_io_queues(ctrl); 18733f2304f8SSagi Grimberg ret = nvme_set_queue_count(ctrl, &nr_io_queues); 18743f2304f8SSagi Grimberg if (ret) 18753f2304f8SSagi Grimberg return ret; 18763f2304f8SSagi Grimberg 1877664227fdSRuozhu Li if (nr_io_queues == 0) { 187872f57242SSagi Grimberg dev_err(ctrl->device, 187972f57242SSagi Grimberg "unable to set any I/O queues\n"); 188072f57242SSagi Grimberg return -ENOMEM; 188172f57242SSagi Grimberg } 18823f2304f8SSagi Grimberg 1883664227fdSRuozhu Li ctrl->queue_count = nr_io_queues + 1; 18843f2304f8SSagi Grimberg dev_info(ctrl->device, 18853f2304f8SSagi Grimberg "creating %d I/O queues.\n", nr_io_queues); 18863f2304f8SSagi Grimberg 188764861993SSagi Grimberg nvme_tcp_set_io_queues(ctrl, nr_io_queues); 188864861993SSagi Grimberg 1889efb973b1SSagi Grimberg return __nvme_tcp_alloc_io_queues(ctrl); 18903f2304f8SSagi Grimberg } 18913f2304f8SSagi Grimberg 18923f2304f8SSagi Grimberg static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) 18933f2304f8SSagi Grimberg { 18943f2304f8SSagi Grimberg nvme_tcp_stop_io_queues(ctrl); 18953f2304f8SSagi Grimberg if (remove) { 18966f8191fdSChristoph Hellwig blk_mq_destroy_queue(ctrl->connect_q); 18973f2304f8SSagi Grimberg blk_mq_free_tag_set(ctrl->tagset); 18983f2304f8SSagi Grimberg } 18993f2304f8SSagi Grimberg nvme_tcp_free_io_queues(ctrl); 19003f2304f8SSagi Grimberg } 19013f2304f8SSagi Grimberg 19023f2304f8SSagi Grimberg static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) 19033f2304f8SSagi Grimberg { 19043f2304f8SSagi Grimberg int ret; 19053f2304f8SSagi Grimberg 1906efb973b1SSagi Grimberg ret = nvme_tcp_alloc_io_queues(ctrl); 19073f2304f8SSagi Grimberg if (ret) 19083f2304f8SSagi Grimberg return ret; 19093f2304f8SSagi Grimberg 19103f2304f8SSagi Grimberg if (new) { 19112f7a7e5dSChristoph Hellwig ret = nvme_tcp_alloc_tag_set(ctrl); 19122f7a7e5dSChristoph Hellwig if (ret) 19133f2304f8SSagi Grimberg goto out_free_io_queues; 19143f2304f8SSagi Grimberg 191572e8b5cdSChaitanya Kulkarni ret = nvme_ctrl_init_connect_q(ctrl); 191672e8b5cdSChaitanya Kulkarni if (ret) 19173f2304f8SSagi Grimberg goto out_free_tag_set; 19183f2304f8SSagi Grimberg } 19193f2304f8SSagi Grimberg 19203f2304f8SSagi Grimberg ret = nvme_tcp_start_io_queues(ctrl); 19213f2304f8SSagi Grimberg if (ret) 19223f2304f8SSagi Grimberg goto out_cleanup_connect_q; 19233f2304f8SSagi Grimberg 19242875b0aeSSagi Grimberg if (!new) { 19252875b0aeSSagi Grimberg nvme_start_queues(ctrl); 1926e5c01f4fSSagi Grimberg if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { 1927e5c01f4fSSagi Grimberg /* 1928e5c01f4fSSagi 
Grimberg * If we timed out waiting for freeze we are likely to 1929e5c01f4fSSagi Grimberg * be stuck. Fail the controller initialization just 1930e5c01f4fSSagi Grimberg * to be safe. 1931e5c01f4fSSagi Grimberg */ 1932e5c01f4fSSagi Grimberg ret = -ENODEV; 1933e5c01f4fSSagi Grimberg goto out_wait_freeze_timed_out; 1934e5c01f4fSSagi Grimberg } 19352875b0aeSSagi Grimberg blk_mq_update_nr_hw_queues(ctrl->tagset, 19362875b0aeSSagi Grimberg ctrl->queue_count - 1); 19372875b0aeSSagi Grimberg nvme_unfreeze(ctrl); 19382875b0aeSSagi Grimberg } 19392875b0aeSSagi Grimberg 19403f2304f8SSagi Grimberg return 0; 19413f2304f8SSagi Grimberg 1942e5c01f4fSSagi Grimberg out_wait_freeze_timed_out: 1943e5c01f4fSSagi Grimberg nvme_stop_queues(ctrl); 194470a99574SChao Leng nvme_sync_io_queues(ctrl); 1945e5c01f4fSSagi Grimberg nvme_tcp_stop_io_queues(ctrl); 19463f2304f8SSagi Grimberg out_cleanup_connect_q: 194770a99574SChao Leng nvme_cancel_tagset(ctrl); 1948e85037a2SSagi Grimberg if (new) 19496f8191fdSChristoph Hellwig blk_mq_destroy_queue(ctrl->connect_q); 19503f2304f8SSagi Grimberg out_free_tag_set: 19513f2304f8SSagi Grimberg if (new) 19523f2304f8SSagi Grimberg blk_mq_free_tag_set(ctrl->tagset); 19533f2304f8SSagi Grimberg out_free_io_queues: 19543f2304f8SSagi Grimberg nvme_tcp_free_io_queues(ctrl); 19553f2304f8SSagi Grimberg return ret; 19563f2304f8SSagi Grimberg } 19573f2304f8SSagi Grimberg 19583f2304f8SSagi Grimberg static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) 19593f2304f8SSagi Grimberg { 19603f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, 0); 19613f2304f8SSagi Grimberg if (remove) { 19626f8191fdSChristoph Hellwig blk_mq_destroy_queue(ctrl->admin_q); 19636f8191fdSChristoph Hellwig blk_mq_destroy_queue(ctrl->fabrics_q); 19643f2304f8SSagi Grimberg blk_mq_free_tag_set(ctrl->admin_tagset); 19653f2304f8SSagi Grimberg } 19663f2304f8SSagi Grimberg nvme_tcp_free_admin_queue(ctrl); 19673f2304f8SSagi Grimberg } 19683f2304f8SSagi Grimberg 19693f2304f8SSagi Grimberg static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) 19703f2304f8SSagi Grimberg { 19713f2304f8SSagi Grimberg int error; 19723f2304f8SSagi Grimberg 19733f2304f8SSagi Grimberg error = nvme_tcp_alloc_admin_queue(ctrl); 19743f2304f8SSagi Grimberg if (error) 19753f2304f8SSagi Grimberg return error; 19763f2304f8SSagi Grimberg 19773f2304f8SSagi Grimberg if (new) { 19782f7a7e5dSChristoph Hellwig error = nvme_tcp_alloc_admin_tag_set(ctrl); 19792f7a7e5dSChristoph Hellwig if (error) 19803f2304f8SSagi Grimberg goto out_free_queue; 19813f2304f8SSagi Grimberg 1982e7832cb4SSagi Grimberg ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset); 1983e7832cb4SSagi Grimberg if (IS_ERR(ctrl->fabrics_q)) { 1984e7832cb4SSagi Grimberg error = PTR_ERR(ctrl->fabrics_q); 1985e7832cb4SSagi Grimberg goto out_free_tagset; 1986e7832cb4SSagi Grimberg } 1987e7832cb4SSagi Grimberg 19883f2304f8SSagi Grimberg ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset); 19893f2304f8SSagi Grimberg if (IS_ERR(ctrl->admin_q)) { 19903f2304f8SSagi Grimberg error = PTR_ERR(ctrl->admin_q); 1991e7832cb4SSagi Grimberg goto out_cleanup_fabrics_q; 19923f2304f8SSagi Grimberg } 19933f2304f8SSagi Grimberg } 19943f2304f8SSagi Grimberg 19953f2304f8SSagi Grimberg error = nvme_tcp_start_queue(ctrl, 0); 19963f2304f8SSagi Grimberg if (error) 19973f2304f8SSagi Grimberg goto out_cleanup_queue; 19983f2304f8SSagi Grimberg 1999c0f2f45bSSagi Grimberg error = nvme_enable_ctrl(ctrl); 20003f2304f8SSagi Grimberg if (error) 20013f2304f8SSagi Grimberg goto out_stop_queue; 
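/* With the controller enabled, unquiesce the admin queue so that nvme_init_ctrl_finish() below can issue its initialization commands over it. */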
20023f2304f8SSagi Grimberg 20036ca1d902SMing Lei nvme_start_admin_queue(ctrl); 2004e7832cb4SSagi Grimberg 2005f21c4769SChaitanya Kulkarni error = nvme_init_ctrl_finish(ctrl); 20063f2304f8SSagi Grimberg if (error) 200770a99574SChao Leng goto out_quiesce_queue; 20083f2304f8SSagi Grimberg 20093f2304f8SSagi Grimberg return 0; 20103f2304f8SSagi Grimberg 201170a99574SChao Leng out_quiesce_queue: 20126ca1d902SMing Lei nvme_stop_admin_queue(ctrl); 201370a99574SChao Leng blk_sync_queue(ctrl->admin_q); 20143f2304f8SSagi Grimberg out_stop_queue: 20153f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, 0); 201670a99574SChao Leng nvme_cancel_admin_tagset(ctrl); 20173f2304f8SSagi Grimberg out_cleanup_queue: 20183f2304f8SSagi Grimberg if (new) 20196f8191fdSChristoph Hellwig blk_mq_destroy_queue(ctrl->admin_q); 2020e7832cb4SSagi Grimberg out_cleanup_fabrics_q: 2021e7832cb4SSagi Grimberg if (new) 20226f8191fdSChristoph Hellwig blk_mq_destroy_queue(ctrl->fabrics_q); 20233f2304f8SSagi Grimberg out_free_tagset: 20243f2304f8SSagi Grimberg if (new) 20253f2304f8SSagi Grimberg blk_mq_free_tag_set(ctrl->admin_tagset); 20263f2304f8SSagi Grimberg out_free_queue: 20273f2304f8SSagi Grimberg nvme_tcp_free_admin_queue(ctrl); 20283f2304f8SSagi Grimberg return error; 20293f2304f8SSagi Grimberg } 20303f2304f8SSagi Grimberg 20313f2304f8SSagi Grimberg static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, 20323f2304f8SSagi Grimberg bool remove) 20333f2304f8SSagi Grimberg { 20346ca1d902SMing Lei nvme_stop_admin_queue(ctrl); 2035d6f66210SChao Leng blk_sync_queue(ctrl->admin_q); 20363f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, 0); 2037563c8158SChao Leng nvme_cancel_admin_tagset(ctrl); 2038e7832cb4SSagi Grimberg if (remove) 20396ca1d902SMing Lei nvme_start_admin_queue(ctrl); 20403f2304f8SSagi Grimberg nvme_tcp_destroy_admin_queue(ctrl, remove); 20413f2304f8SSagi Grimberg } 20423f2304f8SSagi Grimberg 20433f2304f8SSagi Grimberg static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, 20443f2304f8SSagi Grimberg bool remove) 20453f2304f8SSagi Grimberg { 20463f2304f8SSagi Grimberg if (ctrl->queue_count <= 1) 2047d6f66210SChao Leng return; 20486ca1d902SMing Lei nvme_stop_admin_queue(ctrl); 20492875b0aeSSagi Grimberg nvme_start_freeze(ctrl); 20503f2304f8SSagi Grimberg nvme_stop_queues(ctrl); 2051d6f66210SChao Leng nvme_sync_io_queues(ctrl); 20523f2304f8SSagi Grimberg nvme_tcp_stop_io_queues(ctrl); 2053563c8158SChao Leng nvme_cancel_tagset(ctrl); 20543f2304f8SSagi Grimberg if (remove) 20553f2304f8SSagi Grimberg nvme_start_queues(ctrl); 20563f2304f8SSagi Grimberg nvme_tcp_destroy_io_queues(ctrl, remove); 20573f2304f8SSagi Grimberg } 20583f2304f8SSagi Grimberg 20593f2304f8SSagi Grimberg static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) 20603f2304f8SSagi Grimberg { 20613f2304f8SSagi Grimberg /* If we are resetting/deleting then do nothing */ 20623f2304f8SSagi Grimberg if (ctrl->state != NVME_CTRL_CONNECTING) { 20633f2304f8SSagi Grimberg WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW || 20643f2304f8SSagi Grimberg ctrl->state == NVME_CTRL_LIVE); 20653f2304f8SSagi Grimberg return; 20663f2304f8SSagi Grimberg } 20673f2304f8SSagi Grimberg 20683f2304f8SSagi Grimberg if (nvmf_should_reconnect(ctrl)) { 20693f2304f8SSagi Grimberg dev_info(ctrl->device, "Reconnecting in %d seconds...\n", 20703f2304f8SSagi Grimberg ctrl->opts->reconnect_delay); 20713f2304f8SSagi Grimberg queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, 20723f2304f8SSagi Grimberg ctrl->opts->reconnect_delay * HZ); 20733f2304f8SSagi Grimberg } 
else { 20743f2304f8SSagi Grimberg dev_info(ctrl->device, "Removing controller...\n"); 20753f2304f8SSagi Grimberg nvme_delete_ctrl(ctrl); 20763f2304f8SSagi Grimberg } 20773f2304f8SSagi Grimberg } 20783f2304f8SSagi Grimberg 20793f2304f8SSagi Grimberg static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) 20803f2304f8SSagi Grimberg { 20813f2304f8SSagi Grimberg struct nvmf_ctrl_options *opts = ctrl->opts; 2082312910f4SColin Ian King int ret; 20833f2304f8SSagi Grimberg 20843f2304f8SSagi Grimberg ret = nvme_tcp_configure_admin_queue(ctrl, new); 20853f2304f8SSagi Grimberg if (ret) 20863f2304f8SSagi Grimberg return ret; 20873f2304f8SSagi Grimberg 20883f2304f8SSagi Grimberg if (ctrl->icdoff) { 2089522af60cSDan Carpenter ret = -EOPNOTSUPP; 20903f2304f8SSagi Grimberg dev_err(ctrl->device, "icdoff is not supported!\n"); 20913f2304f8SSagi Grimberg goto destroy_admin; 20923f2304f8SSagi Grimberg } 20933f2304f8SSagi Grimberg 20943b54064fSChaitanya Kulkarni if (!nvme_ctrl_sgl_supported(ctrl)) { 2095522af60cSDan Carpenter ret = -EOPNOTSUPP; 209673ffcefcSMax Gurtovoy dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); 209773ffcefcSMax Gurtovoy goto destroy_admin; 209873ffcefcSMax Gurtovoy } 209973ffcefcSMax Gurtovoy 21003f2304f8SSagi Grimberg if (opts->queue_size > ctrl->sqsize + 1) 21013f2304f8SSagi Grimberg dev_warn(ctrl->device, 21023f2304f8SSagi Grimberg "queue_size %zu > ctrl sqsize %u, clamping down\n", 21033f2304f8SSagi Grimberg opts->queue_size, ctrl->sqsize + 1); 21043f2304f8SSagi Grimberg 21053f2304f8SSagi Grimberg if (ctrl->sqsize + 1 > ctrl->maxcmd) { 21063f2304f8SSagi Grimberg dev_warn(ctrl->device, 21073f2304f8SSagi Grimberg "sqsize %u > ctrl maxcmd %u, clamping down\n", 21083f2304f8SSagi Grimberg ctrl->sqsize + 1, ctrl->maxcmd); 21093f2304f8SSagi Grimberg ctrl->sqsize = ctrl->maxcmd - 1; 21103f2304f8SSagi Grimberg } 21113f2304f8SSagi Grimberg 21123f2304f8SSagi Grimberg if (ctrl->queue_count > 1) { 21133f2304f8SSagi Grimberg ret = nvme_tcp_configure_io_queues(ctrl, new); 21143f2304f8SSagi Grimberg if (ret) 21153f2304f8SSagi Grimberg goto destroy_admin; 21163f2304f8SSagi Grimberg } 21173f2304f8SSagi Grimberg 21183f2304f8SSagi Grimberg if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { 2119bea54ef5SIsrael Rukshin /* 2120ecca390eSSagi Grimberg * state change failure is ok if we started ctrl delete, 2121bea54ef5SIsrael Rukshin * unless we're during creation of a new controller to 2122bea54ef5SIsrael Rukshin * avoid races with teardown flow. 
2123bea54ef5SIsrael Rukshin */ 2124ecca390eSSagi Grimberg WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && 2125ecca390eSSagi Grimberg ctrl->state != NVME_CTRL_DELETING_NOIO); 2126bea54ef5SIsrael Rukshin WARN_ON_ONCE(new); 21273f2304f8SSagi Grimberg ret = -EINVAL; 21283f2304f8SSagi Grimberg goto destroy_io; 21293f2304f8SSagi Grimberg } 21303f2304f8SSagi Grimberg 21313f2304f8SSagi Grimberg nvme_start_ctrl(ctrl); 21323f2304f8SSagi Grimberg return 0; 21333f2304f8SSagi Grimberg 21343f2304f8SSagi Grimberg destroy_io: 213570a99574SChao Leng if (ctrl->queue_count > 1) { 213670a99574SChao Leng nvme_stop_queues(ctrl); 213770a99574SChao Leng nvme_sync_io_queues(ctrl); 213870a99574SChao Leng nvme_tcp_stop_io_queues(ctrl); 213970a99574SChao Leng nvme_cancel_tagset(ctrl); 21403f2304f8SSagi Grimberg nvme_tcp_destroy_io_queues(ctrl, new); 214170a99574SChao Leng } 21423f2304f8SSagi Grimberg destroy_admin: 21436ca1d902SMing Lei nvme_stop_admin_queue(ctrl); 214470a99574SChao Leng blk_sync_queue(ctrl->admin_q); 21453f2304f8SSagi Grimberg nvme_tcp_stop_queue(ctrl, 0); 214670a99574SChao Leng nvme_cancel_admin_tagset(ctrl); 21473f2304f8SSagi Grimberg nvme_tcp_destroy_admin_queue(ctrl, new); 21483f2304f8SSagi Grimberg return ret; 21493f2304f8SSagi Grimberg } 21503f2304f8SSagi Grimberg 21513f2304f8SSagi Grimberg static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work) 21523f2304f8SSagi Grimberg { 21533f2304f8SSagi Grimberg struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work), 21543f2304f8SSagi Grimberg struct nvme_tcp_ctrl, connect_work); 21553f2304f8SSagi Grimberg struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; 21563f2304f8SSagi Grimberg 21573f2304f8SSagi Grimberg ++ctrl->nr_reconnects; 21583f2304f8SSagi Grimberg 21593f2304f8SSagi Grimberg if (nvme_tcp_setup_ctrl(ctrl, false)) 21603f2304f8SSagi Grimberg goto requeue; 21613f2304f8SSagi Grimberg 216256a77d26SColin Ian King dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", 21633f2304f8SSagi Grimberg ctrl->nr_reconnects); 21643f2304f8SSagi Grimberg 21653f2304f8SSagi Grimberg ctrl->nr_reconnects = 0; 21663f2304f8SSagi Grimberg 21673f2304f8SSagi Grimberg return; 21683f2304f8SSagi Grimberg 21693f2304f8SSagi Grimberg requeue: 21703f2304f8SSagi Grimberg dev_info(ctrl->device, "Failed reconnect attempt %d\n", 21713f2304f8SSagi Grimberg ctrl->nr_reconnects); 21723f2304f8SSagi Grimberg nvme_tcp_reconnect_or_remove(ctrl); 21733f2304f8SSagi Grimberg } 21743f2304f8SSagi Grimberg 21753f2304f8SSagi Grimberg static void nvme_tcp_error_recovery_work(struct work_struct *work) 21763f2304f8SSagi Grimberg { 21773f2304f8SSagi Grimberg struct nvme_tcp_ctrl *tcp_ctrl = container_of(work, 21783f2304f8SSagi Grimberg struct nvme_tcp_ctrl, err_work); 21793f2304f8SSagi Grimberg struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; 21803f2304f8SSagi Grimberg 2181f50fff73SHannes Reinecke nvme_auth_stop(ctrl); 21823f2304f8SSagi Grimberg nvme_stop_keep_alive(ctrl); 2183ff9fc7ebSSagi Grimberg flush_work(&ctrl->async_event_work); 21843f2304f8SSagi Grimberg nvme_tcp_teardown_io_queues(ctrl, false); 21853f2304f8SSagi Grimberg /* unquiesce to fail fast pending requests */ 21863f2304f8SSagi Grimberg nvme_start_queues(ctrl); 21873f2304f8SSagi Grimberg nvme_tcp_teardown_admin_queue(ctrl, false); 21886ca1d902SMing Lei nvme_start_admin_queue(ctrl); 21893f2304f8SSagi Grimberg 21903f2304f8SSagi Grimberg if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { 2191ecca390eSSagi Grimberg /* state change failure is ok if we started ctrl delete */ 2192ecca390eSSagi Grimberg 
WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && 2193ecca390eSSagi Grimberg ctrl->state != NVME_CTRL_DELETING_NOIO); 21943f2304f8SSagi Grimberg return; 21953f2304f8SSagi Grimberg } 21963f2304f8SSagi Grimberg 21973f2304f8SSagi Grimberg nvme_tcp_reconnect_or_remove(ctrl); 21983f2304f8SSagi Grimberg } 21993f2304f8SSagi Grimberg 22003f2304f8SSagi Grimberg static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) 22013f2304f8SSagi Grimberg { 22023f2304f8SSagi Grimberg nvme_tcp_teardown_io_queues(ctrl, shutdown); 22036ca1d902SMing Lei nvme_stop_admin_queue(ctrl); 22043f2304f8SSagi Grimberg if (shutdown) 22053f2304f8SSagi Grimberg nvme_shutdown_ctrl(ctrl); 22063f2304f8SSagi Grimberg else 2207b5b05048SSagi Grimberg nvme_disable_ctrl(ctrl); 22083f2304f8SSagi Grimberg nvme_tcp_teardown_admin_queue(ctrl, shutdown); 22093f2304f8SSagi Grimberg } 22103f2304f8SSagi Grimberg 22113f2304f8SSagi Grimberg static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) 22123f2304f8SSagi Grimberg { 22133f2304f8SSagi Grimberg nvme_tcp_teardown_ctrl(ctrl, true); 22143f2304f8SSagi Grimberg } 22153f2304f8SSagi Grimberg 22163f2304f8SSagi Grimberg static void nvme_reset_ctrl_work(struct work_struct *work) 22173f2304f8SSagi Grimberg { 22183f2304f8SSagi Grimberg struct nvme_ctrl *ctrl = 22193f2304f8SSagi Grimberg container_of(work, struct nvme_ctrl, reset_work); 22203f2304f8SSagi Grimberg 22213f2304f8SSagi Grimberg nvme_stop_ctrl(ctrl); 22223f2304f8SSagi Grimberg nvme_tcp_teardown_ctrl(ctrl, false); 22233f2304f8SSagi Grimberg 22243f2304f8SSagi Grimberg if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { 2225ecca390eSSagi Grimberg /* state change failure is ok if we started ctrl delete */ 2226ecca390eSSagi Grimberg WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && 2227ecca390eSSagi Grimberg ctrl->state != NVME_CTRL_DELETING_NOIO); 22283f2304f8SSagi Grimberg return; 22293f2304f8SSagi Grimberg } 22303f2304f8SSagi Grimberg 22313f2304f8SSagi Grimberg if (nvme_tcp_setup_ctrl(ctrl, false)) 22323f2304f8SSagi Grimberg goto out_fail; 22333f2304f8SSagi Grimberg 22343f2304f8SSagi Grimberg return; 22353f2304f8SSagi Grimberg 22363f2304f8SSagi Grimberg out_fail: 22373f2304f8SSagi Grimberg ++ctrl->nr_reconnects; 22383f2304f8SSagi Grimberg nvme_tcp_reconnect_or_remove(ctrl); 22393f2304f8SSagi Grimberg } 22403f2304f8SSagi Grimberg 2241f7f70f4aSRuozhu Li static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) 2242f7f70f4aSRuozhu Li { 2243f7f70f4aSRuozhu Li cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); 2244f7f70f4aSRuozhu Li cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); 2245f7f70f4aSRuozhu Li } 2246f7f70f4aSRuozhu Li 22473f2304f8SSagi Grimberg static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl) 22483f2304f8SSagi Grimberg { 22493f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 22503f2304f8SSagi Grimberg 22513f2304f8SSagi Grimberg if (list_empty(&ctrl->list)) 22523f2304f8SSagi Grimberg goto free_ctrl; 22533f2304f8SSagi Grimberg 22543f2304f8SSagi Grimberg mutex_lock(&nvme_tcp_ctrl_mutex); 22553f2304f8SSagi Grimberg list_del(&ctrl->list); 22563f2304f8SSagi Grimberg mutex_unlock(&nvme_tcp_ctrl_mutex); 22573f2304f8SSagi Grimberg 22583f2304f8SSagi Grimberg nvmf_free_options(nctrl->opts); 22593f2304f8SSagi Grimberg free_ctrl: 22603f2304f8SSagi Grimberg kfree(ctrl->queues); 22613f2304f8SSagi Grimberg kfree(ctrl); 22623f2304f8SSagi Grimberg } 22633f2304f8SSagi Grimberg 22643f2304f8SSagi Grimberg static void nvme_tcp_set_sg_null(struct nvme_command *c) 22653f2304f8SSagi Grimberg { 
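/* Build a zero-length transport SGL for commands that carry no data, e.g. the async event request submitted below. */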
22663f2304f8SSagi Grimberg struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 22673f2304f8SSagi Grimberg 22683f2304f8SSagi Grimberg sg->addr = 0; 22693f2304f8SSagi Grimberg sg->length = 0; 22703f2304f8SSagi Grimberg sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 22713f2304f8SSagi Grimberg NVME_SGL_FMT_TRANSPORT_A; 22723f2304f8SSagi Grimberg } 22733f2304f8SSagi Grimberg 22743f2304f8SSagi Grimberg static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, 22753f2304f8SSagi Grimberg struct nvme_command *c, u32 data_len) 22763f2304f8SSagi Grimberg { 22773f2304f8SSagi Grimberg struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 22783f2304f8SSagi Grimberg 22793f2304f8SSagi Grimberg sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); 22803f2304f8SSagi Grimberg sg->length = cpu_to_le32(data_len); 22813f2304f8SSagi Grimberg sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; 22823f2304f8SSagi Grimberg } 22833f2304f8SSagi Grimberg 22843f2304f8SSagi Grimberg static void nvme_tcp_set_sg_host_data(struct nvme_command *c, 22853f2304f8SSagi Grimberg u32 data_len) 22863f2304f8SSagi Grimberg { 22873f2304f8SSagi Grimberg struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 22883f2304f8SSagi Grimberg 22893f2304f8SSagi Grimberg sg->addr = 0; 22903f2304f8SSagi Grimberg sg->length = cpu_to_le32(data_len); 22913f2304f8SSagi Grimberg sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 22923f2304f8SSagi Grimberg NVME_SGL_FMT_TRANSPORT_A; 22933f2304f8SSagi Grimberg } 22943f2304f8SSagi Grimberg 22953f2304f8SSagi Grimberg static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) 22963f2304f8SSagi Grimberg { 22973f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); 22983f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = &ctrl->queues[0]; 22993f2304f8SSagi Grimberg struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; 23003f2304f8SSagi Grimberg struct nvme_command *cmd = &pdu->cmd; 23013f2304f8SSagi Grimberg u8 hdgst = nvme_tcp_hdgst_len(queue); 23023f2304f8SSagi Grimberg 23033f2304f8SSagi Grimberg memset(pdu, 0, sizeof(*pdu)); 23043f2304f8SSagi Grimberg pdu->hdr.type = nvme_tcp_cmd; 23053f2304f8SSagi Grimberg if (queue->hdr_digest) 23063f2304f8SSagi Grimberg pdu->hdr.flags |= NVME_TCP_F_HDGST; 23073f2304f8SSagi Grimberg pdu->hdr.hlen = sizeof(*pdu); 23083f2304f8SSagi Grimberg pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); 23093f2304f8SSagi Grimberg 23103f2304f8SSagi Grimberg cmd->common.opcode = nvme_admin_async_event; 23113f2304f8SSagi Grimberg cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; 23123f2304f8SSagi Grimberg cmd->common.flags |= NVME_CMD_SGL_METABUF; 23133f2304f8SSagi Grimberg nvme_tcp_set_sg_null(cmd); 23143f2304f8SSagi Grimberg 23153f2304f8SSagi Grimberg ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; 23163f2304f8SSagi Grimberg ctrl->async_req.offset = 0; 23173f2304f8SSagi Grimberg ctrl->async_req.curr_bio = NULL; 23183f2304f8SSagi Grimberg ctrl->async_req.data_len = 0; 23193f2304f8SSagi Grimberg 232086f0348aSSagi Grimberg nvme_tcp_queue_request(&ctrl->async_req, true, true); 23213f2304f8SSagi Grimberg } 23223f2304f8SSagi Grimberg 2323236187c4SSagi Grimberg static void nvme_tcp_complete_timed_out(struct request *rq) 2324236187c4SSagi Grimberg { 2325236187c4SSagi Grimberg struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 2326236187c4SSagi Grimberg struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; 2327236187c4SSagi Grimberg 2328236187c4SSagi Grimberg nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); 232993ba75c9SChaitanya Kulkarni nvmf_complete_timed_out_request(rq); 
2330236187c4SSagi Grimberg }
2331236187c4SSagi Grimberg 
23329bdb4833SJohn Garry static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
23333f2304f8SSagi Grimberg {
23343f2304f8SSagi Grimberg 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2335236187c4SSagi Grimberg 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
23363f2304f8SSagi Grimberg 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
23373f2304f8SSagi Grimberg 
2338236187c4SSagi Grimberg 	dev_warn(ctrl->device,
23393f2304f8SSagi Grimberg 		"queue %d: timeout request %#x type %d\n",
234039d57757SSagi Grimberg 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
23413f2304f8SSagi Grimberg 
2342236187c4SSagi Grimberg 	if (ctrl->state != NVME_CTRL_LIVE) {
234339d57757SSagi Grimberg 		/*
2344236187c4SSagi Grimberg 		 * If we are resetting, connecting or deleting, we should
2345236187c4SSagi Grimberg 		 * complete immediately because we may block the controller
2346236187c4SSagi Grimberg 		 * teardown or setup sequence:
2347236187c4SSagi Grimberg 		 *  - ctrl disable/shutdown fabrics requests
2348236187c4SSagi Grimberg 		 *  - connect requests
2349236187c4SSagi Grimberg 		 *  - initialization admin requests
2350236187c4SSagi Grimberg 		 *  - I/O requests that entered after unquiescing and
2351236187c4SSagi Grimberg 		 *    the controller stopped responding
2352236187c4SSagi Grimberg 		 *
2353236187c4SSagi Grimberg 		 * All other requests should be cancelled by the error
2354236187c4SSagi Grimberg 		 * recovery work, so it's fine to fail this one here.
235539d57757SSagi Grimberg 		 */
2356236187c4SSagi Grimberg 		nvme_tcp_complete_timed_out(rq);
23573f2304f8SSagi Grimberg 		return BLK_EH_DONE;
23583f2304f8SSagi Grimberg 	}
23593f2304f8SSagi Grimberg 
2360236187c4SSagi Grimberg 	/*
2361236187c4SSagi Grimberg 	 * LIVE state should trigger the normal error recovery, which will
2362236187c4SSagi Grimberg 	 * handle completing this request.
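	 * Resetting the timer keeps the request outstanding until that
	 * recovery work tears down the queues and cancels it.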
2363236187c4SSagi Grimberg */ 2364236187c4SSagi Grimberg nvme_tcp_error_recovery(ctrl); 23653f2304f8SSagi Grimberg return BLK_EH_RESET_TIMER; 23663f2304f8SSagi Grimberg } 23673f2304f8SSagi Grimberg 23683f2304f8SSagi Grimberg static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, 23693f2304f8SSagi Grimberg struct request *rq) 23703f2304f8SSagi Grimberg { 23713f2304f8SSagi Grimberg struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 23723f2304f8SSagi Grimberg struct nvme_tcp_cmd_pdu *pdu = req->pdu; 23733f2304f8SSagi Grimberg struct nvme_command *c = &pdu->cmd; 23743f2304f8SSagi Grimberg 23753f2304f8SSagi Grimberg c->common.flags |= NVME_CMD_SGL_METABUF; 23763f2304f8SSagi Grimberg 237725e5cb78SSagi Grimberg if (!blk_rq_nr_phys_segments(rq)) 237825e5cb78SSagi Grimberg nvme_tcp_set_sg_null(c); 237925e5cb78SSagi Grimberg else if (rq_data_dir(rq) == WRITE && 238053ee9e29SCaleb Sander req->data_len <= nvme_tcp_inline_data_size(req)) 23813f2304f8SSagi Grimberg nvme_tcp_set_sg_inline(queue, c, req->data_len); 23823f2304f8SSagi Grimberg else 23833f2304f8SSagi Grimberg nvme_tcp_set_sg_host_data(c, req->data_len); 23843f2304f8SSagi Grimberg 23853f2304f8SSagi Grimberg return 0; 23863f2304f8SSagi Grimberg } 23873f2304f8SSagi Grimberg 23883f2304f8SSagi Grimberg static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, 23893f2304f8SSagi Grimberg struct request *rq) 23903f2304f8SSagi Grimberg { 23913f2304f8SSagi Grimberg struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 23923f2304f8SSagi Grimberg struct nvme_tcp_cmd_pdu *pdu = req->pdu; 23933f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = req->queue; 23943f2304f8SSagi Grimberg u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; 23953f2304f8SSagi Grimberg blk_status_t ret; 23963f2304f8SSagi Grimberg 2397f4b9e6c9SKeith Busch ret = nvme_setup_cmd(ns, rq); 23983f2304f8SSagi Grimberg if (ret) 23993f2304f8SSagi Grimberg return ret; 24003f2304f8SSagi Grimberg 24013f2304f8SSagi Grimberg req->state = NVME_TCP_SEND_CMD_PDU; 24021ba2e507SDaniel Wagner req->status = cpu_to_le16(NVME_SC_SUCCESS); 24033f2304f8SSagi Grimberg req->offset = 0; 24043f2304f8SSagi Grimberg req->data_sent = 0; 24053f2304f8SSagi Grimberg req->pdu_len = 0; 24063f2304f8SSagi Grimberg req->pdu_sent = 0; 2407c2700d28SVarun Prakash req->h2cdata_left = 0; 240825e5cb78SSagi Grimberg req->data_len = blk_rq_nr_phys_segments(rq) ? 240925e5cb78SSagi Grimberg blk_rq_payload_bytes(rq) : 0; 24103f2304f8SSagi Grimberg req->curr_bio = rq->bio; 2411e11e5116SSagi Grimberg if (req->curr_bio && req->data_len) 2412cb9b870fSSagi Grimberg nvme_tcp_init_iter(req, rq_data_dir(rq)); 24133f2304f8SSagi Grimberg 24143f2304f8SSagi Grimberg if (rq_data_dir(rq) == WRITE && 241553ee9e29SCaleb Sander req->data_len <= nvme_tcp_inline_data_size(req)) 24163f2304f8SSagi Grimberg req->pdu_len = req->data_len; 24173f2304f8SSagi Grimberg 24183f2304f8SSagi Grimberg pdu->hdr.type = nvme_tcp_cmd; 24193f2304f8SSagi Grimberg pdu->hdr.flags = 0; 24203f2304f8SSagi Grimberg if (queue->hdr_digest) 24213f2304f8SSagi Grimberg pdu->hdr.flags |= NVME_TCP_F_HDGST; 24223f2304f8SSagi Grimberg if (queue->data_digest && req->pdu_len) { 24233f2304f8SSagi Grimberg pdu->hdr.flags |= NVME_TCP_F_DDGST; 24243f2304f8SSagi Grimberg ddgst = nvme_tcp_ddgst_len(queue); 24253f2304f8SSagi Grimberg } 24263f2304f8SSagi Grimberg pdu->hdr.hlen = sizeof(*pdu); 24273f2304f8SSagi Grimberg pdu->hdr.pdo = req->pdu_len ? 
pdu->hdr.hlen + hdgst : 0; 24283f2304f8SSagi Grimberg pdu->hdr.plen = 24293f2304f8SSagi Grimberg cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); 24303f2304f8SSagi Grimberg 24313f2304f8SSagi Grimberg ret = nvme_tcp_map_data(queue, rq); 24323f2304f8SSagi Grimberg if (unlikely(ret)) { 243328a4cac4SMax Gurtovoy nvme_cleanup_cmd(rq); 24343f2304f8SSagi Grimberg dev_err(queue->ctrl->ctrl.device, 24353f2304f8SSagi Grimberg "Failed to map data (%d)\n", ret); 24363f2304f8SSagi Grimberg return ret; 24373f2304f8SSagi Grimberg } 24383f2304f8SSagi Grimberg 24393f2304f8SSagi Grimberg return 0; 24403f2304f8SSagi Grimberg } 24413f2304f8SSagi Grimberg 244286f0348aSSagi Grimberg static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx) 244386f0348aSSagi Grimberg { 244486f0348aSSagi Grimberg struct nvme_tcp_queue *queue = hctx->driver_data; 244586f0348aSSagi Grimberg 244686f0348aSSagi Grimberg if (!llist_empty(&queue->req_list)) 244786f0348aSSagi Grimberg queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 244886f0348aSSagi Grimberg } 244986f0348aSSagi Grimberg 24503f2304f8SSagi Grimberg static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, 24513f2304f8SSagi Grimberg const struct blk_mq_queue_data *bd) 24523f2304f8SSagi Grimberg { 24533f2304f8SSagi Grimberg struct nvme_ns *ns = hctx->queue->queuedata; 24543f2304f8SSagi Grimberg struct nvme_tcp_queue *queue = hctx->driver_data; 24553f2304f8SSagi Grimberg struct request *rq = bd->rq; 24563f2304f8SSagi Grimberg struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 24573f2304f8SSagi Grimberg bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); 24583f2304f8SSagi Grimberg blk_status_t ret; 24593f2304f8SSagi Grimberg 2460a9715744STao Chiu if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 2461a9715744STao Chiu return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); 24623f2304f8SSagi Grimberg 24633f2304f8SSagi Grimberg ret = nvme_tcp_setup_cmd_pdu(ns, rq); 24643f2304f8SSagi Grimberg if (unlikely(ret)) 24653f2304f8SSagi Grimberg return ret; 24663f2304f8SSagi Grimberg 24673f2304f8SSagi Grimberg blk_mq_start_request(rq); 24683f2304f8SSagi Grimberg 246986f0348aSSagi Grimberg nvme_tcp_queue_request(req, true, bd->last); 24703f2304f8SSagi Grimberg 24713f2304f8SSagi Grimberg return BLK_STS_OK; 24723f2304f8SSagi Grimberg } 24733f2304f8SSagi Grimberg 2474873946f4SSagi Grimberg static int nvme_tcp_map_queues(struct blk_mq_tag_set *set) 2475873946f4SSagi Grimberg { 2476873946f4SSagi Grimberg struct nvme_tcp_ctrl *ctrl = set->driver_data; 247764861993SSagi Grimberg struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2478873946f4SSagi Grimberg 247964861993SSagi Grimberg if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { 2480873946f4SSagi Grimberg /* separate read/write queues */ 2481873946f4SSagi Grimberg set->map[HCTX_TYPE_DEFAULT].nr_queues = 248264861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT]; 248364861993SSagi Grimberg set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; 248464861993SSagi Grimberg set->map[HCTX_TYPE_READ].nr_queues = 248564861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ]; 2486873946f4SSagi Grimberg set->map[HCTX_TYPE_READ].queue_offset = 248764861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT]; 2488873946f4SSagi Grimberg } else { 248964861993SSagi Grimberg /* shared read/write queues */ 2490873946f4SSagi Grimberg set->map[HCTX_TYPE_DEFAULT].nr_queues = 249164861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT]; 249264861993SSagi Grimberg set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; 
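		/* reads are served by the same queue set as default I/O */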
249364861993SSagi Grimberg set->map[HCTX_TYPE_READ].nr_queues = 249464861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT]; 2495873946f4SSagi Grimberg set->map[HCTX_TYPE_READ].queue_offset = 0; 2496873946f4SSagi Grimberg } 2497873946f4SSagi Grimberg blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 2498873946f4SSagi Grimberg blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); 249964861993SSagi Grimberg 25001a9460ceSSagi Grimberg if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { 25011a9460ceSSagi Grimberg /* map dedicated poll queues only if we have queues left */ 25021a9460ceSSagi Grimberg set->map[HCTX_TYPE_POLL].nr_queues = 25031a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_POLL]; 25041a9460ceSSagi Grimberg set->map[HCTX_TYPE_POLL].queue_offset = 25051a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT] + 25061a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ]; 25071a9460ceSSagi Grimberg blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); 25081a9460ceSSagi Grimberg } 25091a9460ceSSagi Grimberg 251064861993SSagi Grimberg dev_info(ctrl->ctrl.device, 25111a9460ceSSagi Grimberg "mapped %d/%d/%d default/read/poll queues.\n", 251264861993SSagi Grimberg ctrl->io_queues[HCTX_TYPE_DEFAULT], 25131a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_READ], 25141a9460ceSSagi Grimberg ctrl->io_queues[HCTX_TYPE_POLL]); 251564861993SSagi Grimberg 2516873946f4SSagi Grimberg return 0; 2517873946f4SSagi Grimberg } 2518873946f4SSagi Grimberg 25195a72e899SJens Axboe static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 25201a9460ceSSagi Grimberg { 25211a9460ceSSagi Grimberg struct nvme_tcp_queue *queue = hctx->driver_data; 25221a9460ceSSagi Grimberg struct sock *sk = queue->sock->sk; 25231a9460ceSSagi Grimberg 2524f86e5bf8SSagi Grimberg if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) 2525f86e5bf8SSagi Grimberg return 0; 2526f86e5bf8SSagi Grimberg 252772e5d757SSagi Grimberg set_bit(NVME_TCP_Q_POLLING, &queue->flags); 25283f926af3SEric Dumazet if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) 25291a9460ceSSagi Grimberg sk_busy_loop(sk, true); 25301a9460ceSSagi Grimberg nvme_tcp_try_recv(queue); 253172e5d757SSagi Grimberg clear_bit(NVME_TCP_Q_POLLING, &queue->flags); 25321a9460ceSSagi Grimberg return queue->nr_cqe; 25331a9460ceSSagi Grimberg } 25341a9460ceSSagi Grimberg 25356acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_mq_ops = { 25363f2304f8SSagi Grimberg .queue_rq = nvme_tcp_queue_rq, 253786f0348aSSagi Grimberg .commit_rqs = nvme_tcp_commit_rqs, 25383f2304f8SSagi Grimberg .complete = nvme_complete_rq, 25393f2304f8SSagi Grimberg .init_request = nvme_tcp_init_request, 25403f2304f8SSagi Grimberg .exit_request = nvme_tcp_exit_request, 25413f2304f8SSagi Grimberg .init_hctx = nvme_tcp_init_hctx, 25423f2304f8SSagi Grimberg .timeout = nvme_tcp_timeout, 2543873946f4SSagi Grimberg .map_queues = nvme_tcp_map_queues, 25441a9460ceSSagi Grimberg .poll = nvme_tcp_poll, 25453f2304f8SSagi Grimberg }; 25463f2304f8SSagi Grimberg 25476acbd961SRikard Falkeborn static const struct blk_mq_ops nvme_tcp_admin_mq_ops = { 25483f2304f8SSagi Grimberg .queue_rq = nvme_tcp_queue_rq, 25493f2304f8SSagi Grimberg .complete = nvme_complete_rq, 25503f2304f8SSagi Grimberg .init_request = nvme_tcp_init_request, 25513f2304f8SSagi Grimberg .exit_request = nvme_tcp_exit_request, 25523f2304f8SSagi Grimberg .init_hctx = nvme_tcp_init_admin_hctx, 25533f2304f8SSagi Grimberg .timeout = nvme_tcp_timeout, 25543f2304f8SSagi Grimberg }; 25553f2304f8SSagi Grimberg 25563f2304f8SSagi 
Grimberg static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { 25573f2304f8SSagi Grimberg .name = "tcp", 25583f2304f8SSagi Grimberg .module = THIS_MODULE, 25593f2304f8SSagi Grimberg .flags = NVME_F_FABRICS, 25603f2304f8SSagi Grimberg .reg_read32 = nvmf_reg_read32, 25613f2304f8SSagi Grimberg .reg_read64 = nvmf_reg_read64, 25623f2304f8SSagi Grimberg .reg_write32 = nvmf_reg_write32, 25633f2304f8SSagi Grimberg .free_ctrl = nvme_tcp_free_ctrl, 25643f2304f8SSagi Grimberg .submit_async_event = nvme_tcp_submit_async_event, 25653f2304f8SSagi Grimberg .delete_ctrl = nvme_tcp_delete_ctrl, 25663f2304f8SSagi Grimberg .get_address = nvmf_get_address, 2567f7f70f4aSRuozhu Li .stop_ctrl = nvme_tcp_stop_ctrl, 25683f2304f8SSagi Grimberg }; 25693f2304f8SSagi Grimberg 25703f2304f8SSagi Grimberg static bool 25713f2304f8SSagi Grimberg nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts) 25723f2304f8SSagi Grimberg { 25733f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl; 25743f2304f8SSagi Grimberg bool found = false; 25753f2304f8SSagi Grimberg 25763f2304f8SSagi Grimberg mutex_lock(&nvme_tcp_ctrl_mutex); 25773f2304f8SSagi Grimberg list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) { 25783f2304f8SSagi Grimberg found = nvmf_ip_options_match(&ctrl->ctrl, opts); 25793f2304f8SSagi Grimberg if (found) 25803f2304f8SSagi Grimberg break; 25813f2304f8SSagi Grimberg } 25823f2304f8SSagi Grimberg mutex_unlock(&nvme_tcp_ctrl_mutex); 25833f2304f8SSagi Grimberg 25843f2304f8SSagi Grimberg return found; 25853f2304f8SSagi Grimberg } 25863f2304f8SSagi Grimberg 25873f2304f8SSagi Grimberg static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, 25883f2304f8SSagi Grimberg struct nvmf_ctrl_options *opts) 25893f2304f8SSagi Grimberg { 25903f2304f8SSagi Grimberg struct nvme_tcp_ctrl *ctrl; 25913f2304f8SSagi Grimberg int ret; 25923f2304f8SSagi Grimberg 25933f2304f8SSagi Grimberg ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 25943f2304f8SSagi Grimberg if (!ctrl) 25953f2304f8SSagi Grimberg return ERR_PTR(-ENOMEM); 25963f2304f8SSagi Grimberg 25973f2304f8SSagi Grimberg INIT_LIST_HEAD(&ctrl->list); 25983f2304f8SSagi Grimberg ctrl->ctrl.opts = opts; 25991a9460ceSSagi Grimberg ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 26001a9460ceSSagi Grimberg opts->nr_poll_queues + 1; 26013f2304f8SSagi Grimberg ctrl->ctrl.sqsize = opts->queue_size - 1; 26023f2304f8SSagi Grimberg ctrl->ctrl.kato = opts->kato; 26033f2304f8SSagi Grimberg 26043f2304f8SSagi Grimberg INIT_DELAYED_WORK(&ctrl->connect_work, 26053f2304f8SSagi Grimberg nvme_tcp_reconnect_ctrl_work); 26063f2304f8SSagi Grimberg INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); 26073f2304f8SSagi Grimberg INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); 26083f2304f8SSagi Grimberg 26093f2304f8SSagi Grimberg if (!(opts->mask & NVMF_OPT_TRSVCID)) { 26103f2304f8SSagi Grimberg opts->trsvcid = 26113f2304f8SSagi Grimberg kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL); 26123f2304f8SSagi Grimberg if (!opts->trsvcid) { 26133f2304f8SSagi Grimberg ret = -ENOMEM; 26143f2304f8SSagi Grimberg goto out_free_ctrl; 26153f2304f8SSagi Grimberg } 26163f2304f8SSagi Grimberg opts->mask |= NVMF_OPT_TRSVCID; 26173f2304f8SSagi Grimberg } 26183f2304f8SSagi Grimberg 26193f2304f8SSagi Grimberg ret = inet_pton_with_scope(&init_net, AF_UNSPEC, 26203f2304f8SSagi Grimberg opts->traddr, opts->trsvcid, &ctrl->addr); 26213f2304f8SSagi Grimberg if (ret) { 26223f2304f8SSagi Grimberg pr_err("malformed address passed: %s:%s\n", 26233f2304f8SSagi Grimberg opts->traddr, opts->trsvcid); 
26243f2304f8SSagi Grimberg goto out_free_ctrl; 26253f2304f8SSagi Grimberg } 26263f2304f8SSagi Grimberg 26273f2304f8SSagi Grimberg if (opts->mask & NVMF_OPT_HOST_TRADDR) { 26283f2304f8SSagi Grimberg ret = inet_pton_with_scope(&init_net, AF_UNSPEC, 26293f2304f8SSagi Grimberg opts->host_traddr, NULL, &ctrl->src_addr); 26303f2304f8SSagi Grimberg if (ret) { 26313f2304f8SSagi Grimberg pr_err("malformed src address passed: %s\n", 26323f2304f8SSagi Grimberg opts->host_traddr); 26333f2304f8SSagi Grimberg goto out_free_ctrl; 26343f2304f8SSagi Grimberg } 26353f2304f8SSagi Grimberg } 26363f2304f8SSagi Grimberg 26373ede8f72SMartin Belanger if (opts->mask & NVMF_OPT_HOST_IFACE) { 26388b43ced6SPrabhakar Kushwaha if (!__dev_get_by_name(&init_net, opts->host_iface)) { 26393ede8f72SMartin Belanger pr_err("invalid interface passed: %s\n", 26403ede8f72SMartin Belanger opts->host_iface); 26413ede8f72SMartin Belanger ret = -ENODEV; 26423ede8f72SMartin Belanger goto out_free_ctrl; 26433ede8f72SMartin Belanger } 26443ede8f72SMartin Belanger } 26453ede8f72SMartin Belanger 26463f2304f8SSagi Grimberg if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { 26473f2304f8SSagi Grimberg ret = -EALREADY; 26483f2304f8SSagi Grimberg goto out_free_ctrl; 26493f2304f8SSagi Grimberg } 26503f2304f8SSagi Grimberg 2651873946f4SSagi Grimberg ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), 26523f2304f8SSagi Grimberg GFP_KERNEL); 26533f2304f8SSagi Grimberg if (!ctrl->queues) { 26543f2304f8SSagi Grimberg ret = -ENOMEM; 26553f2304f8SSagi Grimberg goto out_free_ctrl; 26563f2304f8SSagi Grimberg } 26573f2304f8SSagi Grimberg 26583f2304f8SSagi Grimberg ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0); 26593f2304f8SSagi Grimberg if (ret) 26603f2304f8SSagi Grimberg goto out_kfree_queues; 26613f2304f8SSagi Grimberg 26623f2304f8SSagi Grimberg if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 26633f2304f8SSagi Grimberg WARN_ON_ONCE(1); 26643f2304f8SSagi Grimberg ret = -EINTR; 26653f2304f8SSagi Grimberg goto out_uninit_ctrl; 26663f2304f8SSagi Grimberg } 26673f2304f8SSagi Grimberg 26683f2304f8SSagi Grimberg ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true); 26693f2304f8SSagi Grimberg if (ret) 26703f2304f8SSagi Grimberg goto out_uninit_ctrl; 26713f2304f8SSagi Grimberg 26723f2304f8SSagi Grimberg dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", 2673e5ea42faSHannes Reinecke nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); 26743f2304f8SSagi Grimberg 26753f2304f8SSagi Grimberg mutex_lock(&nvme_tcp_ctrl_mutex); 26763f2304f8SSagi Grimberg list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); 26773f2304f8SSagi Grimberg mutex_unlock(&nvme_tcp_ctrl_mutex); 26783f2304f8SSagi Grimberg 26793f2304f8SSagi Grimberg return &ctrl->ctrl; 26803f2304f8SSagi Grimberg 26813f2304f8SSagi Grimberg out_uninit_ctrl: 26823f2304f8SSagi Grimberg nvme_uninit_ctrl(&ctrl->ctrl); 26833f2304f8SSagi Grimberg nvme_put_ctrl(&ctrl->ctrl); 26843f2304f8SSagi Grimberg if (ret > 0) 26853f2304f8SSagi Grimberg ret = -EIO; 26863f2304f8SSagi Grimberg return ERR_PTR(ret); 26873f2304f8SSagi Grimberg out_kfree_queues: 26883f2304f8SSagi Grimberg kfree(ctrl->queues); 26893f2304f8SSagi Grimberg out_free_ctrl: 26903f2304f8SSagi Grimberg kfree(ctrl); 26913f2304f8SSagi Grimberg return ERR_PTR(ret); 26923f2304f8SSagi Grimberg } 26933f2304f8SSagi Grimberg 26943f2304f8SSagi Grimberg static struct nvmf_transport_ops nvme_tcp_transport = { 26953f2304f8SSagi Grimberg .name = "tcp", 26963f2304f8SSagi Grimberg .module = THIS_MODULE, 
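	/*
	 * Only the target address is mandatory; when no TRSVCID is given,
	 * nvme_tcp_create_ctrl() falls back to NVME_TCP_DISC_PORT.
	 */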
26973f2304f8SSagi Grimberg 	.required_opts	= NVMF_OPT_TRADDR,
26983f2304f8SSagi Grimberg 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
26993f2304f8SSagi Grimberg 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2700873946f4SSagi Grimberg 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2701bb13985dSIsrael Rukshin 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
27023ede8f72SMartin Belanger 			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
27033f2304f8SSagi Grimberg 	.create_ctrl	= nvme_tcp_create_ctrl,
27043f2304f8SSagi Grimberg };
27053f2304f8SSagi Grimberg 
27063f2304f8SSagi Grimberg static int __init nvme_tcp_init_module(void)
27073f2304f8SSagi Grimberg {
27083f2304f8SSagi Grimberg 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
27093f2304f8SSagi Grimberg 			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
27103f2304f8SSagi Grimberg 	if (!nvme_tcp_wq)
27113f2304f8SSagi Grimberg 		return -ENOMEM;
27123f2304f8SSagi Grimberg 
27133f2304f8SSagi Grimberg 	nvmf_register_transport(&nvme_tcp_transport);
27143f2304f8SSagi Grimberg 	return 0;
27153f2304f8SSagi Grimberg }
27163f2304f8SSagi Grimberg 
27173f2304f8SSagi Grimberg static void __exit nvme_tcp_cleanup_module(void)
27183f2304f8SSagi Grimberg {
27193f2304f8SSagi Grimberg 	struct nvme_tcp_ctrl *ctrl;
27203f2304f8SSagi Grimberg 
27213f2304f8SSagi Grimberg 	nvmf_unregister_transport(&nvme_tcp_transport);
27223f2304f8SSagi Grimberg 
27233f2304f8SSagi Grimberg 	mutex_lock(&nvme_tcp_ctrl_mutex);
27243f2304f8SSagi Grimberg 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
27253f2304f8SSagi Grimberg 		nvme_delete_ctrl(&ctrl->ctrl);
27263f2304f8SSagi Grimberg 	mutex_unlock(&nvme_tcp_ctrl_mutex);
27273f2304f8SSagi Grimberg 	flush_workqueue(nvme_delete_wq);
27283f2304f8SSagi Grimberg 
27293f2304f8SSagi Grimberg 	destroy_workqueue(nvme_tcp_wq);
27303f2304f8SSagi Grimberg }
27313f2304f8SSagi Grimberg 
27323f2304f8SSagi Grimberg module_init(nvme_tcp_init_module);
27333f2304f8SSagi Grimberg module_exit(nvme_tcp_cleanup_module);
27343f2304f8SSagi Grimberg 
27353f2304f8SSagi Grimberg MODULE_LICENSE("GPL v2");
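
/*
 * Example host-side usage (hypothetical addresses/NQN; assumes nvme-cli):
 *
 *   modprobe nvme-tcp
 *   nvme connect -t tcp -a 192.168.0.10 -s 4420 -n nqn.2018-01.example:subsys0
 *
 * 4420 is the IANA-assigned NVMe/TCP I/O port; discovery connections
 * default to NVME_TCP_DISC_PORT when no trsvcid is supplied.
 */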