/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE

struct nvmet_rdma_cmd {
	struct ib_sge sge[2];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg;
	struct page *inline_page;
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};
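/*
 * Per-request context: tracks one command from RDMA RECV through the final
 * RDMA SEND of the NVMe completion.  send_sge/send_wr carry the CQE back to
 * the host, read_cqe/rw drive any RDMA READ of write data, and n_rdma counts
 * the R/W work requests charged against the send queue for this command.
 */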
struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}
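/*
 * A command needs a data-in (RDMA READ) pass when the host issues a write
 * whose data is described by a keyed SGL rather than carried inline with
 * the capsule.  Data-out (RDMA WRITE) is the mirror case for reads, and is
 * skipped when the command has already failed.
 */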
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int count;

	if (!sgl || !nents)
		return;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));
	kfree(sgl);
}

static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
out:
	return NVME_SC_INTERNAL;
}
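/*
 * Each RECV buffer uses up to two SGEs: sge[0] always receives the 64-byte
 * command capsule, and for I/O (non-admin) queues sge[1] points at a page
 * that catches any inline data sent along with the capsule.
 */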
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}
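/*
 * The response pool is sized at twice the receive queue depth, presumably
 * so that nvmet_rdma_get_rsp() never runs dry: a response stays allocated
 * until its SEND completes, which can overlap with new commands arriving
 * on already-reposted RECV buffers.
 */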
static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}
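/*
 * Releasing a response returns its send-queue slots (1 SEND + n_rdma R/W
 * work requests) and then retries any commands parked on rsp_wr_wait_list
 * because the send queue was full when they first tried to execute.
 */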
static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * we didn't setup the controller yet in case
		 * of admin connect error, just disconnect and
		 * cleanup the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
	}
}
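/*
 * Response path: the RECV buffer is reposted before the SEND is issued,
 * and for commands returning data the RDMA WRITE work requests are chained
 * in front of the SEND so the data reaches the host before its completion.
 */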
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	return 0;
}
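/*
 * Keyed SGLs describe a remote buffer (address + rkey).  rdma_rw_ctx_init()
 * maps the locally allocated pages and returns how many R/W work requests
 * the transfer will need; that count is charged to n_rdma so the send-queue
 * accounting in nvmet_rdma_execute_command() can reserve slots for it.
 */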
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;
	u16 status;

	/* no data command? */
	if (!len)
		return 0;

	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
			len);
	if (status)
		return status;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}
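/*
 * Executing a command reserves 1 + n_rdma send-queue slots up front: one
 * SEND for the completion plus the RDMA READ/WRITE work requests.  On
 * failure the reservation is rolled back and false is returned so the
 * caller can park the command on rsp_wr_wait_list for a later retry.
 */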
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}
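/*
 * RECV completion: commands that arrive before the queue reaches
 * NVMET_RDMA_Q_LIVE (i.e. while the CM connect handshake is still in
 * progress) are parked on rsp_wait_list instead of being executed;
 * anything arriving on a disconnecting queue is simply dropped.
 */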
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}
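/*
 * With use_srq, one shared receive queue (and one pool of command buffers)
 * serves every queue on the device; otherwise each queue posts RECVs to its
 * own QP.  Lack of SRQ support is not fatal - we fall back to per-queue
 * receive queues and report success.
 */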
static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}
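/*
 * Device contexts are cached and shared: connects landing on the same IB
 * device (matched by node GUID) reuse the existing PD and SRQ through a
 * kref instead of allocating fresh ones per queue.
 */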
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}
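/*
 * Note the recurring "+1" headroom below: the CQ and the QP send/recv
 * capacities each reserve an extra entry (per the "+1 for drain" comments)
 * so that ib_drain_qp() can post its drain work requests during teardown.
 */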
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);
}
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_info("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}
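/*
 * The queue sizes come from the NVMe/RDMA CM private data: the host's
 * submission queue size (hsqsize, which is zero-based, hence the +1) sets
 * our receive depth, and its receive queue size (hrqsize) sets our send
 * depth.
 */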
static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
		struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
		enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}
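/*
 * Queue allocation mirrors the CM reject path: any failure below unwinds
 * through the matching out_* labels and answers the connect request with
 * an NVMe/RDMA CM status code rather than a bare errno.
 */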
10798f000cacSChristoph Hellwig 	 */
10808f000cacSChristoph Hellwig 	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
10818f000cacSChristoph Hellwig 	queue->dev = ndev;
10828f000cacSChristoph Hellwig 	queue->cm_id = cm_id;
10838f000cacSChristoph Hellwig 
10848f000cacSChristoph Hellwig 	spin_lock_init(&queue->state_lock);
10858f000cacSChristoph Hellwig 	queue->state = NVMET_RDMA_Q_CONNECTING;
10868f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->rsp_wait_list);
10878f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
10888f000cacSChristoph Hellwig 	spin_lock_init(&queue->rsp_wr_wait_lock);
10898f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->free_rsps);
10908f000cacSChristoph Hellwig 	spin_lock_init(&queue->rsps_lock);
1091766dbb17SSagi Grimberg 	INIT_LIST_HEAD(&queue->queue_list);
10928f000cacSChristoph Hellwig 
10938f000cacSChristoph Hellwig 	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
10948f000cacSChristoph Hellwig 	if (queue->idx < 0) {
10958f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
10966ccaeb56SChristophe JAILLET 		goto out_destroy_sq;
10978f000cacSChristoph Hellwig 	}
10988f000cacSChristoph Hellwig 
10998f000cacSChristoph Hellwig 	ret = nvmet_rdma_alloc_rsps(queue);
11008f000cacSChristoph Hellwig 	if (ret) {
11018f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
11028f000cacSChristoph Hellwig 		goto out_ida_remove;
11038f000cacSChristoph Hellwig 	}
11048f000cacSChristoph Hellwig 
11058f000cacSChristoph Hellwig 	if (!ndev->srq) {
11068f000cacSChristoph Hellwig 		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
11078f000cacSChristoph Hellwig 				queue->recv_queue_size,
11088f000cacSChristoph Hellwig 				!queue->host_qid);
11098f000cacSChristoph Hellwig 		if (IS_ERR(queue->cmds)) {
11108f000cacSChristoph Hellwig 			ret = NVME_RDMA_CM_NO_RSC;
11118f000cacSChristoph Hellwig 			goto out_free_responses;
11128f000cacSChristoph Hellwig 		}
11138f000cacSChristoph Hellwig 	}
11148f000cacSChristoph Hellwig 
11158f000cacSChristoph Hellwig 	ret = nvmet_rdma_create_queue_ib(queue);
11168f000cacSChristoph Hellwig 	if (ret) {
11178f000cacSChristoph Hellwig 		pr_err("%s: creating RDMA queue failed (%d).\n",
11188f000cacSChristoph Hellwig 			__func__, ret);
11198f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
11208f000cacSChristoph Hellwig 		goto out_free_cmds;
11218f000cacSChristoph Hellwig 	}
11228f000cacSChristoph Hellwig 
11238f000cacSChristoph Hellwig 	return queue;
11248f000cacSChristoph Hellwig 
11258f000cacSChristoph Hellwig out_free_cmds:
11268f000cacSChristoph Hellwig 	if (!ndev->srq) {
11278f000cacSChristoph Hellwig 		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
11288f000cacSChristoph Hellwig 				queue->recv_queue_size,
11298f000cacSChristoph Hellwig 				!queue->host_qid);
11308f000cacSChristoph Hellwig 	}
11318f000cacSChristoph Hellwig out_free_responses:
11328f000cacSChristoph Hellwig 	nvmet_rdma_free_rsps(queue);
11338f000cacSChristoph Hellwig out_ida_remove:
11348f000cacSChristoph Hellwig 	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
11358f000cacSChristoph Hellwig out_destroy_sq:
11368f000cacSChristoph Hellwig 	nvmet_sq_destroy(&queue->nvme_sq);
11378f000cacSChristoph Hellwig out_free_queue:
11388f000cacSChristoph Hellwig 	kfree(queue);
11398f000cacSChristoph Hellwig out_reject:
11408f000cacSChristoph Hellwig 	nvmet_rdma_cm_reject(cm_id, ret);
11418f000cacSChristoph Hellwig 	return NULL;
11428f000cacSChristoph Hellwig }
11438f000cacSChristoph Hellwig 
11448f000cacSChristoph Hellwig static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
11458f000cacSChristoph Hellwig {
11468f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue = priv;
11478f000cacSChristoph Hellwig 
11488f000cacSChristoph Hellwig 	switch (event->event) {
11498f000cacSChristoph Hellwig 	case IB_EVENT_COMM_EST:
11508f000cacSChristoph Hellwig 		rdma_notify(queue->cm_id, event->event);
11518f000cacSChristoph Hellwig 		break;
11528f000cacSChristoph Hellwig 	default:
1153675796beSMax Gurtovoy 		pr_err("received IB QP event: %s (%d)\n",
1154675796beSMax Gurtovoy 			ib_event_msg(event->event), event->event);
11558f000cacSChristoph Hellwig 		break;
11568f000cacSChristoph Hellwig 	}
11578f000cacSChristoph Hellwig }
11588f000cacSChristoph Hellwig 
11598f000cacSChristoph Hellwig static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
11608f000cacSChristoph Hellwig 		struct nvmet_rdma_queue *queue,
11618f000cacSChristoph Hellwig 		struct rdma_conn_param *p)
11628f000cacSChristoph Hellwig {
11638f000cacSChristoph Hellwig 	struct rdma_conn_param param = { };
11648f000cacSChristoph Hellwig 	struct nvme_rdma_cm_rep priv = { };
11658f000cacSChristoph Hellwig 	int ret = -ENOMEM;
11668f000cacSChristoph Hellwig 
11678f000cacSChristoph Hellwig 	param.rnr_retry_count = 7;
11688f000cacSChristoph Hellwig 	param.flow_control = 1;
11698f000cacSChristoph Hellwig 	param.initiator_depth = min_t(u8, p->initiator_depth,
11708f000cacSChristoph Hellwig 		queue->dev->device->attrs.max_qp_init_rd_atom);
11718f000cacSChristoph Hellwig 	param.private_data = &priv;
11728f000cacSChristoph Hellwig 	param.private_data_len = sizeof(priv);
11738f000cacSChristoph Hellwig 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
11748f000cacSChristoph Hellwig 	priv.crqsize = cpu_to_le16(queue->recv_queue_size);
11758f000cacSChristoph Hellwig 
11768f000cacSChristoph Hellwig 	ret = rdma_accept(cm_id, &param);
11778f000cacSChristoph Hellwig 	if (ret)
11788f000cacSChristoph Hellwig 		pr_err("rdma_accept failed (error code = %d)\n", ret);
11798f000cacSChristoph Hellwig 
11808f000cacSChristoph Hellwig 	return ret;
11818f000cacSChristoph Hellwig }
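/*
 * Illustrative aside (not part of the original file): the
 * initiator_depth clamp above bounds how many RDMA READs this target
 * may have outstanding toward the host at once. For example, a host
 * offering initiator_depth = 16 while the local device reports
 * max_qp_init_rd_atom = 4 is accepted with a depth of 4.
 */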
11828f000cacSChristoph Hellwig 
11838f000cacSChristoph Hellwig static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
11848f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
11858f000cacSChristoph Hellwig {
11868f000cacSChristoph Hellwig 	struct nvmet_rdma_device *ndev;
11878f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
11888f000cacSChristoph Hellwig 	int ret = -EINVAL;
11898f000cacSChristoph Hellwig 
11908f000cacSChristoph Hellwig 	ndev = nvmet_rdma_find_get_device(cm_id);
11918f000cacSChristoph Hellwig 	if (!ndev) {
11928f000cacSChristoph Hellwig 		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
11938f000cacSChristoph Hellwig 		return -ECONNREFUSED;
11948f000cacSChristoph Hellwig 	}
11958f000cacSChristoph Hellwig 
11968f000cacSChristoph Hellwig 	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
11978f000cacSChristoph Hellwig 	if (!queue) {
11988f000cacSChristoph Hellwig 		ret = -ENOMEM;
11998f000cacSChristoph Hellwig 		goto put_device;
12008f000cacSChristoph Hellwig 	}
12018f000cacSChristoph Hellwig 	queue->port = cm_id->context;
12028f000cacSChristoph Hellwig 
1203777dc823SSagi Grimberg 	if (queue->host_qid == 0) {
1204777dc823SSagi Grimberg 		/* Let inflight controller teardown complete */
1205777dc823SSagi Grimberg 		flush_scheduled_work();
1206777dc823SSagi Grimberg 	}
1207777dc823SSagi Grimberg 
12088f000cacSChristoph Hellwig 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
12098f000cacSChristoph Hellwig 	if (ret)
12108f000cacSChristoph Hellwig 		goto release_queue;
12118f000cacSChristoph Hellwig 
12128f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
12138f000cacSChristoph Hellwig 	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
12148f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
12158f000cacSChristoph Hellwig 
12168f000cacSChristoph Hellwig 	return 0;
12178f000cacSChristoph Hellwig 
12188f000cacSChristoph Hellwig release_queue:
12198f000cacSChristoph Hellwig 	nvmet_rdma_free_queue(queue);
12208f000cacSChristoph Hellwig put_device:
12218f000cacSChristoph Hellwig 	kref_put(&ndev->ref, nvmet_rdma_free_dev);
12228f000cacSChristoph Hellwig 
12238f000cacSChristoph Hellwig 	return ret;
12248f000cacSChristoph Hellwig }
12258f000cacSChristoph Hellwig 
12268f000cacSChristoph Hellwig static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
12278f000cacSChristoph Hellwig {
12288f000cacSChristoph Hellwig 	unsigned long flags;
12298f000cacSChristoph Hellwig 
12308f000cacSChristoph Hellwig 	spin_lock_irqsave(&queue->state_lock, flags);
12318f000cacSChristoph Hellwig 	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
12328f000cacSChristoph Hellwig 		pr_warn("trying to establish a connected queue\n");
12338f000cacSChristoph Hellwig 		goto out_unlock;
12348f000cacSChristoph Hellwig 	}
12358f000cacSChristoph Hellwig 	queue->state = NVMET_RDMA_Q_LIVE;
12368f000cacSChristoph Hellwig 
12378f000cacSChristoph Hellwig 	while (!list_empty(&queue->rsp_wait_list)) {
12388f000cacSChristoph Hellwig 		struct nvmet_rdma_rsp *cmd;
12398f000cacSChristoph Hellwig 
12408f000cacSChristoph Hellwig 		cmd = list_first_entry(&queue->rsp_wait_list,
12418f000cacSChristoph Hellwig 				struct nvmet_rdma_rsp, wait_list);
12428f000cacSChristoph Hellwig 		list_del(&cmd->wait_list);
12438f000cacSChristoph Hellwig 
12448f000cacSChristoph Hellwig 		spin_unlock_irqrestore(&queue->state_lock, flags);
12458f000cacSChristoph Hellwig 		nvmet_rdma_handle_command(queue, cmd);
12468f000cacSChristoph Hellwig 		spin_lock_irqsave(&queue->state_lock, flags);
12478f000cacSChristoph Hellwig 	}
12488f000cacSChristoph Hellwig 
12498f000cacSChristoph Hellwig out_unlock:
12508f000cacSChristoph Hellwig 	spin_unlock_irqrestore(&queue->state_lock, flags);
12518f000cacSChristoph Hellwig }
12528f000cacSChristoph Hellwig 
12538f000cacSChristoph Hellwig static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
12548f000cacSChristoph Hellwig {
12558f000cacSChristoph Hellwig 	bool disconnect = false;
12568f000cacSChristoph Hellwig 	unsigned long flags;
12578f000cacSChristoph Hellwig 
12588f000cacSChristoph Hellwig 	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
12598f000cacSChristoph Hellwig 
12608f000cacSChristoph Hellwig 	spin_lock_irqsave(&queue->state_lock, flags);
12618f000cacSChristoph Hellwig 	switch (queue->state) {
12628f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_CONNECTING:
12638f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_LIVE:
12648f000cacSChristoph Hellwig 		queue->state = NVMET_RDMA_Q_DISCONNECTING;	/* fall through */
1265d8f7750aSSagi Grimberg 	case NVMET_RDMA_IN_DEVICE_REMOVAL:
1266d8f7750aSSagi Grimberg 		disconnect = true;
12678f000cacSChristoph Hellwig 		break;
12688f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_DISCONNECTING:
12698f000cacSChristoph Hellwig 		break;
12708f000cacSChristoph Hellwig 	}
12718f000cacSChristoph Hellwig 	spin_unlock_irqrestore(&queue->state_lock, flags);
12728f000cacSChristoph Hellwig 
12738f000cacSChristoph Hellwig 	if (disconnect) {
12748f000cacSChristoph Hellwig 		rdma_disconnect(queue->cm_id);
12758f000cacSChristoph Hellwig 		schedule_work(&queue->release_work);
12768f000cacSChristoph Hellwig 	}
12778f000cacSChristoph Hellwig }
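/*
 * Illustrative aside (not part of the original file): the queue state
 * machine driven by the handlers above is, in summary:
 *
 *	CONNECTING --(CM ESTABLISHED)--> LIVE
 *	CONNECTING/LIVE --(disconnect or error)--> DISCONNECTING
 *	CONNECTING/LIVE --(DEVICE_REMOVAL)--> IN_DEVICE_REMOVAL
 *
 * Both terminal states funnel into nvmet_rdma_release_queue_work();
 * the only behavioural difference is that in the device-removal case
 * the cm_id is destroyed by the RDMA core rather than by the release
 * work.
 */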
12788f000cacSChristoph Hellwig 
12798f000cacSChristoph Hellwig static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
12808f000cacSChristoph Hellwig {
12818f000cacSChristoph Hellwig 	bool disconnect = false;
12828f000cacSChristoph Hellwig 
12838f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
12848f000cacSChristoph Hellwig 	if (!list_empty(&queue->queue_list)) {
12858f000cacSChristoph Hellwig 		list_del_init(&queue->queue_list);
12868f000cacSChristoph Hellwig 		disconnect = true;
12878f000cacSChristoph Hellwig 	}
12888f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
12898f000cacSChristoph Hellwig 
12908f000cacSChristoph Hellwig 	if (disconnect)
12918f000cacSChristoph Hellwig 		__nvmet_rdma_queue_disconnect(queue);
12928f000cacSChristoph Hellwig }
12938f000cacSChristoph Hellwig 
12948f000cacSChristoph Hellwig static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
12958f000cacSChristoph Hellwig 		struct nvmet_rdma_queue *queue)
12968f000cacSChristoph Hellwig {
12978f000cacSChristoph Hellwig 	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
12988f000cacSChristoph Hellwig 
1299766dbb17SSagi Grimberg 	mutex_lock(&nvmet_rdma_queue_mutex);
1300766dbb17SSagi Grimberg 	if (!list_empty(&queue->queue_list))
1301766dbb17SSagi Grimberg 		list_del_init(&queue->queue_list);
1302766dbb17SSagi Grimberg 	mutex_unlock(&nvmet_rdma_queue_mutex);
1303766dbb17SSagi Grimberg 
1304766dbb17SSagi Grimberg 	pr_err("failed to connect queue %d\n", queue->idx);
13058f000cacSChristoph Hellwig 	schedule_work(&queue->release_work);
13068f000cacSChristoph Hellwig }
13078f000cacSChristoph Hellwig 
1308d8f7750aSSagi Grimberg /**
1309d8f7750aSSagi Grimberg  * nvmet_rdma_device_removal() - Handle RDMA device removal
1310d8f7750aSSagi Grimberg  * @cm_id: rdma_cm id (for a listener, its context is the nvmet port)
1311d8f7750aSSagi Grimberg  * @queue: nvmet rdma queue (cm_id qp_context), NULL for listener cm_ids
1312d8f7750aSSagi Grimberg  *
1313d8f7750aSSagi Grimberg  * DEVICE_REMOVAL event notifies us that the RDMA device is about
1314d8f7750aSSagi Grimberg  * to unplug so we should take care of destroying our RDMA resources.
1315d8f7750aSSagi Grimberg  * This event will be generated for each allocated cm_id.
1316d8f7750aSSagi Grimberg  *
1317d8f7750aSSagi Grimberg  * Note that this event can be generated on a normal queue cm_id
1318d8f7750aSSagi Grimberg  * and/or a device bound listener cm_id (in which case
1319d8f7750aSSagi Grimberg  * queue will be NULL).
1320d8f7750aSSagi Grimberg  *
1321d8f7750aSSagi Grimberg  * We claim ownership of destroying the cm_id. For queues we move
1322d8f7750aSSagi Grimberg  * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL; for the port
1323d8f7750aSSagi Grimberg  * we nullify priv to prevent a double cm_id destruction, and destroy
1324d8f7750aSSagi Grimberg  * the cm_id implicitly by returning a non-zero rc to the callout.
1325d8f7750aSSagi Grimberg  */
1326d8f7750aSSagi Grimberg static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1327d8f7750aSSagi Grimberg 		struct nvmet_rdma_queue *queue)
1328d8f7750aSSagi Grimberg {
1329d8f7750aSSagi Grimberg 	unsigned long flags;
1330d8f7750aSSagi Grimberg 
1331d8f7750aSSagi Grimberg 	if (!queue) {
1332d8f7750aSSagi Grimberg 		struct nvmet_port *port = cm_id->context;
1333d8f7750aSSagi Grimberg 
1334d8f7750aSSagi Grimberg 		/*
1335d8f7750aSSagi Grimberg 		 * This is a listener cm_id. Make sure that
1336d8f7750aSSagi Grimberg 		 * future remove_port won't invoke a double
1337d8f7750aSSagi Grimberg 		 * cm_id destroy. Use atomic xchg to make sure
1338d8f7750aSSagi Grimberg 		 * we don't compete with remove_port.
1339d8f7750aSSagi Grimberg 		 */
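		/*
		 * Illustrative aside (not part of the original file): the
		 * matching swap on the other side, in
		 * nvmet_rdma_remove_port() later in this listing, is:
		 *
		 *	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
		 *
		 *	if (cm_id)
		 *		rdma_destroy_id(cm_id);
		 *
		 * Whichever path swaps the non-NULL cm_id out first owns
		 * its destruction, so the cm_id is destroyed exactly once.
		 */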
1340d8f7750aSSagi Grimberg 		if (xchg(&port->priv, NULL) != cm_id)
1341d8f7750aSSagi Grimberg 			return 0;
1342d8f7750aSSagi Grimberg 	} else {
1343d8f7750aSSagi Grimberg 		/*
1344d8f7750aSSagi Grimberg 		 * This is a queue cm_id. Make sure that
1345d8f7750aSSagi Grimberg 		 * the queue release will not destroy the cm_id,
1346d8f7750aSSagi Grimberg 		 * and schedule removal of all ctrl queues (only
1347d8f7750aSSagi Grimberg 		 * if the queue is not disconnecting already).
1348d8f7750aSSagi Grimberg 		 */
1349d8f7750aSSagi Grimberg 		spin_lock_irqsave(&queue->state_lock, flags);
1350d8f7750aSSagi Grimberg 		if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
1351d8f7750aSSagi Grimberg 			queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
1352d8f7750aSSagi Grimberg 		spin_unlock_irqrestore(&queue->state_lock, flags);
1353d8f7750aSSagi Grimberg 		nvmet_rdma_queue_disconnect(queue);
1354d8f7750aSSagi Grimberg 		flush_scheduled_work();
1355d8f7750aSSagi Grimberg 	}
1356d8f7750aSSagi Grimberg 
1357d8f7750aSSagi Grimberg 	/*
1358d8f7750aSSagi Grimberg 	 * We need to return 1 so that the core will destroy
1359d8f7750aSSagi Grimberg 	 * its own ID. What a great API design..
1360d8f7750aSSagi Grimberg 	 */
1361d8f7750aSSagi Grimberg 	return 1;
1362d8f7750aSSagi Grimberg }
1363d8f7750aSSagi Grimberg 
13648f000cacSChristoph Hellwig static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
13658f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
13668f000cacSChristoph Hellwig {
13678f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue = NULL;
13688f000cacSChristoph Hellwig 	int ret = 0;
13698f000cacSChristoph Hellwig 
13708f000cacSChristoph Hellwig 	if (cm_id->qp)
13718f000cacSChristoph Hellwig 		queue = cm_id->qp->qp_context;
13728f000cacSChristoph Hellwig 
13738f000cacSChristoph Hellwig 	pr_debug("%s (%d): status %d id %p\n",
13748f000cacSChristoph Hellwig 		rdma_event_msg(event->event), event->event,
13758f000cacSChristoph Hellwig 		event->status, cm_id);
13768f000cacSChristoph Hellwig 
13778f000cacSChristoph Hellwig 	switch (event->event) {
13788f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_CONNECT_REQUEST:
13798f000cacSChristoph Hellwig 		ret = nvmet_rdma_queue_connect(cm_id, event);
13808f000cacSChristoph Hellwig 		break;
13818f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_ESTABLISHED:
13828f000cacSChristoph Hellwig 		nvmet_rdma_queue_established(queue);
13838f000cacSChristoph Hellwig 		break;
13848f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_ADDR_CHANGE:
13858f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_DISCONNECTED:
13868f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1387fa14a0acSBart Van Assche 		/*
1388fa14a0acSBart Van Assche 		 * We might end up here when we already freed the qp,
1389fa14a0acSBart Van Assche 		 * which means the queue release sequence is in progress,
1390fa14a0acSBart Van Assche 		 * so don't get in the way...
1391fa14a0acSBart Van Assche 		 */
1392fa14a0acSBart Van Assche 		if (queue)
13938f000cacSChristoph Hellwig 			nvmet_rdma_queue_disconnect(queue);
1394d8f7750aSSagi Grimberg 		break;
1395d8f7750aSSagi Grimberg 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
1396d8f7750aSSagi Grimberg 		ret = nvmet_rdma_device_removal(cm_id, queue);
13978f000cacSChristoph Hellwig 		break;
13988f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_REJECTED:
1399512fb1b3SSteve Wise 		pr_debug("Connection rejected: %s\n",
1400512fb1b3SSteve Wise 			rdma_reject_msg(cm_id, event->status));
1401512fb1b3SSteve Wise 		/* FALLTHROUGH */
14028f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_UNREACHABLE:
14038f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_CONNECT_ERROR:
14048f000cacSChristoph Hellwig 		nvmet_rdma_queue_connect_fail(cm_id, queue);
14058f000cacSChristoph Hellwig 		break;
14068f000cacSChristoph Hellwig 	default:
14078f000cacSChristoph Hellwig 		pr_err("received unrecognized RDMA CM event %d\n",
14088f000cacSChristoph Hellwig 			event->event);
14098f000cacSChristoph Hellwig 		break;
14108f000cacSChristoph Hellwig 	}
14118f000cacSChristoph Hellwig 
14128f000cacSChristoph Hellwig 	return ret;
14138f000cacSChristoph Hellwig }
14148f000cacSChristoph Hellwig 
14158f000cacSChristoph Hellwig static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
14168f000cacSChristoph Hellwig {
14178f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
14188f000cacSChristoph Hellwig 
14198f000cacSChristoph Hellwig restart:
14208f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
14218f000cacSChristoph Hellwig 	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
14228f000cacSChristoph Hellwig 		if (queue->nvme_sq.ctrl == ctrl) {
14238f000cacSChristoph Hellwig 			list_del_init(&queue->queue_list);
14248f000cacSChristoph Hellwig 			mutex_unlock(&nvmet_rdma_queue_mutex);
14258f000cacSChristoph Hellwig 
14268f000cacSChristoph Hellwig 			__nvmet_rdma_queue_disconnect(queue);
14278f000cacSChristoph Hellwig 			goto restart;
14288f000cacSChristoph Hellwig 		}
14298f000cacSChristoph Hellwig 	}
14308f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
14318f000cacSChristoph Hellwig }
14328f000cacSChristoph Hellwig 
14338f000cacSChristoph Hellwig static int nvmet_rdma_add_port(struct nvmet_port *port)
14348f000cacSChristoph Hellwig {
14358f000cacSChristoph Hellwig 	struct rdma_cm_id *cm_id;
1436670c2a3aSSagi Grimberg 	struct sockaddr_storage addr = { };
1437670c2a3aSSagi Grimberg 	__kernel_sa_family_t af;
14388f000cacSChristoph Hellwig 	int ret;
14398f000cacSChristoph Hellwig 
14408f000cacSChristoph Hellwig 	switch (port->disc_addr.adrfam) {
14418f000cacSChristoph Hellwig 	case NVMF_ADDR_FAMILY_IP4:
1442670c2a3aSSagi Grimberg 		af = AF_INET;
1443670c2a3aSSagi Grimberg 		break;
1444670c2a3aSSagi Grimberg 	case NVMF_ADDR_FAMILY_IP6:
1445670c2a3aSSagi Grimberg 		af = AF_INET6;
14468f000cacSChristoph Hellwig 		break;
14478f000cacSChristoph Hellwig 	default:
14488f000cacSChristoph Hellwig 		pr_err("address family %d not supported\n",
14498f000cacSChristoph Hellwig 			port->disc_addr.adrfam);
14508f000cacSChristoph Hellwig 		return -EINVAL;
14518f000cacSChristoph Hellwig 	}
14528f000cacSChristoph Hellwig 
1453670c2a3aSSagi Grimberg 	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
1454670c2a3aSSagi Grimberg 			port->disc_addr.trsvcid, &addr);
1455670c2a3aSSagi Grimberg 	if (ret) {
1456670c2a3aSSagi Grimberg 		pr_err("malformed ip/port passed: %s:%s\n",
1457670c2a3aSSagi Grimberg 			port->disc_addr.traddr, port->disc_addr.trsvcid);
14588f000cacSChristoph Hellwig 		return ret;
1459670c2a3aSSagi Grimberg 	}
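	/*
	 * Illustrative aside (not part of the original file): traddr and
	 * trsvcid come from the nvmet configfs port attributes, e.g. for
	 * a hypothetical port listening on 10.0.0.1:
	 *
	 *	cd /sys/kernel/config/nvmet/ports/1
	 *	echo rdma > addr_trtype
	 *	echo ipv4 > addr_adrfam
	 *	echo 10.0.0.1 > addr_traddr
	 *	echo 4420 > addr_trsvcid
	 *
	 * (4420 is the IANA-assigned NVMe over Fabrics port.)
	 */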
14608f000cacSChristoph Hellwig 
14618f000cacSChristoph Hellwig 	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
14628f000cacSChristoph Hellwig 			RDMA_PS_TCP, IB_QPT_RC);
14638f000cacSChristoph Hellwig 	if (IS_ERR(cm_id)) {
14648f000cacSChristoph Hellwig 		pr_err("CM ID creation failed\n");
14658f000cacSChristoph Hellwig 		return PTR_ERR(cm_id);
14668f000cacSChristoph Hellwig 	}
14678f000cacSChristoph Hellwig 
1468670c2a3aSSagi Grimberg 	/*
1469670c2a3aSSagi Grimberg 	 * Allow both IPv4 and IPv6 sockets to bind a single port
1470670c2a3aSSagi Grimberg 	 * at the same time.
1471670c2a3aSSagi Grimberg 	 */
1472670c2a3aSSagi Grimberg 	ret = rdma_set_afonly(cm_id, 1);
14738f000cacSChristoph Hellwig 	if (ret) {
1474670c2a3aSSagi Grimberg 		pr_err("rdma_set_afonly failed (%d)\n", ret);
1475670c2a3aSSagi Grimberg 		goto out_destroy_id;
1476670c2a3aSSagi Grimberg 	}
1477670c2a3aSSagi Grimberg 
1478670c2a3aSSagi Grimberg 	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
1479670c2a3aSSagi Grimberg 	if (ret) {
1480670c2a3aSSagi Grimberg 		pr_err("binding CM ID to %pISpcs failed (%d)\n",
1481670c2a3aSSagi Grimberg 			(struct sockaddr *)&addr, ret);
14828f000cacSChristoph Hellwig 		goto out_destroy_id;
14838f000cacSChristoph Hellwig 	}
14848f000cacSChristoph Hellwig 
14858f000cacSChristoph Hellwig 	ret = rdma_listen(cm_id, 128);
14868f000cacSChristoph Hellwig 	if (ret) {
1487670c2a3aSSagi Grimberg 		pr_err("listening to %pISpcs failed (%d)\n",
1488670c2a3aSSagi Grimberg 			(struct sockaddr *)&addr, ret);
14898f000cacSChristoph Hellwig 		goto out_destroy_id;
14908f000cacSChristoph Hellwig 	}
14918f000cacSChristoph Hellwig 
1492670c2a3aSSagi Grimberg 	pr_info("enabling port %d (%pISpcs)\n",
1493670c2a3aSSagi Grimberg 		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
14948f000cacSChristoph Hellwig 	port->priv = cm_id;
14958f000cacSChristoph Hellwig 	return 0;
14968f000cacSChristoph Hellwig 
14978f000cacSChristoph Hellwig out_destroy_id:
14988f000cacSChristoph Hellwig 	rdma_destroy_id(cm_id);
14998f000cacSChristoph Hellwig 	return ret;
15008f000cacSChristoph Hellwig }
15018f000cacSChristoph Hellwig 
15028f000cacSChristoph Hellwig static void nvmet_rdma_remove_port(struct nvmet_port *port)
15038f000cacSChristoph Hellwig {
1504d8f7750aSSagi Grimberg 	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
15058f000cacSChristoph Hellwig 
1506d8f7750aSSagi Grimberg 	if (cm_id)
15078f000cacSChristoph Hellwig 		rdma_destroy_id(cm_id);
15088f000cacSChristoph Hellwig }
15098f000cacSChristoph Hellwig 
15108f000cacSChristoph Hellwig static struct nvmet_fabrics_ops nvmet_rdma_ops = {
15118f000cacSChristoph Hellwig 	.owner = THIS_MODULE,
15128f000cacSChristoph Hellwig 	.type = NVMF_TRTYPE_RDMA,
15138f000cacSChristoph Hellwig 	.sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE,
15148f000cacSChristoph Hellwig 	.msdbd = 1,
15158f000cacSChristoph Hellwig 	.has_keyed_sgls = 1,
15168f000cacSChristoph Hellwig 	.add_port = nvmet_rdma_add_port,
15178f000cacSChristoph Hellwig 	.remove_port = nvmet_rdma_remove_port,
15188f000cacSChristoph Hellwig 	.queue_response = nvmet_rdma_queue_response,
15198f000cacSChristoph Hellwig 	.delete_ctrl = nvmet_rdma_delete_ctrl,
15208f000cacSChristoph Hellwig };
15218f000cacSChristoph Hellwig 
15228f000cacSChristoph Hellwig static int __init nvmet_rdma_init(void)
15238f000cacSChristoph Hellwig {
15248f000cacSChristoph Hellwig 	return nvmet_register_transport(&nvmet_rdma_ops);
15258f000cacSChristoph Hellwig }
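/*
 * Illustrative aside (not part of the original file):
 * nvmet_register_transport() stores nvmet_rdma_ops in the core's
 * transport table indexed by .type, so ports configured with trtype
 * "rdma" (NVMF_TRTYPE_RDMA) are routed to the add_port/remove_port
 * callbacks above.
 */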
15268f000cacSChristoph Hellwig 
15278f000cacSChristoph Hellwig static void __exit nvmet_rdma_exit(void)
15288f000cacSChristoph Hellwig {
15298f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
15308f000cacSChristoph Hellwig 
15318f000cacSChristoph Hellwig 	nvmet_unregister_transport(&nvmet_rdma_ops);
15328f000cacSChristoph Hellwig 
15338f000cacSChristoph Hellwig 	flush_scheduled_work();
15348f000cacSChristoph Hellwig 
15358f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
15368f000cacSChristoph Hellwig 	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
15378f000cacSChristoph Hellwig 			struct nvmet_rdma_queue, queue_list))) {
15388f000cacSChristoph Hellwig 		list_del_init(&queue->queue_list);
15398f000cacSChristoph Hellwig 
15408f000cacSChristoph Hellwig 		mutex_unlock(&nvmet_rdma_queue_mutex);
15418f000cacSChristoph Hellwig 		__nvmet_rdma_queue_disconnect(queue);
15428f000cacSChristoph Hellwig 		mutex_lock(&nvmet_rdma_queue_mutex);
15438f000cacSChristoph Hellwig 	}
15448f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
15458f000cacSChristoph Hellwig 
15468f000cacSChristoph Hellwig 	flush_scheduled_work();
15478f000cacSChristoph Hellwig 	ida_destroy(&nvmet_rdma_queue_ida);
15488f000cacSChristoph Hellwig }
15498f000cacSChristoph Hellwig 
15508f000cacSChristoph Hellwig module_init(nvmet_rdma_init);
15518f000cacSChristoph Hellwig module_exit(nvmet_rdma_exit);
15528f000cacSChristoph Hellwig 
15538f000cacSChristoph Hellwig MODULE_LICENSE("GPL v2");
15548f000cacSChristoph Hellwig MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
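/*
 * Illustrative aside (not part of the original file): the module alias
 * lets the nvmet core auto-load this transport when an RDMA port is
 * enabled, via a request along the lines of:
 *
 *	request_module("nvmet-transport-%d", port->disc_addr.trtype);
 *
 * where trtype is NVMF_TRTYPE_RDMA (1) for this transport.
 */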