/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE

struct nvmet_rdma_cmd {
	struct ib_sge sge[2];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg;
	struct page *inline_page;
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct list_head free_rsps;
	spinlock_t rsps_lock;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};

struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *srq_cmds;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

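	/* sge[0] carries the 64-byte SQE; sge[1] (I/O queues only) carries inline data */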
	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}


static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		sgl_free(rsp->req.sg);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * we didn't setup the controller yet in case
		 * of admin connect error, just disconnect and
		 * cleanup the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
	}
}

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

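	/* the RDMA READ of the host data completed; now execute the command */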
	nvmet_req_execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	/* no data command? */
	if (!len)
		return 0;

	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
	if (!rsp->req.sg)
		return NVME_SC_INTERNAL;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->req.transfer_len += len;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}
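
	/*
	 * Host-to-device transfers are fetched with an RDMA READ first; the
	 * command is then executed from nvmet_rdma_read_data_done() once the
	 * READ completes.  Everything else can execute immediately.
	 */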
	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		nvmet_req_execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
		struct nvmet_rdma_queue *queue)
{
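	/* validate the NVMe/RDMA CM connect private data and size our queues from it */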
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
		enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

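/*
 * QP async event handler: forward IB_EVENT_COMM_EST to the RDMA CM and just
 * log everything else.
 */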
10918f000cacSChristoph Hellwig static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) 10928f000cacSChristoph Hellwig { 10938f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue = priv; 10948f000cacSChristoph Hellwig 10958f000cacSChristoph Hellwig switch (event->event) { 10968f000cacSChristoph Hellwig case IB_EVENT_COMM_EST: 10978f000cacSChristoph Hellwig rdma_notify(queue->cm_id, event->event); 10988f000cacSChristoph Hellwig break; 10998f000cacSChristoph Hellwig default: 1100675796beSMax Gurtovoy pr_err("received IB QP event: %s (%d)\n", 1101675796beSMax Gurtovoy ib_event_msg(event->event), event->event); 11028f000cacSChristoph Hellwig break; 11038f000cacSChristoph Hellwig } 11048f000cacSChristoph Hellwig } 11058f000cacSChristoph Hellwig 11068f000cacSChristoph Hellwig static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, 11078f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue, 11088f000cacSChristoph Hellwig struct rdma_conn_param *p) 11098f000cacSChristoph Hellwig { 11108f000cacSChristoph Hellwig struct rdma_conn_param param = { }; 11118f000cacSChristoph Hellwig struct nvme_rdma_cm_rep priv = { }; 11128f000cacSChristoph Hellwig int ret = -ENOMEM; 11138f000cacSChristoph Hellwig 11148f000cacSChristoph Hellwig param.rnr_retry_count = 7; 11158f000cacSChristoph Hellwig param.flow_control = 1; 11168f000cacSChristoph Hellwig param.initiator_depth = min_t(u8, p->initiator_depth, 11178f000cacSChristoph Hellwig queue->dev->device->attrs.max_qp_init_rd_atom); 11188f000cacSChristoph Hellwig param.private_data = &priv; 11198f000cacSChristoph Hellwig param.private_data_len = sizeof(priv); 11208f000cacSChristoph Hellwig priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 11218f000cacSChristoph Hellwig priv.crqsize = cpu_to_le16(queue->recv_queue_size); 11228f000cacSChristoph Hellwig 11238f000cacSChristoph Hellwig ret = rdma_accept(cm_id, &param); 11248f000cacSChristoph Hellwig if (ret) 11258f000cacSChristoph Hellwig pr_err("rdma_accept failed (error code = %d)\n", ret); 11268f000cacSChristoph Hellwig 11278f000cacSChristoph Hellwig return ret; 11288f000cacSChristoph Hellwig } 11298f000cacSChristoph Hellwig 11308f000cacSChristoph Hellwig static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, 11318f000cacSChristoph Hellwig struct rdma_cm_event *event) 11328f000cacSChristoph Hellwig { 11338f000cacSChristoph Hellwig struct nvmet_rdma_device *ndev; 11348f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 11358f000cacSChristoph Hellwig int ret = -EINVAL; 11368f000cacSChristoph Hellwig 11378f000cacSChristoph Hellwig ndev = nvmet_rdma_find_get_device(cm_id); 11388f000cacSChristoph Hellwig if (!ndev) { 11398f000cacSChristoph Hellwig nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC); 11408f000cacSChristoph Hellwig return -ECONNREFUSED; 11418f000cacSChristoph Hellwig } 11428f000cacSChristoph Hellwig 11438f000cacSChristoph Hellwig queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); 11448f000cacSChristoph Hellwig if (!queue) { 11458f000cacSChristoph Hellwig ret = -ENOMEM; 11468f000cacSChristoph Hellwig goto put_device; 11478f000cacSChristoph Hellwig } 11488f000cacSChristoph Hellwig queue->port = cm_id->context; 11498f000cacSChristoph Hellwig 1150777dc823SSagi Grimberg if (queue->host_qid == 0) { 1151777dc823SSagi Grimberg /* Let inflight controller teardown complete */ 1152777dc823SSagi Grimberg flush_scheduled_work(); 1153777dc823SSagi Grimberg } 1154777dc823SSagi Grimberg 11558f000cacSChristoph Hellwig ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
11568f000cacSChristoph Hellwig if (ret) 11578f000cacSChristoph Hellwig goto release_queue; 11588f000cacSChristoph Hellwig 11598f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 11608f000cacSChristoph Hellwig list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); 11618f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 11628f000cacSChristoph Hellwig 11638f000cacSChristoph Hellwig return 0; 11648f000cacSChristoph Hellwig 11658f000cacSChristoph Hellwig release_queue: 11668f000cacSChristoph Hellwig nvmet_rdma_free_queue(queue); 11678f000cacSChristoph Hellwig put_device: 11688f000cacSChristoph Hellwig kref_put(&ndev->ref, nvmet_rdma_free_dev); 11698f000cacSChristoph Hellwig 11708f000cacSChristoph Hellwig return ret; 11718f000cacSChristoph Hellwig } 11728f000cacSChristoph Hellwig 11738f000cacSChristoph Hellwig static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) 11748f000cacSChristoph Hellwig { 11758f000cacSChristoph Hellwig unsigned long flags; 11768f000cacSChristoph Hellwig 11778f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 11788f000cacSChristoph Hellwig if (queue->state != NVMET_RDMA_Q_CONNECTING) { 11798f000cacSChristoph Hellwig pr_warn("trying to establish a connected queue\n"); 11808f000cacSChristoph Hellwig goto out_unlock; 11818f000cacSChristoph Hellwig } 11828f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_LIVE; 11838f000cacSChristoph Hellwig 11848f000cacSChristoph Hellwig while (!list_empty(&queue->rsp_wait_list)) { 11858f000cacSChristoph Hellwig struct nvmet_rdma_rsp *cmd; 11868f000cacSChristoph Hellwig 11878f000cacSChristoph Hellwig cmd = list_first_entry(&queue->rsp_wait_list, 11888f000cacSChristoph Hellwig struct nvmet_rdma_rsp, wait_list); 11898f000cacSChristoph Hellwig list_del(&cmd->wait_list); 11908f000cacSChristoph Hellwig 11918f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 11928f000cacSChristoph Hellwig nvmet_rdma_handle_command(queue, cmd); 11938f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 11948f000cacSChristoph Hellwig } 11958f000cacSChristoph Hellwig 11968f000cacSChristoph Hellwig out_unlock: 11978f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 11988f000cacSChristoph Hellwig } 11998f000cacSChristoph Hellwig 12008f000cacSChristoph Hellwig static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) 12018f000cacSChristoph Hellwig { 12028f000cacSChristoph Hellwig bool disconnect = false; 12038f000cacSChristoph Hellwig unsigned long flags; 12048f000cacSChristoph Hellwig 12058f000cacSChristoph Hellwig pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); 12068f000cacSChristoph Hellwig 12078f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 12088f000cacSChristoph Hellwig switch (queue->state) { 12098f000cacSChristoph Hellwig case NVMET_RDMA_Q_CONNECTING: 12108f000cacSChristoph Hellwig case NVMET_RDMA_Q_LIVE: 12118f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_DISCONNECTING; 1212d8f7750aSSagi Grimberg case NVMET_RDMA_IN_DEVICE_REMOVAL: 1213d8f7750aSSagi Grimberg disconnect = true; 12148f000cacSChristoph Hellwig break; 12158f000cacSChristoph Hellwig case NVMET_RDMA_Q_DISCONNECTING: 12168f000cacSChristoph Hellwig break; 12178f000cacSChristoph Hellwig } 12188f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 12198f000cacSChristoph Hellwig 12208f000cacSChristoph Hellwig if (disconnect) { 12218f000cacSChristoph Hellwig 
rdma_disconnect(queue->cm_id); 12228f000cacSChristoph Hellwig schedule_work(&queue->release_work); 12238f000cacSChristoph Hellwig } 12248f000cacSChristoph Hellwig } 12258f000cacSChristoph Hellwig 12268f000cacSChristoph Hellwig static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) 12278f000cacSChristoph Hellwig { 12288f000cacSChristoph Hellwig bool disconnect = false; 12298f000cacSChristoph Hellwig 12308f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 12318f000cacSChristoph Hellwig if (!list_empty(&queue->queue_list)) { 12328f000cacSChristoph Hellwig list_del_init(&queue->queue_list); 12338f000cacSChristoph Hellwig disconnect = true; 12348f000cacSChristoph Hellwig } 12358f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 12368f000cacSChristoph Hellwig 12378f000cacSChristoph Hellwig if (disconnect) 12388f000cacSChristoph Hellwig __nvmet_rdma_queue_disconnect(queue); 12398f000cacSChristoph Hellwig } 12408f000cacSChristoph Hellwig 12418f000cacSChristoph Hellwig static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, 12428f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue) 12438f000cacSChristoph Hellwig { 12448f000cacSChristoph Hellwig WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); 12458f000cacSChristoph Hellwig 1246766dbb17SSagi Grimberg mutex_lock(&nvmet_rdma_queue_mutex); 1247766dbb17SSagi Grimberg if (!list_empty(&queue->queue_list)) 1248766dbb17SSagi Grimberg list_del_init(&queue->queue_list); 1249766dbb17SSagi Grimberg mutex_unlock(&nvmet_rdma_queue_mutex); 1250766dbb17SSagi Grimberg 1251766dbb17SSagi Grimberg pr_err("failed to connect queue %d\n", queue->idx); 12528f000cacSChristoph Hellwig schedule_work(&queue->release_work); 12538f000cacSChristoph Hellwig } 12548f000cacSChristoph Hellwig 1255d8f7750aSSagi Grimberg /** 1256d8f7750aSSagi Grimberg * nvmet_rdma_device_removal() - Handle RDMA device removal 1257f1d4ef7dSSagi Grimberg * @cm_id: rdma_cm id, used for nvmet port 1258d8f7750aSSagi Grimberg * @queue: nvmet rdma queue (cm id qp_context) 1259d8f7750aSSagi Grimberg * 1260d8f7750aSSagi Grimberg * DEVICE_REMOVAL event notifies us that the RDMA device is about 1261f1d4ef7dSSagi Grimberg * to unplug. Note that this event can be generated on a normal 1262f1d4ef7dSSagi Grimberg * queue cm_id and/or a device bound listener cm_id (in which 1263f1d4ef7dSSagi Grimberg * case queue will be NULL). 1264d8f7750aSSagi Grimberg * 1265f1d4ef7dSSagi Grimberg * We registered an ib_client to handle device removal for queues, 1266f1d4ef7dSSagi Grimberg * so we only need to handle the listening port cm_ids. In this case 1267d8f7750aSSagi Grimberg * we nullify the priv to prevent double cm_id destruction, and destroy 1268d8f7750aSSagi Grimberg * the cm_id implicitly by returning a non-zero rc to the callout. 1269d8f7750aSSagi Grimberg */ 1270d8f7750aSSagi Grimberg static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, 1271d8f7750aSSagi Grimberg struct nvmet_rdma_queue *queue) 1272d8f7750aSSagi Grimberg { 1273f1d4ef7dSSagi Grimberg struct nvmet_port *port; 1274d8f7750aSSagi Grimberg 1275f1d4ef7dSSagi Grimberg if (queue) { 1276f1d4ef7dSSagi Grimberg /* 1277f1d4ef7dSSagi Grimberg * This is a queue cm_id. We have registered 1278f1d4ef7dSSagi Grimberg * an ib_client to handle queue removal, 1279f1d4ef7dSSagi Grimberg * so don't interfere and just return.
1280f1d4ef7dSSagi Grimberg */ 1281f1d4ef7dSSagi Grimberg return 0; 1282f1d4ef7dSSagi Grimberg } 1283f1d4ef7dSSagi Grimberg 1284f1d4ef7dSSagi Grimberg port = cm_id->context; 1285d8f7750aSSagi Grimberg 1286d8f7750aSSagi Grimberg /* 1287d8f7750aSSagi Grimberg * This is a listener cm_id. Make sure that 1288d8f7750aSSagi Grimberg * future remove_port won't invoke a double 1289d8f7750aSSagi Grimberg * cm_id destroy. Use atomic xchg to make sure 1290d8f7750aSSagi Grimberg * we don't compete with remove_port. 1291d8f7750aSSagi Grimberg */ 1292d8f7750aSSagi Grimberg if (xchg(&port->priv, NULL) != cm_id) 1293d8f7750aSSagi Grimberg return 0; 1294d8f7750aSSagi Grimberg 1295d8f7750aSSagi Grimberg /* 1296d8f7750aSSagi Grimberg * We need to return 1 so that the core will destroy 1297d8f7750aSSagi Grimberg * its own ID. What a great API design.. 1298d8f7750aSSagi Grimberg */ 1299d8f7750aSSagi Grimberg return 1; 1300d8f7750aSSagi Grimberg } 1301d8f7750aSSagi Grimberg 13028f000cacSChristoph Hellwig static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, 13038f000cacSChristoph Hellwig struct rdma_cm_event *event) 13048f000cacSChristoph Hellwig { 13058f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue = NULL; 13068f000cacSChristoph Hellwig int ret = 0; 13078f000cacSChristoph Hellwig 13088f000cacSChristoph Hellwig if (cm_id->qp) 13098f000cacSChristoph Hellwig queue = cm_id->qp->qp_context; 13108f000cacSChristoph Hellwig 13118f000cacSChristoph Hellwig pr_debug("%s (%d): status %d id %p\n", 13128f000cacSChristoph Hellwig rdma_event_msg(event->event), event->event, 13138f000cacSChristoph Hellwig event->status, cm_id); 13148f000cacSChristoph Hellwig 13158f000cacSChristoph Hellwig switch (event->event) { 13168f000cacSChristoph Hellwig case RDMA_CM_EVENT_CONNECT_REQUEST: 13178f000cacSChristoph Hellwig ret = nvmet_rdma_queue_connect(cm_id, event); 13188f000cacSChristoph Hellwig break; 13198f000cacSChristoph Hellwig case RDMA_CM_EVENT_ESTABLISHED: 13208f000cacSChristoph Hellwig nvmet_rdma_queue_established(queue); 13218f000cacSChristoph Hellwig break; 13228f000cacSChristoph Hellwig case RDMA_CM_EVENT_ADDR_CHANGE: 13238f000cacSChristoph Hellwig case RDMA_CM_EVENT_DISCONNECTED: 13248f000cacSChristoph Hellwig case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1325fa14a0acSBart Van Assche /* 1326fa14a0acSBart Van Assche * We might end up here when we already freed the qp 1327fa14a0acSBart Van Assche * which means queue release sequence is in progress, 1328fa14a0acSBart Van Assche * so don't get in the way...
1329fa14a0acSBart Van Assche */ 1330fa14a0acSBart Van Assche if (queue) 13318f000cacSChristoph Hellwig nvmet_rdma_queue_disconnect(queue); 1332d8f7750aSSagi Grimberg break; 1333d8f7750aSSagi Grimberg case RDMA_CM_EVENT_DEVICE_REMOVAL: 1334d8f7750aSSagi Grimberg ret = nvmet_rdma_device_removal(cm_id, queue); 13358f000cacSChristoph Hellwig break; 13368f000cacSChristoph Hellwig case RDMA_CM_EVENT_REJECTED: 1337512fb1b3SSteve Wise pr_debug("Connection rejected: %s\n", 1338512fb1b3SSteve Wise rdma_reject_msg(cm_id, event->status)); 1339512fb1b3SSteve Wise /* FALLTHROUGH */ 13408f000cacSChristoph Hellwig case RDMA_CM_EVENT_UNREACHABLE: 13418f000cacSChristoph Hellwig case RDMA_CM_EVENT_CONNECT_ERROR: 13428f000cacSChristoph Hellwig nvmet_rdma_queue_connect_fail(cm_id, queue); 13438f000cacSChristoph Hellwig break; 13448f000cacSChristoph Hellwig default: 13458f000cacSChristoph Hellwig pr_err("received unrecognized RDMA CM event %d\n", 13468f000cacSChristoph Hellwig event->event); 13478f000cacSChristoph Hellwig break; 13488f000cacSChristoph Hellwig } 13498f000cacSChristoph Hellwig 13508f000cacSChristoph Hellwig return ret; 13518f000cacSChristoph Hellwig } 13528f000cacSChristoph Hellwig 13538f000cacSChristoph Hellwig static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) 13548f000cacSChristoph Hellwig { 13558f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 13568f000cacSChristoph Hellwig 13578f000cacSChristoph Hellwig restart: 13588f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 13598f000cacSChristoph Hellwig list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { 13608f000cacSChristoph Hellwig if (queue->nvme_sq.ctrl == ctrl) { 13618f000cacSChristoph Hellwig list_del_init(&queue->queue_list); 13628f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 13638f000cacSChristoph Hellwig 13648f000cacSChristoph Hellwig __nvmet_rdma_queue_disconnect(queue); 13658f000cacSChristoph Hellwig goto restart; 13668f000cacSChristoph Hellwig } 13678f000cacSChristoph Hellwig } 13688f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 13698f000cacSChristoph Hellwig } 13708f000cacSChristoph Hellwig 13718f000cacSChristoph Hellwig static int nvmet_rdma_add_port(struct nvmet_port *port) 13728f000cacSChristoph Hellwig { 13738f000cacSChristoph Hellwig struct rdma_cm_id *cm_id; 1374670c2a3aSSagi Grimberg struct sockaddr_storage addr = { }; 1375670c2a3aSSagi Grimberg __kernel_sa_family_t af; 13768f000cacSChristoph Hellwig int ret; 13778f000cacSChristoph Hellwig 13788f000cacSChristoph Hellwig switch (port->disc_addr.adrfam) { 13798f000cacSChristoph Hellwig case NVMF_ADDR_FAMILY_IP4: 1380670c2a3aSSagi Grimberg af = AF_INET; 1381670c2a3aSSagi Grimberg break; 1382670c2a3aSSagi Grimberg case NVMF_ADDR_FAMILY_IP6: 1383670c2a3aSSagi Grimberg af = AF_INET6; 13848f000cacSChristoph Hellwig break; 13858f000cacSChristoph Hellwig default: 13868f000cacSChristoph Hellwig pr_err("address family %d not supported\n", 13878f000cacSChristoph Hellwig port->disc_addr.adrfam); 13888f000cacSChristoph Hellwig return -EINVAL; 13898f000cacSChristoph Hellwig } 13908f000cacSChristoph Hellwig 1391670c2a3aSSagi Grimberg ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr, 1392670c2a3aSSagi Grimberg port->disc_addr.trsvcid, &addr); 1393670c2a3aSSagi Grimberg if (ret) { 1394670c2a3aSSagi Grimberg pr_err("malformed ip/port passed: %s:%s\n", 1395670c2a3aSSagi Grimberg port->disc_addr.traddr, port->disc_addr.trsvcid); 13968f000cacSChristoph Hellwig return ret; 1397670c2a3aSSagi Grimberg } 
13988f000cacSChristoph Hellwig 13998f000cacSChristoph Hellwig cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port, 14008f000cacSChristoph Hellwig RDMA_PS_TCP, IB_QPT_RC); 14018f000cacSChristoph Hellwig if (IS_ERR(cm_id)) { 14028f000cacSChristoph Hellwig pr_err("CM ID creation failed\n"); 14038f000cacSChristoph Hellwig return PTR_ERR(cm_id); 14048f000cacSChristoph Hellwig } 14058f000cacSChristoph Hellwig 1406670c2a3aSSagi Grimberg /* 1407670c2a3aSSagi Grimberg * Allow both IPv4 and IPv6 sockets to bind a single port 1408670c2a3aSSagi Grimberg * at the same time. 1409670c2a3aSSagi Grimberg */ 1410670c2a3aSSagi Grimberg ret = rdma_set_afonly(cm_id, 1); 14118f000cacSChristoph Hellwig if (ret) { 1412670c2a3aSSagi Grimberg pr_err("rdma_set_afonly failed (%d)\n", ret); 1413670c2a3aSSagi Grimberg goto out_destroy_id; 1414670c2a3aSSagi Grimberg } 1415670c2a3aSSagi Grimberg 1416670c2a3aSSagi Grimberg ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr); 1417670c2a3aSSagi Grimberg if (ret) { 1418670c2a3aSSagi Grimberg pr_err("binding CM ID to %pISpcs failed (%d)\n", 1419670c2a3aSSagi Grimberg (struct sockaddr *)&addr, ret); 14208f000cacSChristoph Hellwig goto out_destroy_id; 14218f000cacSChristoph Hellwig } 14228f000cacSChristoph Hellwig 14238f000cacSChristoph Hellwig ret = rdma_listen(cm_id, 128); 14248f000cacSChristoph Hellwig if (ret) { 1425670c2a3aSSagi Grimberg pr_err("listening to %pISpcs failed (%d)\n", 1426670c2a3aSSagi Grimberg (struct sockaddr *)&addr, ret); 14278f000cacSChristoph Hellwig goto out_destroy_id; 14288f000cacSChristoph Hellwig } 14298f000cacSChristoph Hellwig 1430670c2a3aSSagi Grimberg pr_info("enabling port %d (%pISpcs)\n", 1431670c2a3aSSagi Grimberg le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr); 14328f000cacSChristoph Hellwig port->priv = cm_id; 14338f000cacSChristoph Hellwig return 0; 14348f000cacSChristoph Hellwig 14358f000cacSChristoph Hellwig out_destroy_id: 14368f000cacSChristoph Hellwig rdma_destroy_id(cm_id); 14378f000cacSChristoph Hellwig return ret; 14388f000cacSChristoph Hellwig } 14398f000cacSChristoph Hellwig 14408f000cacSChristoph Hellwig static void nvmet_rdma_remove_port(struct nvmet_port *port) 14418f000cacSChristoph Hellwig { 1442d8f7750aSSagi Grimberg struct rdma_cm_id *cm_id = xchg(&port->priv, NULL); 14438f000cacSChristoph Hellwig 1444d8f7750aSSagi Grimberg if (cm_id) 14458f000cacSChristoph Hellwig rdma_destroy_id(cm_id); 14468f000cacSChristoph Hellwig } 14478f000cacSChristoph Hellwig 14488f000cacSChristoph Hellwig static struct nvmet_fabrics_ops nvmet_rdma_ops = { 14498f000cacSChristoph Hellwig .owner = THIS_MODULE, 14508f000cacSChristoph Hellwig .type = NVMF_TRTYPE_RDMA, 14518f000cacSChristoph Hellwig .sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE, 14528f000cacSChristoph Hellwig .msdbd = 1, 14538f000cacSChristoph Hellwig .has_keyed_sgls = 1, 14548f000cacSChristoph Hellwig .add_port = nvmet_rdma_add_port, 14558f000cacSChristoph Hellwig .remove_port = nvmet_rdma_remove_port, 14568f000cacSChristoph Hellwig .queue_response = nvmet_rdma_queue_response, 14578f000cacSChristoph Hellwig .delete_ctrl = nvmet_rdma_delete_ctrl, 14588f000cacSChristoph Hellwig }; 14598f000cacSChristoph Hellwig 1460f1d4ef7dSSagi Grimberg static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) 1461f1d4ef7dSSagi Grimberg { 146243b92fd2SIsrael Rukshin struct nvmet_rdma_queue *queue, *tmp; 1463f1d4ef7dSSagi Grimberg 1464f1d4ef7dSSagi Grimberg /* Device is being removed, delete all queues using this device */ 1465f1d4ef7dSSagi Grimberg 
mutex_lock(&nvmet_rdma_queue_mutex); 146643b92fd2SIsrael Rukshin list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, 146743b92fd2SIsrael Rukshin queue_list) { 1468f1d4ef7dSSagi Grimberg if (queue->dev->device != ib_device) 1469f1d4ef7dSSagi Grimberg continue; 1470f1d4ef7dSSagi Grimberg 1471f1d4ef7dSSagi Grimberg pr_info("Removing queue %d\n", queue->idx); 147243b92fd2SIsrael Rukshin list_del_init(&queue->queue_list); 1473f1d4ef7dSSagi Grimberg __nvmet_rdma_queue_disconnect(queue); 1474f1d4ef7dSSagi Grimberg } 1475f1d4ef7dSSagi Grimberg mutex_unlock(&nvmet_rdma_queue_mutex); 1476f1d4ef7dSSagi Grimberg 1477f1d4ef7dSSagi Grimberg flush_scheduled_work(); 1478f1d4ef7dSSagi Grimberg } 1479f1d4ef7dSSagi Grimberg 1480f1d4ef7dSSagi Grimberg static struct ib_client nvmet_rdma_ib_client = { 1481f1d4ef7dSSagi Grimberg .name = "nvmet_rdma", 1482f1d4ef7dSSagi Grimberg .remove = nvmet_rdma_remove_one 1483f1d4ef7dSSagi Grimberg }; 1484f1d4ef7dSSagi Grimberg 14858f000cacSChristoph Hellwig static int __init nvmet_rdma_init(void) 14868f000cacSChristoph Hellwig { 1487f1d4ef7dSSagi Grimberg int ret; 1488f1d4ef7dSSagi Grimberg 1489f1d4ef7dSSagi Grimberg ret = ib_register_client(&nvmet_rdma_ib_client); 1490f1d4ef7dSSagi Grimberg if (ret) 1491f1d4ef7dSSagi Grimberg return ret; 1492f1d4ef7dSSagi Grimberg 1493f1d4ef7dSSagi Grimberg ret = nvmet_register_transport(&nvmet_rdma_ops); 1494f1d4ef7dSSagi Grimberg if (ret) 1495f1d4ef7dSSagi Grimberg goto err_ib_client; 1496f1d4ef7dSSagi Grimberg 1497f1d4ef7dSSagi Grimberg return 0; 1498f1d4ef7dSSagi Grimberg 1499f1d4ef7dSSagi Grimberg err_ib_client: 1500f1d4ef7dSSagi Grimberg ib_unregister_client(&nvmet_rdma_ib_client); 1501f1d4ef7dSSagi Grimberg return ret; 15028f000cacSChristoph Hellwig } 15038f000cacSChristoph Hellwig 15048f000cacSChristoph Hellwig static void __exit nvmet_rdma_exit(void) 15058f000cacSChristoph Hellwig { 15068f000cacSChristoph Hellwig nvmet_unregister_transport(&nvmet_rdma_ops); 1507f1d4ef7dSSagi Grimberg ib_unregister_client(&nvmet_rdma_ib_client); 1508cb4876e8SSagi Grimberg WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); 15098f000cacSChristoph Hellwig ida_destroy(&nvmet_rdma_queue_ida); 15108f000cacSChristoph Hellwig } 15118f000cacSChristoph Hellwig 15128f000cacSChristoph Hellwig module_init(nvmet_rdma_init); 15138f000cacSChristoph Hellwig module_exit(nvmet_rdma_exit); 15148f000cacSChristoph Hellwig 15158f000cacSChristoph Hellwig MODULE_LICENSE("GPL v2"); 15168f000cacSChristoph Hellwig MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */ 1517