/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

struct nvmet_rdma_cmd {
	struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
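	/* rdma-rw API context used for RDMA READ/WRITE of the data payload */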
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct list_head free_rsps;
	spinlock_t rsps_lock;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};

struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *srq_cmds;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
	int inline_data_size;
	int inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (rsp->allocated) {
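		/*
		 * This rsp was kmalloc'ed in nvmet_rdma_get_rsp() because the
		 * free list was empty; it is not part of queue->rsps, so just
		 * free it instead of returning it to the free list.
		 */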
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
				pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
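			/*
			 * Still no send queue space available; put the
			 * request back at the head of the list and stop
			 * processing until more completions free up WRs.
			 */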
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}


static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgl(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * we didn't setup the controller yet in case
		 * of admin connect error, just disconnect and
		 * cleanup the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
	}
}

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
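		/*
		 * The host asked for remote invalidation of its rkey, so
		 * send the completion with SEND with Invalidate.
		 */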
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	nvmet_req_execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	ret = nvmet_req_alloc_sgl(&rsp->req);
	if (ret < 0)
		goto error_out;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
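		/*
		 * Give back the WR credits we just reserved; the request is
		 * queued on rsp_wr_wait_list and retried once completions
		 * free up send queue space.
		 */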
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		nvmet_req_execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++) {
		ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return 0;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_port *port = cm_id->context;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(port->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			port->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
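		/*
		 * Clamp the port's inline data size to what the device's
		 * receive SGE limit can actually support.
		 */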
		port->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = port->inline_data_size;
	ndev->inline_page_count = inline_page_count;
	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

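/*
 * Drain the QP first so all outstanding work requests are flushed before
 * the CM ID, QP and CQ are torn down.
 */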
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp *qp = queue->cm_id->qp;

	ib_drain_qp(qp);
	rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(qp);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */
for IO queues? */ 11038f000cacSChristoph Hellwig 11048f000cacSChristoph Hellwig return 0; 11058f000cacSChristoph Hellwig } 11068f000cacSChristoph Hellwig 11078f000cacSChristoph Hellwig static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id, 11088f000cacSChristoph Hellwig enum nvme_rdma_cm_status status) 11098f000cacSChristoph Hellwig { 11108f000cacSChristoph Hellwig struct nvme_rdma_cm_rej rej; 11118f000cacSChristoph Hellwig 11127a01a6eaSMax Gurtovoy pr_debug("rejecting connect request: status %d (%s)\n", 11137a01a6eaSMax Gurtovoy status, nvme_rdma_cm_msg(status)); 11147a01a6eaSMax Gurtovoy 11158f000cacSChristoph Hellwig rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 11168f000cacSChristoph Hellwig rej.sts = cpu_to_le16(status); 11178f000cacSChristoph Hellwig 11188f000cacSChristoph Hellwig return rdma_reject(cm_id, (void *)&rej, sizeof(rej)); 11198f000cacSChristoph Hellwig } 11208f000cacSChristoph Hellwig 11218f000cacSChristoph Hellwig static struct nvmet_rdma_queue * 11228f000cacSChristoph Hellwig nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, 11238f000cacSChristoph Hellwig struct rdma_cm_id *cm_id, 11248f000cacSChristoph Hellwig struct rdma_cm_event *event) 11258f000cacSChristoph Hellwig { 11268f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 11278f000cacSChristoph Hellwig int ret; 11288f000cacSChristoph Hellwig 11298f000cacSChristoph Hellwig queue = kzalloc(sizeof(*queue), GFP_KERNEL); 11308f000cacSChristoph Hellwig if (!queue) { 11318f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11328f000cacSChristoph Hellwig goto out_reject; 11338f000cacSChristoph Hellwig } 11348f000cacSChristoph Hellwig 11358f000cacSChristoph Hellwig ret = nvmet_sq_init(&queue->nvme_sq); 113670d4281cSBart Van Assche if (ret) { 113770d4281cSBart Van Assche ret = NVME_RDMA_CM_NO_RSC; 11388f000cacSChristoph Hellwig goto out_free_queue; 113970d4281cSBart Van Assche } 11408f000cacSChristoph Hellwig 11418f000cacSChristoph Hellwig ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); 11428f000cacSChristoph Hellwig if (ret) 11438f000cacSChristoph Hellwig goto out_destroy_sq; 11448f000cacSChristoph Hellwig 11458f000cacSChristoph Hellwig /* 11468f000cacSChristoph Hellwig * Schedules the actual release because calling rdma_destroy_id from 11478f000cacSChristoph Hellwig * inside a CM callback would trigger a deadlock. (great API design..) 
11488f000cacSChristoph Hellwig */ 11498f000cacSChristoph Hellwig INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); 11508f000cacSChristoph Hellwig queue->dev = ndev; 11518f000cacSChristoph Hellwig queue->cm_id = cm_id; 11528f000cacSChristoph Hellwig 11538f000cacSChristoph Hellwig spin_lock_init(&queue->state_lock); 11548f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_CONNECTING; 11558f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->rsp_wait_list); 11568f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->rsp_wr_wait_list); 11578f000cacSChristoph Hellwig spin_lock_init(&queue->rsp_wr_wait_lock); 11588f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->free_rsps); 11598f000cacSChristoph Hellwig spin_lock_init(&queue->rsps_lock); 1160766dbb17SSagi Grimberg INIT_LIST_HEAD(&queue->queue_list); 11618f000cacSChristoph Hellwig 11628f000cacSChristoph Hellwig queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); 11638f000cacSChristoph Hellwig if (queue->idx < 0) { 11648f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11656ccaeb56SChristophe JAILLET goto out_destroy_sq; 11668f000cacSChristoph Hellwig } 11678f000cacSChristoph Hellwig 11688f000cacSChristoph Hellwig ret = nvmet_rdma_alloc_rsps(queue); 11698f000cacSChristoph Hellwig if (ret) { 11708f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11718f000cacSChristoph Hellwig goto out_ida_remove; 11728f000cacSChristoph Hellwig } 11738f000cacSChristoph Hellwig 11748f000cacSChristoph Hellwig if (!ndev->srq) { 11758f000cacSChristoph Hellwig queue->cmds = nvmet_rdma_alloc_cmds(ndev, 11768f000cacSChristoph Hellwig queue->recv_queue_size, 11778f000cacSChristoph Hellwig !queue->host_qid); 11788f000cacSChristoph Hellwig if (IS_ERR(queue->cmds)) { 11798f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11808f000cacSChristoph Hellwig goto out_free_responses; 11818f000cacSChristoph Hellwig } 11828f000cacSChristoph Hellwig } 11838f000cacSChristoph Hellwig 11848f000cacSChristoph Hellwig ret = nvmet_rdma_create_queue_ib(queue); 11858f000cacSChristoph Hellwig if (ret) { 11868f000cacSChristoph Hellwig pr_err("%s: creating RDMA queue failed (%d).\n", 11878f000cacSChristoph Hellwig __func__, ret); 11888f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11898f000cacSChristoph Hellwig goto out_free_cmds; 11908f000cacSChristoph Hellwig } 11918f000cacSChristoph Hellwig 11928f000cacSChristoph Hellwig return queue; 11938f000cacSChristoph Hellwig 11948f000cacSChristoph Hellwig out_free_cmds: 11958f000cacSChristoph Hellwig if (!ndev->srq) { 11968f000cacSChristoph Hellwig nvmet_rdma_free_cmds(queue->dev, queue->cmds, 11978f000cacSChristoph Hellwig queue->recv_queue_size, 11988f000cacSChristoph Hellwig !queue->host_qid); 11998f000cacSChristoph Hellwig } 12008f000cacSChristoph Hellwig out_free_responses: 12018f000cacSChristoph Hellwig nvmet_rdma_free_rsps(queue); 12028f000cacSChristoph Hellwig out_ida_remove: 12038f000cacSChristoph Hellwig ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); 12048f000cacSChristoph Hellwig out_destroy_sq: 12058f000cacSChristoph Hellwig nvmet_sq_destroy(&queue->nvme_sq); 12068f000cacSChristoph Hellwig out_free_queue: 12078f000cacSChristoph Hellwig kfree(queue); 12088f000cacSChristoph Hellwig out_reject: 12098f000cacSChristoph Hellwig nvmet_rdma_cm_reject(cm_id, ret); 12108f000cacSChristoph Hellwig return NULL; 12118f000cacSChristoph Hellwig } 12128f000cacSChristoph Hellwig 12138f000cacSChristoph Hellwig static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) 12148f000cacSChristoph Hellwig { 
12158f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue = priv; 12168f000cacSChristoph Hellwig 12178f000cacSChristoph Hellwig switch (event->event) { 12188f000cacSChristoph Hellwig case IB_EVENT_COMM_EST: 12198f000cacSChristoph Hellwig rdma_notify(queue->cm_id, event->event); 12208f000cacSChristoph Hellwig break; 12218f000cacSChristoph Hellwig default: 1222675796beSMax Gurtovoy pr_err("received IB QP event: %s (%d)\n", 1223675796beSMax Gurtovoy ib_event_msg(event->event), event->event); 12248f000cacSChristoph Hellwig break; 12258f000cacSChristoph Hellwig } 12268f000cacSChristoph Hellwig } 12278f000cacSChristoph Hellwig 12288f000cacSChristoph Hellwig static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, 12298f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue, 12308f000cacSChristoph Hellwig struct rdma_conn_param *p) 12318f000cacSChristoph Hellwig { 12328f000cacSChristoph Hellwig struct rdma_conn_param param = { }; 12338f000cacSChristoph Hellwig struct nvme_rdma_cm_rep priv = { }; 12348f000cacSChristoph Hellwig int ret = -ENOMEM; 12358f000cacSChristoph Hellwig 12368f000cacSChristoph Hellwig param.rnr_retry_count = 7; 12378f000cacSChristoph Hellwig param.flow_control = 1; 12388f000cacSChristoph Hellwig param.initiator_depth = min_t(u8, p->initiator_depth, 12398f000cacSChristoph Hellwig queue->dev->device->attrs.max_qp_init_rd_atom); 12408f000cacSChristoph Hellwig param.private_data = &priv; 12418f000cacSChristoph Hellwig param.private_data_len = sizeof(priv); 12428f000cacSChristoph Hellwig priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 12438f000cacSChristoph Hellwig priv.crqsize = cpu_to_le16(queue->recv_queue_size); 12448f000cacSChristoph Hellwig 12458f000cacSChristoph Hellwig ret = rdma_accept(cm_id, &param); 12468f000cacSChristoph Hellwig if (ret) 12478f000cacSChristoph Hellwig pr_err("rdma_accept failed (error code = %d)\n", ret); 12488f000cacSChristoph Hellwig 12498f000cacSChristoph Hellwig return ret; 12508f000cacSChristoph Hellwig } 12518f000cacSChristoph Hellwig 12528f000cacSChristoph Hellwig static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, 12538f000cacSChristoph Hellwig struct rdma_cm_event *event) 12548f000cacSChristoph Hellwig { 12558f000cacSChristoph Hellwig struct nvmet_rdma_device *ndev; 12568f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 12578f000cacSChristoph Hellwig int ret = -EINVAL; 12588f000cacSChristoph Hellwig 12598f000cacSChristoph Hellwig ndev = nvmet_rdma_find_get_device(cm_id); 12608f000cacSChristoph Hellwig if (!ndev) { 12618f000cacSChristoph Hellwig nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC); 12628f000cacSChristoph Hellwig return -ECONNREFUSED; 12638f000cacSChristoph Hellwig } 12648f000cacSChristoph Hellwig 12658f000cacSChristoph Hellwig queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); 12668f000cacSChristoph Hellwig if (!queue) { 12678f000cacSChristoph Hellwig ret = -ENOMEM; 12688f000cacSChristoph Hellwig goto put_device; 12698f000cacSChristoph Hellwig } 12708f000cacSChristoph Hellwig queue->port = cm_id->context; 12718f000cacSChristoph Hellwig 1272777dc823SSagi Grimberg if (queue->host_qid == 0) { 1273777dc823SSagi Grimberg /* Let inflight controller teardown complete */ 1274777dc823SSagi Grimberg flush_scheduled_work(); 1275777dc823SSagi Grimberg } 1276777dc823SSagi Grimberg 12778f000cacSChristoph Hellwig ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); 1278e1a2ee24SIsrael Rukshin if (ret) { 1279e1a2ee24SIsrael Rukshin schedule_work(&queue->release_work); 1280e1a2ee24SIsrael Rukshin /* Destroying rdma_cm
id is not needed here */ 1281e1a2ee24SIsrael Rukshin return 0; 1282e1a2ee24SIsrael Rukshin } 12838f000cacSChristoph Hellwig 12848f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 12858f000cacSChristoph Hellwig list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); 12868f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 12878f000cacSChristoph Hellwig 12888f000cacSChristoph Hellwig return 0; 12898f000cacSChristoph Hellwig 12908f000cacSChristoph Hellwig put_device: 12918f000cacSChristoph Hellwig kref_put(&ndev->ref, nvmet_rdma_free_dev); 12928f000cacSChristoph Hellwig 12938f000cacSChristoph Hellwig return ret; 12948f000cacSChristoph Hellwig } 12958f000cacSChristoph Hellwig 12968f000cacSChristoph Hellwig static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) 12978f000cacSChristoph Hellwig { 12988f000cacSChristoph Hellwig unsigned long flags; 12998f000cacSChristoph Hellwig 13008f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13018f000cacSChristoph Hellwig if (queue->state != NVMET_RDMA_Q_CONNECTING) { 13028f000cacSChristoph Hellwig pr_warn("trying to establish a connected queue\n"); 13038f000cacSChristoph Hellwig goto out_unlock; 13048f000cacSChristoph Hellwig } 13058f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_LIVE; 13068f000cacSChristoph Hellwig 13078f000cacSChristoph Hellwig while (!list_empty(&queue->rsp_wait_list)) { 13088f000cacSChristoph Hellwig struct nvmet_rdma_rsp *cmd; 13098f000cacSChristoph Hellwig 13108f000cacSChristoph Hellwig cmd = list_first_entry(&queue->rsp_wait_list, 13118f000cacSChristoph Hellwig struct nvmet_rdma_rsp, wait_list); 13128f000cacSChristoph Hellwig list_del(&cmd->wait_list); 13138f000cacSChristoph Hellwig 13148f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13158f000cacSChristoph Hellwig nvmet_rdma_handle_command(queue, cmd); 13168f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13178f000cacSChristoph Hellwig } 13188f000cacSChristoph Hellwig 13198f000cacSChristoph Hellwig out_unlock: 13208f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13218f000cacSChristoph Hellwig } 13228f000cacSChristoph Hellwig 13238f000cacSChristoph Hellwig static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) 13248f000cacSChristoph Hellwig { 13258f000cacSChristoph Hellwig bool disconnect = false; 13268f000cacSChristoph Hellwig unsigned long flags; 13278f000cacSChristoph Hellwig 13288f000cacSChristoph Hellwig pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); 13298f000cacSChristoph Hellwig 13308f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13318f000cacSChristoph Hellwig switch (queue->state) { 13328f000cacSChristoph Hellwig case NVMET_RDMA_Q_CONNECTING: 13338f000cacSChristoph Hellwig case NVMET_RDMA_Q_LIVE: 13348f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_DISCONNECTING; 1335d8f7750aSSagi Grimberg disconnect = true; 13368f000cacSChristoph Hellwig break; 13378f000cacSChristoph Hellwig case NVMET_RDMA_Q_DISCONNECTING: 13388f000cacSChristoph Hellwig break; 13398f000cacSChristoph Hellwig } 13408f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13418f000cacSChristoph Hellwig 13428f000cacSChristoph Hellwig if (disconnect) { 13438f000cacSChristoph Hellwig rdma_disconnect(queue->cm_id); 13448f000cacSChristoph Hellwig schedule_work(&queue->release_work); 13458f000cacSChristoph Hellwig } 13468f000cacSChristoph Hellwig } 
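/*
 * Unlink the queue from the global nvmet_rdma_queue_list under
 * nvmet_rdma_queue_mutex and only disconnect if this caller actually
 * removed the entry, so a disconnect racing with controller delete or
 * device removal cannot tear the same queue down twice.
 */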
13478f000cacSChristoph Hellwig 13488f000cacSChristoph Hellwig static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) 13498f000cacSChristoph Hellwig { 13508f000cacSChristoph Hellwig bool disconnect = false; 13518f000cacSChristoph Hellwig 13528f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 13538f000cacSChristoph Hellwig if (!list_empty(&queue->queue_list)) { 13548f000cacSChristoph Hellwig list_del_init(&queue->queue_list); 13558f000cacSChristoph Hellwig disconnect = true; 13568f000cacSChristoph Hellwig } 13578f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 13588f000cacSChristoph Hellwig 13598f000cacSChristoph Hellwig if (disconnect) 13608f000cacSChristoph Hellwig __nvmet_rdma_queue_disconnect(queue); 13618f000cacSChristoph Hellwig } 13628f000cacSChristoph Hellwig 13638f000cacSChristoph Hellwig static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, 13648f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue) 13658f000cacSChristoph Hellwig { 13668f000cacSChristoph Hellwig WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); 13678f000cacSChristoph Hellwig 1368766dbb17SSagi Grimberg mutex_lock(&nvmet_rdma_queue_mutex); 1369766dbb17SSagi Grimberg if (!list_empty(&queue->queue_list)) 1370766dbb17SSagi Grimberg list_del_init(&queue->queue_list); 1371766dbb17SSagi Grimberg mutex_unlock(&nvmet_rdma_queue_mutex); 1372766dbb17SSagi Grimberg 1373766dbb17SSagi Grimberg pr_err("failed to connect queue %d\n", queue->idx); 13748f000cacSChristoph Hellwig schedule_work(&queue->release_work); 13758f000cacSChristoph Hellwig } 13768f000cacSChristoph Hellwig 1377d8f7750aSSagi Grimberg /** 1378d8f7750aSSagi Grimberg * nvmet_rdma_device_removal() - Handle RDMA device removal 1379f1d4ef7dSSagi Grimberg * @cm_id: rdma_cm id, used for nvmet port 1380d8f7750aSSagi Grimberg * @queue: nvmet rdma queue (cm id qp_context) 1381d8f7750aSSagi Grimberg * 1382d8f7750aSSagi Grimberg * DEVICE_REMOVAL event notifies us that the RDMA device is about 1383f1d4ef7dSSagi Grimberg * to unplug. Note that this event can be generated on a normal 1384f1d4ef7dSSagi Grimberg * queue cm_id and/or a device bound listener cm_id (in which 1385f1d4ef7dSSagi Grimberg * case queue will be NULL). 1386d8f7750aSSagi Grimberg * 1387f1d4ef7dSSagi Grimberg * We registered an ib_client to handle device removal for queues, 1388f1d4ef7dSSagi Grimberg * so we only need to handle the listening port cm_ids. In this case 1389d8f7750aSSagi Grimberg * we nullify the priv to prevent double cm_id destruction and destroy 1390d8f7750aSSagi Grimberg * the cm_id implicitly by returning a non-zero rc to the callout. 1391d8f7750aSSagi Grimberg */ 1392d8f7750aSSagi Grimberg static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, 1393d8f7750aSSagi Grimberg struct nvmet_rdma_queue *queue) 1394d8f7750aSSagi Grimberg { 1395f1d4ef7dSSagi Grimberg struct nvmet_port *port; 1396d8f7750aSSagi Grimberg 1397f1d4ef7dSSagi Grimberg if (queue) { 1398f1d4ef7dSSagi Grimberg /* 1399f1d4ef7dSSagi Grimberg * This is a queue cm_id. We have registered 1400f1d4ef7dSSagi Grimberg * an ib_client to handle queue removal, 1401f1d4ef7dSSagi Grimberg * so don't interfere and just return. 1402f1d4ef7dSSagi Grimberg */ 1403f1d4ef7dSSagi Grimberg return 0; 1404f1d4ef7dSSagi Grimberg } 1405f1d4ef7dSSagi Grimberg 1406f1d4ef7dSSagi Grimberg port = cm_id->context; 1407d8f7750aSSagi Grimberg 1408d8f7750aSSagi Grimberg /* 1409d8f7750aSSagi Grimberg * This is a listener cm_id.
Make sure that 1410d8f7750aSSagi Grimberg * future remove_port won't invoke a double 1411d8f7750aSSagi Grimberg * cm_id destroy. Use atomic xchg to make sure 1412d8f7750aSSagi Grimberg * we don't compete with remove_port. 1413d8f7750aSSagi Grimberg */ 1414d8f7750aSSagi Grimberg if (xchg(&port->priv, NULL) != cm_id) 1415d8f7750aSSagi Grimberg return 0; 1416d8f7750aSSagi Grimberg 1417d8f7750aSSagi Grimberg /* 1418d8f7750aSSagi Grimberg * We need to return 1 so that the core will destroy 1419d8f7750aSSagi Grimberg * its own ID. What a great API design.. 1420d8f7750aSSagi Grimberg */ 1421d8f7750aSSagi Grimberg return 1; 1422d8f7750aSSagi Grimberg } 1423d8f7750aSSagi Grimberg 14248f000cacSChristoph Hellwig static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, 14258f000cacSChristoph Hellwig struct rdma_cm_event *event) 14268f000cacSChristoph Hellwig { 14278f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue = NULL; 14288f000cacSChristoph Hellwig int ret = 0; 14298f000cacSChristoph Hellwig 14308f000cacSChristoph Hellwig if (cm_id->qp) 14318f000cacSChristoph Hellwig queue = cm_id->qp->qp_context; 14328f000cacSChristoph Hellwig 14338f000cacSChristoph Hellwig pr_debug("%s (%d): status %d id %p\n", 14348f000cacSChristoph Hellwig rdma_event_msg(event->event), event->event, 14358f000cacSChristoph Hellwig event->status, cm_id); 14368f000cacSChristoph Hellwig 14378f000cacSChristoph Hellwig switch (event->event) { 14388f000cacSChristoph Hellwig case RDMA_CM_EVENT_CONNECT_REQUEST: 14398f000cacSChristoph Hellwig ret = nvmet_rdma_queue_connect(cm_id, event); 14408f000cacSChristoph Hellwig break; 14418f000cacSChristoph Hellwig case RDMA_CM_EVENT_ESTABLISHED: 14428f000cacSChristoph Hellwig nvmet_rdma_queue_established(queue); 14438f000cacSChristoph Hellwig break; 14448f000cacSChristoph Hellwig case RDMA_CM_EVENT_ADDR_CHANGE: 14458f000cacSChristoph Hellwig case RDMA_CM_EVENT_DISCONNECTED: 14468f000cacSChristoph Hellwig case RDMA_CM_EVENT_TIMEWAIT_EXIT: 14478f000cacSChristoph Hellwig nvmet_rdma_queue_disconnect(queue); 1448d8f7750aSSagi Grimberg break; 1449d8f7750aSSagi Grimberg case RDMA_CM_EVENT_DEVICE_REMOVAL: 1450d8f7750aSSagi Grimberg ret = nvmet_rdma_device_removal(cm_id, queue); 14518f000cacSChristoph Hellwig break; 14528f000cacSChristoph Hellwig case RDMA_CM_EVENT_REJECTED: 1453512fb1b3SSteve Wise pr_debug("Connection rejected: %s\n", 1454512fb1b3SSteve Wise rdma_reject_msg(cm_id, event->status)); 1455512fb1b3SSteve Wise /* FALLTHROUGH */ 14568f000cacSChristoph Hellwig case RDMA_CM_EVENT_UNREACHABLE: 14578f000cacSChristoph Hellwig case RDMA_CM_EVENT_CONNECT_ERROR: 14588f000cacSChristoph Hellwig nvmet_rdma_queue_connect_fail(cm_id, queue); 14598f000cacSChristoph Hellwig break; 14608f000cacSChristoph Hellwig default: 14618f000cacSChristoph Hellwig pr_err("received unrecognized RDMA CM event %d\n", 14628f000cacSChristoph Hellwig event->event); 14638f000cacSChristoph Hellwig break; 14648f000cacSChristoph Hellwig } 14658f000cacSChristoph Hellwig 14668f000cacSChristoph Hellwig return ret; 14678f000cacSChristoph Hellwig } 14688f000cacSChristoph Hellwig 14698f000cacSChristoph Hellwig static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) 14708f000cacSChristoph Hellwig { 14718f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 14728f000cacSChristoph Hellwig 14738f000cacSChristoph Hellwig restart: 14748f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 14758f000cacSChristoph Hellwig list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { 14768f000cacSChristoph Hellwig if
(queue->nvme_sq.ctrl == ctrl) { 14778f000cacSChristoph Hellwig list_del_init(&queue->queue_list); 14788f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 14798f000cacSChristoph Hellwig 14808f000cacSChristoph Hellwig __nvmet_rdma_queue_disconnect(queue); 14818f000cacSChristoph Hellwig goto restart; 14828f000cacSChristoph Hellwig } 14838f000cacSChristoph Hellwig } 14848f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 14858f000cacSChristoph Hellwig } 14868f000cacSChristoph Hellwig 14878f000cacSChristoph Hellwig static int nvmet_rdma_add_port(struct nvmet_port *port) 14888f000cacSChristoph Hellwig { 14898f000cacSChristoph Hellwig struct rdma_cm_id *cm_id; 1490670c2a3aSSagi Grimberg struct sockaddr_storage addr = { }; 1491670c2a3aSSagi Grimberg __kernel_sa_family_t af; 14928f000cacSChristoph Hellwig int ret; 14938f000cacSChristoph Hellwig 14948f000cacSChristoph Hellwig switch (port->disc_addr.adrfam) { 14958f000cacSChristoph Hellwig case NVMF_ADDR_FAMILY_IP4: 1496670c2a3aSSagi Grimberg af = AF_INET; 1497670c2a3aSSagi Grimberg break; 1498670c2a3aSSagi Grimberg case NVMF_ADDR_FAMILY_IP6: 1499670c2a3aSSagi Grimberg af = AF_INET6; 15008f000cacSChristoph Hellwig break; 15018f000cacSChristoph Hellwig default: 15028f000cacSChristoph Hellwig pr_err("address family %d not supported\n", 15038f000cacSChristoph Hellwig port->disc_addr.adrfam); 15048f000cacSChristoph Hellwig return -EINVAL; 15058f000cacSChristoph Hellwig } 15068f000cacSChristoph Hellwig 15070d5ee2b2SSteve Wise if (port->inline_data_size < 0) { 15080d5ee2b2SSteve Wise port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE; 15090d5ee2b2SSteve Wise } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) { 15100d5ee2b2SSteve Wise pr_warn("inline_data_size %u is too large, reducing to %u\n", 15110d5ee2b2SSteve Wise port->inline_data_size, 15120d5ee2b2SSteve Wise NVMET_RDMA_MAX_INLINE_DATA_SIZE); 15130d5ee2b2SSteve Wise port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; 15140d5ee2b2SSteve Wise } 15150d5ee2b2SSteve Wise 1516670c2a3aSSagi Grimberg ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr, 1517670c2a3aSSagi Grimberg port->disc_addr.trsvcid, &addr); 1518670c2a3aSSagi Grimberg if (ret) { 1519670c2a3aSSagi Grimberg pr_err("malformed ip/port passed: %s:%s\n", 1520670c2a3aSSagi Grimberg port->disc_addr.traddr, port->disc_addr.trsvcid); 15218f000cacSChristoph Hellwig return ret; 1522670c2a3aSSagi Grimberg } 15238f000cacSChristoph Hellwig 15248f000cacSChristoph Hellwig cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port, 15258f000cacSChristoph Hellwig RDMA_PS_TCP, IB_QPT_RC); 15268f000cacSChristoph Hellwig if (IS_ERR(cm_id)) { 15278f000cacSChristoph Hellwig pr_err("CM ID creation failed\n"); 15288f000cacSChristoph Hellwig return PTR_ERR(cm_id); 15298f000cacSChristoph Hellwig } 15308f000cacSChristoph Hellwig 1531670c2a3aSSagi Grimberg /* 1532670c2a3aSSagi Grimberg * Allow both IPv4 and IPv6 sockets to bind a single port 1533670c2a3aSSagi Grimberg * at the same time. 
1534670c2a3aSSagi Grimberg */ 1535670c2a3aSSagi Grimberg ret = rdma_set_afonly(cm_id, 1); 15368f000cacSChristoph Hellwig if (ret) { 1537670c2a3aSSagi Grimberg pr_err("rdma_set_afonly failed (%d)\n", ret); 1538670c2a3aSSagi Grimberg goto out_destroy_id; 1539670c2a3aSSagi Grimberg } 1540670c2a3aSSagi Grimberg 1541670c2a3aSSagi Grimberg ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr); 1542670c2a3aSSagi Grimberg if (ret) { 1543670c2a3aSSagi Grimberg pr_err("binding CM ID to %pISpcs failed (%d)\n", 1544670c2a3aSSagi Grimberg (struct sockaddr *)&addr, ret); 15458f000cacSChristoph Hellwig goto out_destroy_id; 15468f000cacSChristoph Hellwig } 15478f000cacSChristoph Hellwig 15488f000cacSChristoph Hellwig ret = rdma_listen(cm_id, 128); 15498f000cacSChristoph Hellwig if (ret) { 1550670c2a3aSSagi Grimberg pr_err("listening to %pISpcs failed (%d)\n", 1551670c2a3aSSagi Grimberg (struct sockaddr *)&addr, ret); 15528f000cacSChristoph Hellwig goto out_destroy_id; 15538f000cacSChristoph Hellwig } 15548f000cacSChristoph Hellwig 1555670c2a3aSSagi Grimberg pr_info("enabling port %d (%pISpcs)\n", 1556670c2a3aSSagi Grimberg le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr); 15578f000cacSChristoph Hellwig port->priv = cm_id; 15588f000cacSChristoph Hellwig return 0; 15598f000cacSChristoph Hellwig 15608f000cacSChristoph Hellwig out_destroy_id: 15618f000cacSChristoph Hellwig rdma_destroy_id(cm_id); 15628f000cacSChristoph Hellwig return ret; 15638f000cacSChristoph Hellwig } 15648f000cacSChristoph Hellwig 15658f000cacSChristoph Hellwig static void nvmet_rdma_remove_port(struct nvmet_port *port) 15668f000cacSChristoph Hellwig { 1567d8f7750aSSagi Grimberg struct rdma_cm_id *cm_id = xchg(&port->priv, NULL); 15688f000cacSChristoph Hellwig 1569d8f7750aSSagi Grimberg if (cm_id) 15708f000cacSChristoph Hellwig rdma_destroy_id(cm_id); 15718f000cacSChristoph Hellwig } 15728f000cacSChristoph Hellwig 15734c652685SSagi Grimberg static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, 15744c652685SSagi Grimberg struct nvmet_port *port, char *traddr) 15754c652685SSagi Grimberg { 15764c652685SSagi Grimberg struct rdma_cm_id *cm_id = port->priv; 15774c652685SSagi Grimberg 15784c652685SSagi Grimberg if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { 15794c652685SSagi Grimberg struct nvmet_rdma_rsp *rsp = 15804c652685SSagi Grimberg container_of(req, struct nvmet_rdma_rsp, req); 15814c652685SSagi Grimberg struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; 15824c652685SSagi Grimberg struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; 15834c652685SSagi Grimberg 15844c652685SSagi Grimberg sprintf(traddr, "%pISc", addr); 15854c652685SSagi Grimberg } else { 15864c652685SSagi Grimberg memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); 15874c652685SSagi Grimberg } 15884c652685SSagi Grimberg } 15894c652685SSagi Grimberg 1590e929f06dSChristoph Hellwig static const struct nvmet_fabrics_ops nvmet_rdma_ops = { 15918f000cacSChristoph Hellwig .owner = THIS_MODULE, 15928f000cacSChristoph Hellwig .type = NVMF_TRTYPE_RDMA, 15938f000cacSChristoph Hellwig .msdbd = 1, 15948f000cacSChristoph Hellwig .has_keyed_sgls = 1, 15958f000cacSChristoph Hellwig .add_port = nvmet_rdma_add_port, 15968f000cacSChristoph Hellwig .remove_port = nvmet_rdma_remove_port, 15978f000cacSChristoph Hellwig .queue_response = nvmet_rdma_queue_response, 15988f000cacSChristoph Hellwig .delete_ctrl = nvmet_rdma_delete_ctrl, 15994c652685SSagi Grimberg .disc_traddr = nvmet_rdma_disc_port_addr, 16008f000cacSChristoph Hellwig }; 
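/*
 * ib_client removal callback: invoked by the IB core when an RDMA device
 * is unregistered.  If any nvmet queue uses that device, unlink and
 * disconnect every such queue, then flush the scheduled release work so
 * the queues are fully freed before the device goes away.
 */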
16018f000cacSChristoph Hellwig 1602f1d4ef7dSSagi Grimberg static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) 1603f1d4ef7dSSagi Grimberg { 160443b92fd2SIsrael Rukshin struct nvmet_rdma_queue *queue, *tmp; 1605a3dd7d00SMax Gurtovoy struct nvmet_rdma_device *ndev; 1606a3dd7d00SMax Gurtovoy bool found = false; 1607f1d4ef7dSSagi Grimberg 1608a3dd7d00SMax Gurtovoy mutex_lock(&device_list_mutex); 1609a3dd7d00SMax Gurtovoy list_for_each_entry(ndev, &device_list, entry) { 1610a3dd7d00SMax Gurtovoy if (ndev->device == ib_device) { 1611a3dd7d00SMax Gurtovoy found = true; 1612a3dd7d00SMax Gurtovoy break; 1613a3dd7d00SMax Gurtovoy } 1614a3dd7d00SMax Gurtovoy } 1615a3dd7d00SMax Gurtovoy mutex_unlock(&device_list_mutex); 1616a3dd7d00SMax Gurtovoy 1617a3dd7d00SMax Gurtovoy if (!found) 1618a3dd7d00SMax Gurtovoy return; 1619a3dd7d00SMax Gurtovoy 1620a3dd7d00SMax Gurtovoy /* 1621a3dd7d00SMax Gurtovoy * IB Device that is used by nvmet controllers is being removed, 1622a3dd7d00SMax Gurtovoy * delete all queues using this device. 1623a3dd7d00SMax Gurtovoy */ 1624f1d4ef7dSSagi Grimberg mutex_lock(&nvmet_rdma_queue_mutex); 162543b92fd2SIsrael Rukshin list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, 162643b92fd2SIsrael Rukshin queue_list) { 1627f1d4ef7dSSagi Grimberg if (queue->dev->device != ib_device) 1628f1d4ef7dSSagi Grimberg continue; 1629f1d4ef7dSSagi Grimberg 1630f1d4ef7dSSagi Grimberg pr_info("Removing queue %d\n", queue->idx); 163143b92fd2SIsrael Rukshin list_del_init(&queue->queue_list); 1632f1d4ef7dSSagi Grimberg __nvmet_rdma_queue_disconnect(queue); 1633f1d4ef7dSSagi Grimberg } 1634f1d4ef7dSSagi Grimberg mutex_unlock(&nvmet_rdma_queue_mutex); 1635f1d4ef7dSSagi Grimberg 1636f1d4ef7dSSagi Grimberg flush_scheduled_work(); 1637f1d4ef7dSSagi Grimberg } 1638f1d4ef7dSSagi Grimberg 1639f1d4ef7dSSagi Grimberg static struct ib_client nvmet_rdma_ib_client = { 1640f1d4ef7dSSagi Grimberg .name = "nvmet_rdma", 1641f1d4ef7dSSagi Grimberg .remove = nvmet_rdma_remove_one 1642f1d4ef7dSSagi Grimberg }; 1643f1d4ef7dSSagi Grimberg 16448f000cacSChristoph Hellwig static int __init nvmet_rdma_init(void) 16458f000cacSChristoph Hellwig { 1646f1d4ef7dSSagi Grimberg int ret; 1647f1d4ef7dSSagi Grimberg 1648f1d4ef7dSSagi Grimberg ret = ib_register_client(&nvmet_rdma_ib_client); 1649f1d4ef7dSSagi Grimberg if (ret) 1650f1d4ef7dSSagi Grimberg return ret; 1651f1d4ef7dSSagi Grimberg 1652f1d4ef7dSSagi Grimberg ret = nvmet_register_transport(&nvmet_rdma_ops); 1653f1d4ef7dSSagi Grimberg if (ret) 1654f1d4ef7dSSagi Grimberg goto err_ib_client; 1655f1d4ef7dSSagi Grimberg 1656f1d4ef7dSSagi Grimberg return 0; 1657f1d4ef7dSSagi Grimberg 1658f1d4ef7dSSagi Grimberg err_ib_client: 1659f1d4ef7dSSagi Grimberg ib_unregister_client(&nvmet_rdma_ib_client); 1660f1d4ef7dSSagi Grimberg return ret; 16618f000cacSChristoph Hellwig } 16628f000cacSChristoph Hellwig 16638f000cacSChristoph Hellwig static void __exit nvmet_rdma_exit(void) 16648f000cacSChristoph Hellwig { 16658f000cacSChristoph Hellwig nvmet_unregister_transport(&nvmet_rdma_ops); 1666f1d4ef7dSSagi Grimberg ib_unregister_client(&nvmet_rdma_ib_client); 1667cb4876e8SSagi Grimberg WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); 16688f000cacSChristoph Hellwig ida_destroy(&nvmet_rdma_queue_ida); 16698f000cacSChristoph Hellwig } 16708f000cacSChristoph Hellwig 16718f000cacSChristoph Hellwig module_init(nvmet_rdma_init); 16728f000cacSChristoph Hellwig module_exit(nvmet_rdma_exit); 16738f000cacSChristoph Hellwig 16748f000cacSChristoph Hellwig MODULE_LICENSE("GPL 
v2"); 16758f000cacSChristoph Hellwig MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */ 1676