/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

struct nvmet_rdma_cmd {
	struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
	int			inline_data_size;
	int			inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;
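
/*
 * Number of pages needed to hold len bytes of inline data laid out from
 * offset 0, i.e. DIV_ROUND_UP(len, PAGE_SIZE) for len > 0.
 */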
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
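
/*
 * Take a response from the queue's free list.  If the free list is empty,
 * fall back to a dynamic allocation; nvmet_rdma_put_rsp() frees such
 * responses instead of returning them to the list.
 */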
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}
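
/*
 * Allocate and DMA-map one page per inline data SGE; sge[0] is reserved
 * for the command capsule itself.  A no-op when the port has inline data
 * disabled.
 */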
static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}
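
/* Allocate the array of RECV commands for a queue (or the device SRQ). */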
static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}
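
/*
 * Preallocate the response pool: two responses per posted RECV, since a
 * response may still be waiting for its SEND completion after the RECV
 * buffer it came from has already been reposted.
 */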
static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}
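
/*
 * Retry commands that were parked because the send queue had no free work
 * requests, stopping at the first one that still cannot be executed.
 */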
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgl(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * we didn't setup the controller yet in case
		 * of admin connect error, just disconnect and
		 * cleanup the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}
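
/*
 * Queue the NVMe completion for sending: chain any RDMA WRITE WRs in front
 * of the SEND (using SEND_WITH_INV if the host requested remote
 * invalidation), and repost the RECV buffer before posting the chain.
 */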
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	nvmet_req_execute(&rsp->req);
}
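
/*
 * Map an inline data transfer onto the command's preallocated inline
 * pages, honouring the offset into the first page.
 */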
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}
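
/*
 * Map a keyed SGL: allocate a scatterlist for the transfer and set up an
 * rdma_rw context targeting the host's (addr, key), remembering the rkey
 * if the host allows remote invalidation with the response.
 */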
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	ret = nvmet_req_alloc_sgl(&rsp->req);
	if (ret < 0)
		goto error_out;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}
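
/*
 * Returns false if the required send work requests could not be reserved;
 * the caller then parks the command on rsp_wr_wait_list.
 */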
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		nvmet_req_execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	cmd->req.p2p_client = &queue->dev->device->dev;

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}
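
/*
 * RECV completion: hand the newly arrived command capsule to the core, or
 * park it on rsp_wait_list while the queue is still connecting.
 */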
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}
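
/*
 * Create one shared receive queue per device when the use_srq module
 * parameter is set.  If the device cannot create an SRQ this is not an
 * error; each queue simply falls back to its own RECVs.
 */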
static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++) {
		ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return 0;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}
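
/*
 * Find (or create) the nvmet_rdma_device for this CM ID's ib_device and
 * take a reference on it.  The port's inline data size is clamped here to
 * what the device's SGE limits allow.
 */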
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_port *port = cm_id->context;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(port->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			port->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		port->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = port->inline_data_size;
	ndev->inline_page_count = inline_page_count;
	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}
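
/*
 * Allocate the completion queue and QP for a queue; without an SRQ, also
 * post the queue's initial RECV buffers.
 */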
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}
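
/* Drain in-flight work before tearing down the CM ID, QP and CQ. */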
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp *qp = queue->cm_id->qp;

	ib_drain_qp(qp);
	rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(qp);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}
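
/*
 * Validate the NVMe/RDMA CM connect private data and derive the queue
 * sizes from the host's HSQSIZE/HRQSIZE values.
 */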
we enforce some kind of max for IO queues? */ 11278f000cacSChristoph Hellwig 11288f000cacSChristoph Hellwig return 0; 11298f000cacSChristoph Hellwig } 11308f000cacSChristoph Hellwig 11318f000cacSChristoph Hellwig static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id, 11328f000cacSChristoph Hellwig enum nvme_rdma_cm_status status) 11338f000cacSChristoph Hellwig { 11348f000cacSChristoph Hellwig struct nvme_rdma_cm_rej rej; 11358f000cacSChristoph Hellwig 11367a01a6eaSMax Gurtovoy pr_debug("rejecting connect request: status %d (%s)\n", 11377a01a6eaSMax Gurtovoy status, nvme_rdma_cm_msg(status)); 11387a01a6eaSMax Gurtovoy 11398f000cacSChristoph Hellwig rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 11408f000cacSChristoph Hellwig rej.sts = cpu_to_le16(status); 11418f000cacSChristoph Hellwig 11428f000cacSChristoph Hellwig return rdma_reject(cm_id, (void *)&rej, sizeof(rej)); 11438f000cacSChristoph Hellwig } 11448f000cacSChristoph Hellwig 11458f000cacSChristoph Hellwig static struct nvmet_rdma_queue * 11468f000cacSChristoph Hellwig nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, 11478f000cacSChristoph Hellwig struct rdma_cm_id *cm_id, 11488f000cacSChristoph Hellwig struct rdma_cm_event *event) 11498f000cacSChristoph Hellwig { 11508f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 11518f000cacSChristoph Hellwig int ret; 11528f000cacSChristoph Hellwig 11538f000cacSChristoph Hellwig queue = kzalloc(sizeof(*queue), GFP_KERNEL); 11548f000cacSChristoph Hellwig if (!queue) { 11558f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11568f000cacSChristoph Hellwig goto out_reject; 11578f000cacSChristoph Hellwig } 11588f000cacSChristoph Hellwig 11598f000cacSChristoph Hellwig ret = nvmet_sq_init(&queue->nvme_sq); 116070d4281cSBart Van Assche if (ret) { 116170d4281cSBart Van Assche ret = NVME_RDMA_CM_NO_RSC; 11628f000cacSChristoph Hellwig goto out_free_queue; 116370d4281cSBart Van Assche } 11648f000cacSChristoph Hellwig 11658f000cacSChristoph Hellwig ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); 11668f000cacSChristoph Hellwig if (ret) 11678f000cacSChristoph Hellwig goto out_destroy_sq; 11688f000cacSChristoph Hellwig 11698f000cacSChristoph Hellwig /* 11708f000cacSChristoph Hellwig * Schedules the actual release because calling rdma_destroy_id from 11718f000cacSChristoph Hellwig * inside a CM callback would trigger a deadlock. (great API design..) 
11728f000cacSChristoph Hellwig */ 11738f000cacSChristoph Hellwig INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); 11748f000cacSChristoph Hellwig queue->dev = ndev; 11758f000cacSChristoph Hellwig queue->cm_id = cm_id; 11768f000cacSChristoph Hellwig 11778f000cacSChristoph Hellwig spin_lock_init(&queue->state_lock); 11788f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_CONNECTING; 11798f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->rsp_wait_list); 11808f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->rsp_wr_wait_list); 11818f000cacSChristoph Hellwig spin_lock_init(&queue->rsp_wr_wait_lock); 11828f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->free_rsps); 11838f000cacSChristoph Hellwig spin_lock_init(&queue->rsps_lock); 1184766dbb17SSagi Grimberg INIT_LIST_HEAD(&queue->queue_list); 11858f000cacSChristoph Hellwig 11868f000cacSChristoph Hellwig queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); 11878f000cacSChristoph Hellwig if (queue->idx < 0) { 11888f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11896ccaeb56SChristophe JAILLET goto out_destroy_sq; 11908f000cacSChristoph Hellwig } 11918f000cacSChristoph Hellwig 11928f000cacSChristoph Hellwig ret = nvmet_rdma_alloc_rsps(queue); 11938f000cacSChristoph Hellwig if (ret) { 11948f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11958f000cacSChristoph Hellwig goto out_ida_remove; 11968f000cacSChristoph Hellwig } 11978f000cacSChristoph Hellwig 11988f000cacSChristoph Hellwig if (!ndev->srq) { 11998f000cacSChristoph Hellwig queue->cmds = nvmet_rdma_alloc_cmds(ndev, 12008f000cacSChristoph Hellwig queue->recv_queue_size, 12018f000cacSChristoph Hellwig !queue->host_qid); 12028f000cacSChristoph Hellwig if (IS_ERR(queue->cmds)) { 12038f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 12048f000cacSChristoph Hellwig goto out_free_responses; 12058f000cacSChristoph Hellwig } 12068f000cacSChristoph Hellwig } 12078f000cacSChristoph Hellwig 12088f000cacSChristoph Hellwig ret = nvmet_rdma_create_queue_ib(queue); 12098f000cacSChristoph Hellwig if (ret) { 12108f000cacSChristoph Hellwig pr_err("%s: creating RDMA queue failed (%d).\n", 12118f000cacSChristoph Hellwig __func__, ret); 12128f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 12138f000cacSChristoph Hellwig goto out_free_cmds; 12148f000cacSChristoph Hellwig } 12158f000cacSChristoph Hellwig 12168f000cacSChristoph Hellwig return queue; 12178f000cacSChristoph Hellwig 12188f000cacSChristoph Hellwig out_free_cmds: 12198f000cacSChristoph Hellwig if (!ndev->srq) { 12208f000cacSChristoph Hellwig nvmet_rdma_free_cmds(queue->dev, queue->cmds, 12218f000cacSChristoph Hellwig queue->recv_queue_size, 12228f000cacSChristoph Hellwig !queue->host_qid); 12238f000cacSChristoph Hellwig } 12248f000cacSChristoph Hellwig out_free_responses: 12258f000cacSChristoph Hellwig nvmet_rdma_free_rsps(queue); 12268f000cacSChristoph Hellwig out_ida_remove: 12278f000cacSChristoph Hellwig ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); 12288f000cacSChristoph Hellwig out_destroy_sq: 12298f000cacSChristoph Hellwig nvmet_sq_destroy(&queue->nvme_sq); 12308f000cacSChristoph Hellwig out_free_queue: 12318f000cacSChristoph Hellwig kfree(queue); 12328f000cacSChristoph Hellwig out_reject: 12338f000cacSChristoph Hellwig nvmet_rdma_cm_reject(cm_id, ret); 12348f000cacSChristoph Hellwig return NULL; 12358f000cacSChristoph Hellwig } 12368f000cacSChristoph Hellwig 12378f000cacSChristoph Hellwig static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) 12388f000cacSChristoph Hellwig { 
12398f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue = priv; 12408f000cacSChristoph Hellwig 12418f000cacSChristoph Hellwig switch (event->event) { 12428f000cacSChristoph Hellwig case IB_EVENT_COMM_EST: 12438f000cacSChristoph Hellwig rdma_notify(queue->cm_id, event->event); 12448f000cacSChristoph Hellwig break; 12458f000cacSChristoph Hellwig default: 1246675796beSMax Gurtovoy pr_err("received IB QP event: %s (%d)\n", 1247675796beSMax Gurtovoy ib_event_msg(event->event), event->event); 12488f000cacSChristoph Hellwig break; 12498f000cacSChristoph Hellwig } 12508f000cacSChristoph Hellwig } 12518f000cacSChristoph Hellwig 12528f000cacSChristoph Hellwig static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, 12538f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue, 12548f000cacSChristoph Hellwig struct rdma_conn_param *p) 12558f000cacSChristoph Hellwig { 12568f000cacSChristoph Hellwig struct rdma_conn_param param = { }; 12578f000cacSChristoph Hellwig struct nvme_rdma_cm_rep priv = { }; 12588f000cacSChristoph Hellwig int ret = -ENOMEM; 12598f000cacSChristoph Hellwig 12608f000cacSChristoph Hellwig param.rnr_retry_count = 7; 12618f000cacSChristoph Hellwig param.flow_control = 1; 12628f000cacSChristoph Hellwig param.initiator_depth = min_t(u8, p->initiator_depth, 12638f000cacSChristoph Hellwig queue->dev->device->attrs.max_qp_init_rd_atom); 12648f000cacSChristoph Hellwig param.private_data = &priv; 12658f000cacSChristoph Hellwig param.private_data_len = sizeof(priv); 12668f000cacSChristoph Hellwig priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 12678f000cacSChristoph Hellwig priv.crqsize = cpu_to_le16(queue->recv_queue_size); 12688f000cacSChristoph Hellwig 12698f000cacSChristoph Hellwig ret = rdma_accept(cm_id, &param); 12708f000cacSChristoph Hellwig if (ret) 12718f000cacSChristoph Hellwig pr_err("rdma_accept failed (error code = %d)\n", ret); 12728f000cacSChristoph Hellwig 12738f000cacSChristoph Hellwig return ret; 12748f000cacSChristoph Hellwig } 12758f000cacSChristoph Hellwig 12768f000cacSChristoph Hellwig static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, 12778f000cacSChristoph Hellwig struct rdma_cm_event *event) 12788f000cacSChristoph Hellwig { 12798f000cacSChristoph Hellwig struct nvmet_rdma_device *ndev; 12808f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 12818f000cacSChristoph Hellwig int ret = -EINVAL; 12828f000cacSChristoph Hellwig 12838f000cacSChristoph Hellwig ndev = nvmet_rdma_find_get_device(cm_id); 12848f000cacSChristoph Hellwig if (!ndev) { 12858f000cacSChristoph Hellwig nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC); 12868f000cacSChristoph Hellwig return -ECONNREFUSED; 12878f000cacSChristoph Hellwig } 12888f000cacSChristoph Hellwig 12898f000cacSChristoph Hellwig queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); 12908f000cacSChristoph Hellwig if (!queue) { 12918f000cacSChristoph Hellwig ret = -ENOMEM; 12928f000cacSChristoph Hellwig goto put_device; 12938f000cacSChristoph Hellwig } 12948f000cacSChristoph Hellwig queue->port = cm_id->context; 12958f000cacSChristoph Hellwig 1296777dc823SSagi Grimberg if (queue->host_qid == 0) { 1297777dc823SSagi Grimberg /* Let inflight controller teardown complete */ 1298d39aa497SChristoph Hellwig flush_scheduled_work(); 1299777dc823SSagi Grimberg } 1300777dc823SSagi Grimberg 13018f000cacSChristoph Hellwig ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); 1302e1a2ee24SIsrael Rukshin if (ret) { 1303d39aa497SChristoph Hellwig schedule_work(&queue->release_work); 1304e1a2ee24SIsrael Rukshin /* Destroying the
rdma_cm id is not needed here */ 1305e1a2ee24SIsrael Rukshin return 0; 1306e1a2ee24SIsrael Rukshin } 13078f000cacSChristoph Hellwig 13088f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 13098f000cacSChristoph Hellwig list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); 13108f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 13118f000cacSChristoph Hellwig 13128f000cacSChristoph Hellwig return 0; 13138f000cacSChristoph Hellwig 13148f000cacSChristoph Hellwig put_device: 13158f000cacSChristoph Hellwig kref_put(&ndev->ref, nvmet_rdma_free_dev); 13168f000cacSChristoph Hellwig 13178f000cacSChristoph Hellwig return ret; 13188f000cacSChristoph Hellwig } 13198f000cacSChristoph Hellwig 13208f000cacSChristoph Hellwig static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) 13218f000cacSChristoph Hellwig { 13228f000cacSChristoph Hellwig unsigned long flags; 13238f000cacSChristoph Hellwig 13248f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13258f000cacSChristoph Hellwig if (queue->state != NVMET_RDMA_Q_CONNECTING) { 13268f000cacSChristoph Hellwig pr_warn("trying to establish a connected queue\n"); 13278f000cacSChristoph Hellwig goto out_unlock; 13288f000cacSChristoph Hellwig } 13298f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_LIVE; 13308f000cacSChristoph Hellwig 13318f000cacSChristoph Hellwig while (!list_empty(&queue->rsp_wait_list)) { 13328f000cacSChristoph Hellwig struct nvmet_rdma_rsp *cmd; 13338f000cacSChristoph Hellwig 13348f000cacSChristoph Hellwig cmd = list_first_entry(&queue->rsp_wait_list, 13358f000cacSChristoph Hellwig struct nvmet_rdma_rsp, wait_list); 13368f000cacSChristoph Hellwig list_del(&cmd->wait_list); 13378f000cacSChristoph Hellwig 13388f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13398f000cacSChristoph Hellwig nvmet_rdma_handle_command(queue, cmd); 13408f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13418f000cacSChristoph Hellwig } 13428f000cacSChristoph Hellwig 13438f000cacSChristoph Hellwig out_unlock: 13448f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13458f000cacSChristoph Hellwig } 13468f000cacSChristoph Hellwig 13478f000cacSChristoph Hellwig static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) 13488f000cacSChristoph Hellwig { 13498f000cacSChristoph Hellwig bool disconnect = false; 13508f000cacSChristoph Hellwig unsigned long flags; 13518f000cacSChristoph Hellwig 13528f000cacSChristoph Hellwig pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); 13538f000cacSChristoph Hellwig 13548f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13558f000cacSChristoph Hellwig switch (queue->state) { 13568f000cacSChristoph Hellwig case NVMET_RDMA_Q_CONNECTING: 13578f000cacSChristoph Hellwig case NVMET_RDMA_Q_LIVE: 13588f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_DISCONNECTING; 1359d8f7750aSSagi Grimberg disconnect = true; 13608f000cacSChristoph Hellwig break; 13618f000cacSChristoph Hellwig case NVMET_RDMA_Q_DISCONNECTING: 13628f000cacSChristoph Hellwig break; 13638f000cacSChristoph Hellwig } 13648f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13658f000cacSChristoph Hellwig 13668f000cacSChristoph Hellwig if (disconnect) { 13678f000cacSChristoph Hellwig rdma_disconnect(queue->cm_id); 1368d39aa497SChristoph Hellwig schedule_work(&queue->release_work); 13698f000cacSChristoph Hellwig } 13708f000cacSChristoph Hellwig } 
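/*
 * Editor's note: the helper below is an illustrative sketch, not part of
 * the original driver.  The disconnect paths above funnel through a
 * lock-protected state machine so that rdma_disconnect() and the release
 * work are issued exactly once, no matter how many CM events race.  A
 * stripped-down version of the idiom, under a hypothetical name:
 */
static bool nvmet_rdma_example_try_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_DISCONNECTING) {
		/* First caller wins; later callers see DISCONNECTING. */
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
		disconnect = true;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	return disconnect;
}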
13718f000cacSChristoph Hellwig 13728f000cacSChristoph Hellwig static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) 13738f000cacSChristoph Hellwig { 13748f000cacSChristoph Hellwig bool disconnect = false; 13758f000cacSChristoph Hellwig 13768f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 13778f000cacSChristoph Hellwig if (!list_empty(&queue->queue_list)) { 13788f000cacSChristoph Hellwig list_del_init(&queue->queue_list); 13798f000cacSChristoph Hellwig disconnect = true; 13808f000cacSChristoph Hellwig } 13818f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 13828f000cacSChristoph Hellwig 13838f000cacSChristoph Hellwig if (disconnect) 13848f000cacSChristoph Hellwig __nvmet_rdma_queue_disconnect(queue); 13858f000cacSChristoph Hellwig } 13868f000cacSChristoph Hellwig 13878f000cacSChristoph Hellwig static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, 13888f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue) 13898f000cacSChristoph Hellwig { 13908f000cacSChristoph Hellwig WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); 13918f000cacSChristoph Hellwig 1392766dbb17SSagi Grimberg mutex_lock(&nvmet_rdma_queue_mutex); 1393766dbb17SSagi Grimberg if (!list_empty(&queue->queue_list)) 1394766dbb17SSagi Grimberg list_del_init(&queue->queue_list); 1395766dbb17SSagi Grimberg mutex_unlock(&nvmet_rdma_queue_mutex); 1396766dbb17SSagi Grimberg 1397766dbb17SSagi Grimberg pr_err("failed to connect queue %d\n", queue->idx); 1398d39aa497SChristoph Hellwig schedule_work(&queue->release_work); 13998f000cacSChristoph Hellwig } 14008f000cacSChristoph Hellwig 1401d8f7750aSSagi Grimberg /** 1402d8f7750aSSagi Grimberg * nvmet_rdma_device_removal() - Handle RDMA device removal 1403f1d4ef7dSSagi Grimberg * @cm_id: rdma_cm id, used for nvmet port 1404d8f7750aSSagi Grimberg * @queue: nvmet rdma queue (cm id qp_context) 1405d8f7750aSSagi Grimberg * 1406d8f7750aSSagi Grimberg * DEVICE_REMOVAL event notifies us that the RDMA device is about 1407f1d4ef7dSSagi Grimberg * to unplug. Note that this event can be generated on a normal 1408f1d4ef7dSSagi Grimberg * queue cm_id and/or a device bound listener cm_id (in which case 1409f1d4ef7dSSagi Grimberg * queue will be NULL). 1410d8f7750aSSagi Grimberg * 1411f1d4ef7dSSagi Grimberg * We registered an ib_client to handle device removal for queues, 1412f1d4ef7dSSagi Grimberg * so we only need to handle the listening port cm_ids. In this case 1413d8f7750aSSagi Grimberg * we nullify the priv to prevent a double cm_id destruction, and destroy 1414d8f7750aSSagi Grimberg * the cm_id implicitly by returning a non-zero rc to the callout. 1415d8f7750aSSagi Grimberg */ 1416d8f7750aSSagi Grimberg static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, 1417d8f7750aSSagi Grimberg struct nvmet_rdma_queue *queue) 1418d8f7750aSSagi Grimberg { 1419f1d4ef7dSSagi Grimberg struct nvmet_port *port; 1420d8f7750aSSagi Grimberg 1421f1d4ef7dSSagi Grimberg if (queue) { 1422f1d4ef7dSSagi Grimberg /* 1423f1d4ef7dSSagi Grimberg * This is a queue cm_id. We have registered 1424f1d4ef7dSSagi Grimberg * an ib_client to handle queue removal, 1425f1d4ef7dSSagi Grimberg * so don't interfere and just return. 1426f1d4ef7dSSagi Grimberg */ 1427f1d4ef7dSSagi Grimberg return 0; 1428f1d4ef7dSSagi Grimberg } 1429f1d4ef7dSSagi Grimberg 1430f1d4ef7dSSagi Grimberg port = cm_id->context; 1431d8f7750aSSagi Grimberg 1432d8f7750aSSagi Grimberg /* 1433d8f7750aSSagi Grimberg * This is a listener cm_id.
Make sure that 1434d8f7750aSSagi Grimberg * future remove_port won't invoke a double 1435d8f7750aSSagi Grimberg * cm_id destroy. Use atomic xchg to make sure 1436d8f7750aSSagi Grimberg * we don't compete with remove_port; a stand-alone sketch of this claim-once idiom follows the nvmet_rdma_ops table below. 1437d8f7750aSSagi Grimberg */ 1438d8f7750aSSagi Grimberg if (xchg(&port->priv, NULL) != cm_id) 1439d8f7750aSSagi Grimberg return 0; 1440d8f7750aSSagi Grimberg 1441d8f7750aSSagi Grimberg /* 1442d8f7750aSSagi Grimberg * We need to return 1 so that the core will destroy 1443d8f7750aSSagi Grimberg * its own ID. What a great API design.. 1444d8f7750aSSagi Grimberg */ 1445d8f7750aSSagi Grimberg return 1; 1446d8f7750aSSagi Grimberg } 1447d8f7750aSSagi Grimberg 14488f000cacSChristoph Hellwig static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, 14498f000cacSChristoph Hellwig struct rdma_cm_event *event) 14508f000cacSChristoph Hellwig { 14518f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue = NULL; 14528f000cacSChristoph Hellwig int ret = 0; 14538f000cacSChristoph Hellwig 14548f000cacSChristoph Hellwig if (cm_id->qp) 14558f000cacSChristoph Hellwig queue = cm_id->qp->qp_context; 14568f000cacSChristoph Hellwig 14578f000cacSChristoph Hellwig pr_debug("%s (%d): status %d id %p\n", 14588f000cacSChristoph Hellwig rdma_event_msg(event->event), event->event, 14598f000cacSChristoph Hellwig event->status, cm_id); 14608f000cacSChristoph Hellwig 14618f000cacSChristoph Hellwig switch (event->event) { 14628f000cacSChristoph Hellwig case RDMA_CM_EVENT_CONNECT_REQUEST: 14638f000cacSChristoph Hellwig ret = nvmet_rdma_queue_connect(cm_id, event); 14648f000cacSChristoph Hellwig break; 14658f000cacSChristoph Hellwig case RDMA_CM_EVENT_ESTABLISHED: 14668f000cacSChristoph Hellwig nvmet_rdma_queue_established(queue); 14678f000cacSChristoph Hellwig break; 14688f000cacSChristoph Hellwig case RDMA_CM_EVENT_ADDR_CHANGE: 14698f000cacSChristoph Hellwig case RDMA_CM_EVENT_DISCONNECTED: 14708f000cacSChristoph Hellwig case RDMA_CM_EVENT_TIMEWAIT_EXIT: 14718f000cacSChristoph Hellwig nvmet_rdma_queue_disconnect(queue); 1472d8f7750aSSagi Grimberg break; 1473d8f7750aSSagi Grimberg case RDMA_CM_EVENT_DEVICE_REMOVAL: 1474d8f7750aSSagi Grimberg ret = nvmet_rdma_device_removal(cm_id, queue); 14758f000cacSChristoph Hellwig break; 14768f000cacSChristoph Hellwig case RDMA_CM_EVENT_REJECTED: 1477512fb1b3SSteve Wise pr_debug("Connection rejected: %s\n", 1478512fb1b3SSteve Wise rdma_reject_msg(cm_id, event->status)); 1479512fb1b3SSteve Wise /* FALLTHROUGH */ 14808f000cacSChristoph Hellwig case RDMA_CM_EVENT_UNREACHABLE: 14818f000cacSChristoph Hellwig case RDMA_CM_EVENT_CONNECT_ERROR: 14828f000cacSChristoph Hellwig nvmet_rdma_queue_connect_fail(cm_id, queue); 14838f000cacSChristoph Hellwig break; 14848f000cacSChristoph Hellwig default: 14858f000cacSChristoph Hellwig pr_err("received unrecognized RDMA CM event %d\n", 14868f000cacSChristoph Hellwig event->event); 14878f000cacSChristoph Hellwig break; 14888f000cacSChristoph Hellwig } 14898f000cacSChristoph Hellwig 14908f000cacSChristoph Hellwig return ret; 14918f000cacSChristoph Hellwig } 14928f000cacSChristoph Hellwig 14938f000cacSChristoph Hellwig static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) 14948f000cacSChristoph Hellwig { 14958f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 14968f000cacSChristoph Hellwig 14978f000cacSChristoph Hellwig restart: 14988f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 14998f000cacSChristoph Hellwig list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { 15008f000cacSChristoph Hellwig if
(queue->nvme_sq.ctrl == ctrl) { 15018f000cacSChristoph Hellwig list_del_init(&queue->queue_list); 15028f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 15038f000cacSChristoph Hellwig 15048f000cacSChristoph Hellwig __nvmet_rdma_queue_disconnect(queue); 15058f000cacSChristoph Hellwig goto restart; 15068f000cacSChristoph Hellwig } 15078f000cacSChristoph Hellwig } 15088f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 15098f000cacSChristoph Hellwig } 15108f000cacSChristoph Hellwig 15118f000cacSChristoph Hellwig static int nvmet_rdma_add_port(struct nvmet_port *port) 15128f000cacSChristoph Hellwig { 15138f000cacSChristoph Hellwig struct rdma_cm_id *cm_id; 1514670c2a3aSSagi Grimberg struct sockaddr_storage addr = { }; 1515670c2a3aSSagi Grimberg __kernel_sa_family_t af; 15168f000cacSChristoph Hellwig int ret; 15178f000cacSChristoph Hellwig 15188f000cacSChristoph Hellwig switch (port->disc_addr.adrfam) { 15198f000cacSChristoph Hellwig case NVMF_ADDR_FAMILY_IP4: 1520670c2a3aSSagi Grimberg af = AF_INET; 1521670c2a3aSSagi Grimberg break; 1522670c2a3aSSagi Grimberg case NVMF_ADDR_FAMILY_IP6: 1523670c2a3aSSagi Grimberg af = AF_INET6; 15248f000cacSChristoph Hellwig break; 15258f000cacSChristoph Hellwig default: 15268f000cacSChristoph Hellwig pr_err("address family %d not supported\n", 15278f000cacSChristoph Hellwig port->disc_addr.adrfam); 15288f000cacSChristoph Hellwig return -EINVAL; 15298f000cacSChristoph Hellwig } 15308f000cacSChristoph Hellwig 15310d5ee2b2SSteve Wise if (port->inline_data_size < 0) { 15320d5ee2b2SSteve Wise port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE; 15330d5ee2b2SSteve Wise } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) { 15340d5ee2b2SSteve Wise pr_warn("inline_data_size %u is too large, reducing to %u\n", 15350d5ee2b2SSteve Wise port->inline_data_size, 15360d5ee2b2SSteve Wise NVMET_RDMA_MAX_INLINE_DATA_SIZE); 15370d5ee2b2SSteve Wise port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; 15380d5ee2b2SSteve Wise } 15390d5ee2b2SSteve Wise 1540670c2a3aSSagi Grimberg ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr, 1541670c2a3aSSagi Grimberg port->disc_addr.trsvcid, &addr); 1542670c2a3aSSagi Grimberg if (ret) { 1543670c2a3aSSagi Grimberg pr_err("malformed ip/port passed: %s:%s\n", 1544670c2a3aSSagi Grimberg port->disc_addr.traddr, port->disc_addr.trsvcid); 15458f000cacSChristoph Hellwig return ret; 1546670c2a3aSSagi Grimberg } 15478f000cacSChristoph Hellwig 15488f000cacSChristoph Hellwig cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port, 15498f000cacSChristoph Hellwig RDMA_PS_TCP, IB_QPT_RC); 15508f000cacSChristoph Hellwig if (IS_ERR(cm_id)) { 15518f000cacSChristoph Hellwig pr_err("CM ID creation failed\n"); 15528f000cacSChristoph Hellwig return PTR_ERR(cm_id); 15538f000cacSChristoph Hellwig } 15548f000cacSChristoph Hellwig 1555670c2a3aSSagi Grimberg /* 1556670c2a3aSSagi Grimberg * Allow both IPv4 and IPv6 sockets to bind a single port 1557670c2a3aSSagi Grimberg * at the same time. 
1558670c2a3aSSagi Grimberg */ 1559670c2a3aSSagi Grimberg ret = rdma_set_afonly(cm_id, 1); 15608f000cacSChristoph Hellwig if (ret) { 1561670c2a3aSSagi Grimberg pr_err("rdma_set_afonly failed (%d)\n", ret); 1562670c2a3aSSagi Grimberg goto out_destroy_id; 1563670c2a3aSSagi Grimberg } 1564670c2a3aSSagi Grimberg 1565670c2a3aSSagi Grimberg ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr); 1566670c2a3aSSagi Grimberg if (ret) { 1567670c2a3aSSagi Grimberg pr_err("binding CM ID to %pISpcs failed (%d)\n", 1568670c2a3aSSagi Grimberg (struct sockaddr *)&addr, ret); 15698f000cacSChristoph Hellwig goto out_destroy_id; 15708f000cacSChristoph Hellwig } 15718f000cacSChristoph Hellwig 15728f000cacSChristoph Hellwig ret = rdma_listen(cm_id, 128); 15738f000cacSChristoph Hellwig if (ret) { 1574670c2a3aSSagi Grimberg pr_err("listening to %pISpcs failed (%d)\n", 1575670c2a3aSSagi Grimberg (struct sockaddr *)&addr, ret); 15768f000cacSChristoph Hellwig goto out_destroy_id; 15778f000cacSChristoph Hellwig } 15788f000cacSChristoph Hellwig 1579670c2a3aSSagi Grimberg pr_info("enabling port %d (%pISpcs)\n", 1580670c2a3aSSagi Grimberg le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr); 15818f000cacSChristoph Hellwig port->priv = cm_id; 15828f000cacSChristoph Hellwig return 0; 15838f000cacSChristoph Hellwig 15848f000cacSChristoph Hellwig out_destroy_id: 15858f000cacSChristoph Hellwig rdma_destroy_id(cm_id); 15868f000cacSChristoph Hellwig return ret; 15878f000cacSChristoph Hellwig } 15888f000cacSChristoph Hellwig 15898f000cacSChristoph Hellwig static void nvmet_rdma_remove_port(struct nvmet_port *port) 15908f000cacSChristoph Hellwig { 1591d8f7750aSSagi Grimberg struct rdma_cm_id *cm_id = xchg(&port->priv, NULL); 15928f000cacSChristoph Hellwig 1593d8f7750aSSagi Grimberg if (cm_id) 15948f000cacSChristoph Hellwig rdma_destroy_id(cm_id); 15958f000cacSChristoph Hellwig } 15968f000cacSChristoph Hellwig 15974c652685SSagi Grimberg static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, 15984c652685SSagi Grimberg struct nvmet_port *port, char *traddr) 15994c652685SSagi Grimberg { 16004c652685SSagi Grimberg struct rdma_cm_id *cm_id = port->priv; 16014c652685SSagi Grimberg 16024c652685SSagi Grimberg if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { 16034c652685SSagi Grimberg struct nvmet_rdma_rsp *rsp = 16044c652685SSagi Grimberg container_of(req, struct nvmet_rdma_rsp, req); 16054c652685SSagi Grimberg struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; 16064c652685SSagi Grimberg struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; 16074c652685SSagi Grimberg 16084c652685SSagi Grimberg sprintf(traddr, "%pISc", addr); 16094c652685SSagi Grimberg } else { 16104c652685SSagi Grimberg memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); 16114c652685SSagi Grimberg } 16124c652685SSagi Grimberg } 16134c652685SSagi Grimberg 1614e929f06dSChristoph Hellwig static const struct nvmet_fabrics_ops nvmet_rdma_ops = { 16158f000cacSChristoph Hellwig .owner = THIS_MODULE, 16168f000cacSChristoph Hellwig .type = NVMF_TRTYPE_RDMA, 16178f000cacSChristoph Hellwig .msdbd = 1, 16188f000cacSChristoph Hellwig .has_keyed_sgls = 1, 16198f000cacSChristoph Hellwig .add_port = nvmet_rdma_add_port, 16208f000cacSChristoph Hellwig .remove_port = nvmet_rdma_remove_port, 16218f000cacSChristoph Hellwig .queue_response = nvmet_rdma_queue_response, 16228f000cacSChristoph Hellwig .delete_ctrl = nvmet_rdma_delete_ctrl, 16234c652685SSagi Grimberg .disc_traddr = nvmet_rdma_disc_port_addr, 16248f000cacSChristoph Hellwig }; 
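/*
 * Editor's note: the helper below is an illustrative sketch, not part of
 * the original driver (referenced from the comment in
 * nvmet_rdma_device_removal() above).  Both nvmet_rdma_remove_port() and
 * nvmet_rdma_device_removal() treat port->priv as a claim-once token:
 * xchg() stores NULL and atomically returns the previous value, so
 * exactly one of the racing paths obtains the listener cm_id and
 * destroys it, while the loser reads back NULL and backs off.
 */
static void nvmet_rdma_example_destroy_listener_once(struct nvmet_port *port)
{
	/* Atomically take ownership of the listener cm_id, if any. */
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
	/* Concurrent callers now observe NULL and must not destroy it. */
}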
16258f000cacSChristoph Hellwig 1626f1d4ef7dSSagi Grimberg static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) 1627f1d4ef7dSSagi Grimberg { 162843b92fd2SIsrael Rukshin struct nvmet_rdma_queue *queue, *tmp; 1629a3dd7d00SMax Gurtovoy struct nvmet_rdma_device *ndev; 1630a3dd7d00SMax Gurtovoy bool found = false; 1631f1d4ef7dSSagi Grimberg 1632a3dd7d00SMax Gurtovoy mutex_lock(&device_list_mutex); 1633a3dd7d00SMax Gurtovoy list_for_each_entry(ndev, &device_list, entry) { 1634a3dd7d00SMax Gurtovoy if (ndev->device == ib_device) { 1635a3dd7d00SMax Gurtovoy found = true; 1636a3dd7d00SMax Gurtovoy break; 1637a3dd7d00SMax Gurtovoy } 1638a3dd7d00SMax Gurtovoy } 1639a3dd7d00SMax Gurtovoy mutex_unlock(&device_list_mutex); 1640a3dd7d00SMax Gurtovoy 1641a3dd7d00SMax Gurtovoy if (!found) 1642a3dd7d00SMax Gurtovoy return; 1643a3dd7d00SMax Gurtovoy 1644a3dd7d00SMax Gurtovoy /* 1645a3dd7d00SMax Gurtovoy * The IB device used by nvmet controllers is being removed; 1646a3dd7d00SMax Gurtovoy * delete all queues using this device. 1647a3dd7d00SMax Gurtovoy */ 1648f1d4ef7dSSagi Grimberg mutex_lock(&nvmet_rdma_queue_mutex); 164943b92fd2SIsrael Rukshin list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, 165043b92fd2SIsrael Rukshin queue_list) { 1651f1d4ef7dSSagi Grimberg if (queue->dev->device != ib_device) 1652f1d4ef7dSSagi Grimberg continue; 1653f1d4ef7dSSagi Grimberg 1654f1d4ef7dSSagi Grimberg pr_info("Removing queue %d\n", queue->idx); 165543b92fd2SIsrael Rukshin list_del_init(&queue->queue_list); 1656f1d4ef7dSSagi Grimberg __nvmet_rdma_queue_disconnect(queue); 1657f1d4ef7dSSagi Grimberg } 1658f1d4ef7dSSagi Grimberg mutex_unlock(&nvmet_rdma_queue_mutex); 1659f1d4ef7dSSagi Grimberg 1660f1d4ef7dSSagi Grimberg flush_scheduled_work(); 1661f1d4ef7dSSagi Grimberg } 1662f1d4ef7dSSagi Grimberg 1663f1d4ef7dSSagi Grimberg static struct ib_client nvmet_rdma_ib_client = { 1664f1d4ef7dSSagi Grimberg .name = "nvmet_rdma", 1665f1d4ef7dSSagi Grimberg .remove = nvmet_rdma_remove_one 1666f1d4ef7dSSagi Grimberg }; 1667f1d4ef7dSSagi Grimberg 16688f000cacSChristoph Hellwig static int __init nvmet_rdma_init(void) 16698f000cacSChristoph Hellwig { 1670f1d4ef7dSSagi Grimberg int ret; 1671f1d4ef7dSSagi Grimberg 1672f1d4ef7dSSagi Grimberg ret = ib_register_client(&nvmet_rdma_ib_client); 1673f1d4ef7dSSagi Grimberg if (ret) 1674f1d4ef7dSSagi Grimberg return ret; 1675f1d4ef7dSSagi Grimberg 1676f1d4ef7dSSagi Grimberg ret = nvmet_register_transport(&nvmet_rdma_ops); 1677f1d4ef7dSSagi Grimberg if (ret) 1678f1d4ef7dSSagi Grimberg goto err_ib_client; 1679f1d4ef7dSSagi Grimberg 1680f1d4ef7dSSagi Grimberg return 0; 1681f1d4ef7dSSagi Grimberg 1682f1d4ef7dSSagi Grimberg err_ib_client: 1683f1d4ef7dSSagi Grimberg ib_unregister_client(&nvmet_rdma_ib_client); 1684f1d4ef7dSSagi Grimberg return ret; 16858f000cacSChristoph Hellwig } 16868f000cacSChristoph Hellwig 16878f000cacSChristoph Hellwig static void __exit nvmet_rdma_exit(void) 16888f000cacSChristoph Hellwig { 16898f000cacSChristoph Hellwig nvmet_unregister_transport(&nvmet_rdma_ops); 1690f1d4ef7dSSagi Grimberg ib_unregister_client(&nvmet_rdma_ib_client); 1691cb4876e8SSagi Grimberg WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); 16928f000cacSChristoph Hellwig ida_destroy(&nvmet_rdma_queue_ida); 16938f000cacSChristoph Hellwig } 16948f000cacSChristoph Hellwig 16958f000cacSChristoph Hellwig module_init(nvmet_rdma_init); 16968f000cacSChristoph Hellwig module_exit(nvmet_rdma_exit); 16978f000cacSChristoph Hellwig 16988f000cacSChristoph Hellwig MODULE_LICENSE("GPL
v2"); 16998f000cacSChristoph Hellwig MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */ 1700