// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS			8

struct nvmet_rdma_cmd {
	struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_port {
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	struct rdma_cm_id	*cm_id;
	struct delayed_work	repair_work;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
	int			inline_data_size;
	int			inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

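		/*
		 * Responses allocated beyond the pre-allocated pool are
		 * marked so that nvmet_rdma_put_rsp() frees them instead of
		 * returning them to the queue's free list.
		 */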
		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

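		/*
		 * Retry commands that were queued while the send queue was
		 * out of work-request credits; stop at the first one that
		 * still cannot be executed.
		 */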
		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}


static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgl(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * we didn't setup the controller yet in case
		 * of admin connect error, just disconnect and
		 * cleanup the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	rsp->req.execute(&rsp->req);
}

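/*
 * Build an SG list over the command's pre-allocated inline data pages so
 * the request can consume in-capsule data without a separate allocation.
 */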
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	ret = nvmet_req_alloc_sgl(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (unlikely(ret < 0))
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

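	/*
	 * Each command needs one send work request for the response plus
	 * one per RDMA READ/WRITE context; if the send queue does not have
	 * enough credits left, back off and let the caller defer the
	 * command to the wr wait list.
	 */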
	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++) {
		ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return 0;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_port *nport = port->nport;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(nport->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			nport->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		nport->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = nport->inline_data_size;
	ndev->inline_page_count = inline_page_count;
	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i, factor;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
				   1 << NVMET_RDMA_MAX_MDTS);
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp *qp = queue->cm_id->qp;

	ib_drain_qp(qp);
	rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(qp);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

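	/*
	 * The admin queue's recv depth may not exceed NVME_AQ_DEPTH; a
	 * connect request asking for more is rejected as an invalid
	 * HSQSIZE.
	 */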
NVME_AQ_DEPTH) 11288f000cacSChristoph Hellwig return NVME_RDMA_CM_INVALID_HSQSIZE; 11298f000cacSChristoph Hellwig 11308f000cacSChristoph Hellwig /* XXX: Should we enforce some kind of max for IO queues? */ 11318f000cacSChristoph Hellwig 11328f000cacSChristoph Hellwig return 0; 11338f000cacSChristoph Hellwig } 11348f000cacSChristoph Hellwig 11358f000cacSChristoph Hellwig static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id, 11368f000cacSChristoph Hellwig enum nvme_rdma_cm_status status) 11378f000cacSChristoph Hellwig { 11388f000cacSChristoph Hellwig struct nvme_rdma_cm_rej rej; 11398f000cacSChristoph Hellwig 11407a01a6eaSMax Gurtovoy pr_debug("rejecting connect request: status %d (%s)\n", 11417a01a6eaSMax Gurtovoy status, nvme_rdma_cm_msg(status)); 11427a01a6eaSMax Gurtovoy 11438f000cacSChristoph Hellwig rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 11448f000cacSChristoph Hellwig rej.sts = cpu_to_le16(status); 11458f000cacSChristoph Hellwig 11468f000cacSChristoph Hellwig return rdma_reject(cm_id, (void *)&rej, sizeof(rej)); 11478f000cacSChristoph Hellwig } 11488f000cacSChristoph Hellwig 11498f000cacSChristoph Hellwig static struct nvmet_rdma_queue * 11508f000cacSChristoph Hellwig nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, 11518f000cacSChristoph Hellwig struct rdma_cm_id *cm_id, 11528f000cacSChristoph Hellwig struct rdma_cm_event *event) 11538f000cacSChristoph Hellwig { 11548f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 11558f000cacSChristoph Hellwig int ret; 11568f000cacSChristoph Hellwig 11578f000cacSChristoph Hellwig queue = kzalloc(sizeof(*queue), GFP_KERNEL); 11588f000cacSChristoph Hellwig if (!queue) { 11598f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11608f000cacSChristoph Hellwig goto out_reject; 11618f000cacSChristoph Hellwig } 11628f000cacSChristoph Hellwig 11638f000cacSChristoph Hellwig ret = nvmet_sq_init(&queue->nvme_sq); 116470d4281cSBart Van Assche if (ret) { 116570d4281cSBart Van Assche ret = NVME_RDMA_CM_NO_RSC; 11668f000cacSChristoph Hellwig goto out_free_queue; 116770d4281cSBart Van Assche } 11688f000cacSChristoph Hellwig 11698f000cacSChristoph Hellwig ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); 11708f000cacSChristoph Hellwig if (ret) 11718f000cacSChristoph Hellwig goto out_destroy_sq; 11728f000cacSChristoph Hellwig 11738f000cacSChristoph Hellwig /* 11748f000cacSChristoph Hellwig * Schedules the actual release because calling rdma_destroy_id from 11758f000cacSChristoph Hellwig * inside a CM callback would trigger a deadlock. (great API design..) 
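 * The release work also drops the device reference taken by
 * nvmet_rdma_find_get_device() at connect time.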
11768f000cacSChristoph Hellwig */ 11778f000cacSChristoph Hellwig INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); 11788f000cacSChristoph Hellwig queue->dev = ndev; 11798f000cacSChristoph Hellwig queue->cm_id = cm_id; 11808f000cacSChristoph Hellwig 11818f000cacSChristoph Hellwig spin_lock_init(&queue->state_lock); 11828f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_CONNECTING; 11838f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->rsp_wait_list); 11848f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->rsp_wr_wait_list); 11858f000cacSChristoph Hellwig spin_lock_init(&queue->rsp_wr_wait_lock); 11868f000cacSChristoph Hellwig INIT_LIST_HEAD(&queue->free_rsps); 11878f000cacSChristoph Hellwig spin_lock_init(&queue->rsps_lock); 1188766dbb17SSagi Grimberg INIT_LIST_HEAD(&queue->queue_list); 11898f000cacSChristoph Hellwig 11908f000cacSChristoph Hellwig queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); 11918f000cacSChristoph Hellwig if (queue->idx < 0) { 11928f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11936ccaeb56SChristophe JAILLET goto out_destroy_sq; 11948f000cacSChristoph Hellwig } 11958f000cacSChristoph Hellwig 11968f000cacSChristoph Hellwig ret = nvmet_rdma_alloc_rsps(queue); 11978f000cacSChristoph Hellwig if (ret) { 11988f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 11998f000cacSChristoph Hellwig goto out_ida_remove; 12008f000cacSChristoph Hellwig } 12018f000cacSChristoph Hellwig 12028f000cacSChristoph Hellwig if (!ndev->srq) { 12038f000cacSChristoph Hellwig queue->cmds = nvmet_rdma_alloc_cmds(ndev, 12048f000cacSChristoph Hellwig queue->recv_queue_size, 12058f000cacSChristoph Hellwig !queue->host_qid); 12068f000cacSChristoph Hellwig if (IS_ERR(queue->cmds)) { 12078f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 12088f000cacSChristoph Hellwig goto out_free_responses; 12098f000cacSChristoph Hellwig } 12108f000cacSChristoph Hellwig } 12118f000cacSChristoph Hellwig 12128f000cacSChristoph Hellwig ret = nvmet_rdma_create_queue_ib(queue); 12138f000cacSChristoph Hellwig if (ret) { 12148f000cacSChristoph Hellwig pr_err("%s: creating RDMA queue failed (%d).\n", 12158f000cacSChristoph Hellwig __func__, ret); 12168f000cacSChristoph Hellwig ret = NVME_RDMA_CM_NO_RSC; 12178f000cacSChristoph Hellwig goto out_free_cmds; 12188f000cacSChristoph Hellwig } 12198f000cacSChristoph Hellwig 12208f000cacSChristoph Hellwig return queue; 12218f000cacSChristoph Hellwig 12228f000cacSChristoph Hellwig out_free_cmds: 12238f000cacSChristoph Hellwig if (!ndev->srq) { 12248f000cacSChristoph Hellwig nvmet_rdma_free_cmds(queue->dev, queue->cmds, 12258f000cacSChristoph Hellwig queue->recv_queue_size, 12268f000cacSChristoph Hellwig !queue->host_qid); 12278f000cacSChristoph Hellwig } 12288f000cacSChristoph Hellwig out_free_responses: 12298f000cacSChristoph Hellwig nvmet_rdma_free_rsps(queue); 12308f000cacSChristoph Hellwig out_ida_remove: 12318f000cacSChristoph Hellwig ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); 12328f000cacSChristoph Hellwig out_destroy_sq: 12338f000cacSChristoph Hellwig nvmet_sq_destroy(&queue->nvme_sq); 12348f000cacSChristoph Hellwig out_free_queue: 12358f000cacSChristoph Hellwig kfree(queue); 12368f000cacSChristoph Hellwig out_reject: 12378f000cacSChristoph Hellwig nvmet_rdma_cm_reject(cm_id, ret); 12388f000cacSChristoph Hellwig return NULL; 12398f000cacSChristoph Hellwig } 12408f000cacSChristoph Hellwig 12418f000cacSChristoph Hellwig static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) 12428f000cacSChristoph Hellwig { 
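	/*
	 * QP async event handler: IB_EVENT_COMM_EST is forwarded to the
	 * RDMA CM via rdma_notify() so the CM state machine can make
	 * progress; all other QP events are only logged.
	 */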
12438f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue = priv;
12448f000cacSChristoph Hellwig
12458f000cacSChristoph Hellwig switch (event->event) {
12468f000cacSChristoph Hellwig case IB_EVENT_COMM_EST:
12478f000cacSChristoph Hellwig rdma_notify(queue->cm_id, event->event);
12488f000cacSChristoph Hellwig break;
12498f000cacSChristoph Hellwig default:
1250675796beSMax Gurtovoy pr_err("received IB QP event: %s (%d)\n",
1251675796beSMax Gurtovoy ib_event_msg(event->event), event->event);
12528f000cacSChristoph Hellwig break;
12538f000cacSChristoph Hellwig }
12548f000cacSChristoph Hellwig }
12558f000cacSChristoph Hellwig
12568f000cacSChristoph Hellwig static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
12578f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue,
12588f000cacSChristoph Hellwig struct rdma_conn_param *p)
12598f000cacSChristoph Hellwig {
12608f000cacSChristoph Hellwig struct rdma_conn_param param = { };
12618f000cacSChristoph Hellwig struct nvme_rdma_cm_rep priv = { };
12628f000cacSChristoph Hellwig int ret = -ENOMEM;
12638f000cacSChristoph Hellwig
12648f000cacSChristoph Hellwig param.rnr_retry_count = 7;
12658f000cacSChristoph Hellwig param.flow_control = 1;
12668f000cacSChristoph Hellwig param.initiator_depth = min_t(u8, p->initiator_depth,
12678f000cacSChristoph Hellwig queue->dev->device->attrs.max_qp_init_rd_atom);
12688f000cacSChristoph Hellwig param.private_data = &priv;
12698f000cacSChristoph Hellwig param.private_data_len = sizeof(priv);
12708f000cacSChristoph Hellwig priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
12718f000cacSChristoph Hellwig priv.crqsize = cpu_to_le16(queue->recv_queue_size);
12728f000cacSChristoph Hellwig
12738f000cacSChristoph Hellwig ret = rdma_accept(cm_id, &param);
12748f000cacSChristoph Hellwig if (ret)
12758f000cacSChristoph Hellwig pr_err("rdma_accept failed (error code = %d)\n", ret);
12768f000cacSChristoph Hellwig
12778f000cacSChristoph Hellwig return ret;
12788f000cacSChristoph Hellwig }
12798f000cacSChristoph Hellwig
12808f000cacSChristoph Hellwig static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
12818f000cacSChristoph Hellwig struct rdma_cm_event *event)
12828f000cacSChristoph Hellwig {
1283*a032e4f6SSagi Grimberg struct nvmet_rdma_port *port = cm_id->context;
12848f000cacSChristoph Hellwig struct nvmet_rdma_device *ndev;
12858f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue;
12868f000cacSChristoph Hellwig int ret = -EINVAL;
12878f000cacSChristoph Hellwig
12888f000cacSChristoph Hellwig ndev = nvmet_rdma_find_get_device(cm_id);
12898f000cacSChristoph Hellwig if (!ndev) {
12908f000cacSChristoph Hellwig nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
12918f000cacSChristoph Hellwig return -ECONNREFUSED;
12928f000cacSChristoph Hellwig }
12938f000cacSChristoph Hellwig
12948f000cacSChristoph Hellwig queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
12958f000cacSChristoph Hellwig if (!queue) {
12968f000cacSChristoph Hellwig ret = -ENOMEM;
12978f000cacSChristoph Hellwig goto put_device;
12988f000cacSChristoph Hellwig }
1299*a032e4f6SSagi Grimberg queue->port = port->nport;
13008f000cacSChristoph Hellwig
1301777dc823SSagi Grimberg if (queue->host_qid == 0) {
1302777dc823SSagi Grimberg /* Let inflight controller teardown complete */
1303d39aa497SChristoph Hellwig flush_scheduled_work();
1304777dc823SSagi Grimberg }
1305777dc823SSagi Grimberg
13068f000cacSChristoph Hellwig ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1307e1a2ee24SIsrael Rukshin if (ret) {
1308d39aa497SChristoph Hellwig
schedule_work(&queue->release_work); 1309e1a2ee24SIsrael Rukshin /* Destroying rdma_cm id is not needed here */ 1310e1a2ee24SIsrael Rukshin return 0; 1311e1a2ee24SIsrael Rukshin } 13128f000cacSChristoph Hellwig 13138f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 13148f000cacSChristoph Hellwig list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); 13158f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 13168f000cacSChristoph Hellwig 13178f000cacSChristoph Hellwig return 0; 13188f000cacSChristoph Hellwig 13198f000cacSChristoph Hellwig put_device: 13208f000cacSChristoph Hellwig kref_put(&ndev->ref, nvmet_rdma_free_dev); 13218f000cacSChristoph Hellwig 13228f000cacSChristoph Hellwig return ret; 13238f000cacSChristoph Hellwig } 13248f000cacSChristoph Hellwig 13258f000cacSChristoph Hellwig static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) 13268f000cacSChristoph Hellwig { 13278f000cacSChristoph Hellwig unsigned long flags; 13288f000cacSChristoph Hellwig 13298f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13308f000cacSChristoph Hellwig if (queue->state != NVMET_RDMA_Q_CONNECTING) { 13318f000cacSChristoph Hellwig pr_warn("trying to establish a connected queue\n"); 13328f000cacSChristoph Hellwig goto out_unlock; 13338f000cacSChristoph Hellwig } 13348f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_LIVE; 13358f000cacSChristoph Hellwig 13368f000cacSChristoph Hellwig while (!list_empty(&queue->rsp_wait_list)) { 13378f000cacSChristoph Hellwig struct nvmet_rdma_rsp *cmd; 13388f000cacSChristoph Hellwig 13398f000cacSChristoph Hellwig cmd = list_first_entry(&queue->rsp_wait_list, 13408f000cacSChristoph Hellwig struct nvmet_rdma_rsp, wait_list); 13418f000cacSChristoph Hellwig list_del(&cmd->wait_list); 13428f000cacSChristoph Hellwig 13438f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13448f000cacSChristoph Hellwig nvmet_rdma_handle_command(queue, cmd); 13458f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13468f000cacSChristoph Hellwig } 13478f000cacSChristoph Hellwig 13488f000cacSChristoph Hellwig out_unlock: 13498f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13508f000cacSChristoph Hellwig } 13518f000cacSChristoph Hellwig 13528f000cacSChristoph Hellwig static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) 13538f000cacSChristoph Hellwig { 13548f000cacSChristoph Hellwig bool disconnect = false; 13558f000cacSChristoph Hellwig unsigned long flags; 13568f000cacSChristoph Hellwig 13578f000cacSChristoph Hellwig pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); 13588f000cacSChristoph Hellwig 13598f000cacSChristoph Hellwig spin_lock_irqsave(&queue->state_lock, flags); 13608f000cacSChristoph Hellwig switch (queue->state) { 13618f000cacSChristoph Hellwig case NVMET_RDMA_Q_CONNECTING: 13628f000cacSChristoph Hellwig case NVMET_RDMA_Q_LIVE: 13638f000cacSChristoph Hellwig queue->state = NVMET_RDMA_Q_DISCONNECTING; 1364d8f7750aSSagi Grimberg disconnect = true; 13658f000cacSChristoph Hellwig break; 13668f000cacSChristoph Hellwig case NVMET_RDMA_Q_DISCONNECTING: 13678f000cacSChristoph Hellwig break; 13688f000cacSChristoph Hellwig } 13698f000cacSChristoph Hellwig spin_unlock_irqrestore(&queue->state_lock, flags); 13708f000cacSChristoph Hellwig 13718f000cacSChristoph Hellwig if (disconnect) { 13728f000cacSChristoph Hellwig rdma_disconnect(queue->cm_id); 1373d39aa497SChristoph Hellwig schedule_work(&queue->release_work); 
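		/*
		 * The actual teardown happens in nvmet_rdma_release_queue_work();
		 * rdma_destroy_id() must not be called from CM handler context.
		 */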
13748f000cacSChristoph Hellwig }
13758f000cacSChristoph Hellwig }
13768f000cacSChristoph Hellwig
13778f000cacSChristoph Hellwig static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
13788f000cacSChristoph Hellwig {
13798f000cacSChristoph Hellwig bool disconnect = false;
13808f000cacSChristoph Hellwig
13818f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex);
13828f000cacSChristoph Hellwig if (!list_empty(&queue->queue_list)) {
13838f000cacSChristoph Hellwig list_del_init(&queue->queue_list);
13848f000cacSChristoph Hellwig disconnect = true;
13858f000cacSChristoph Hellwig }
13868f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex);
13878f000cacSChristoph Hellwig
13888f000cacSChristoph Hellwig if (disconnect)
13898f000cacSChristoph Hellwig __nvmet_rdma_queue_disconnect(queue);
13908f000cacSChristoph Hellwig }
13918f000cacSChristoph Hellwig
13928f000cacSChristoph Hellwig static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
13938f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue)
13948f000cacSChristoph Hellwig {
13958f000cacSChristoph Hellwig WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
13968f000cacSChristoph Hellwig
1397766dbb17SSagi Grimberg mutex_lock(&nvmet_rdma_queue_mutex);
1398766dbb17SSagi Grimberg if (!list_empty(&queue->queue_list))
1399766dbb17SSagi Grimberg list_del_init(&queue->queue_list);
1400766dbb17SSagi Grimberg mutex_unlock(&nvmet_rdma_queue_mutex);
1401766dbb17SSagi Grimberg
1402766dbb17SSagi Grimberg pr_err("failed to connect queue %d\n", queue->idx);
1403d39aa497SChristoph Hellwig schedule_work(&queue->release_work);
14048f000cacSChristoph Hellwig }
14058f000cacSChristoph Hellwig
1406d8f7750aSSagi Grimberg /**
1407d8f7750aSSagi Grimberg * nvmet_rdma_device_removal() - Handle RDMA device removal
1408f1d4ef7dSSagi Grimberg * @cm_id: rdma_cm id, used for nvmet port
1409d8f7750aSSagi Grimberg * @queue: nvmet rdma queue (cm id qp_context)
1410d8f7750aSSagi Grimberg *
1411d8f7750aSSagi Grimberg * DEVICE_REMOVAL event notifies us that the RDMA device is about
1412f1d4ef7dSSagi Grimberg * to unplug. Note that this event can be generated on a normal
1413f1d4ef7dSSagi Grimberg * queue cm_id and/or a device bound listener cm_id (in which
1414f1d4ef7dSSagi Grimberg * case queue will be null).
1415d8f7750aSSagi Grimberg *
1416f1d4ef7dSSagi Grimberg * We registered an ib_client to handle device removal for queues,
1417f1d4ef7dSSagi Grimberg * so we only need to handle the listening port cm_ids. In that case
1418d8f7750aSSagi Grimberg * we nullify the priv to prevent a double cm_id destruction, and destroy
1419d8f7750aSSagi Grimberg * the cm_id implicitly by returning a non-zero rc to the callout.
1420d8f7750aSSagi Grimberg */
1421d8f7750aSSagi Grimberg static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1422d8f7750aSSagi Grimberg struct nvmet_rdma_queue *queue)
1423d8f7750aSSagi Grimberg {
1424*a032e4f6SSagi Grimberg struct nvmet_rdma_port *port;
1425d8f7750aSSagi Grimberg
1426f1d4ef7dSSagi Grimberg if (queue) {
1427f1d4ef7dSSagi Grimberg /*
1428f1d4ef7dSSagi Grimberg * This is a queue cm_id. We have registered
1429f1d4ef7dSSagi Grimberg * an ib_client to handle queue removal,
1430f1d4ef7dSSagi Grimberg * so don't interfere and just return.
1431f1d4ef7dSSagi Grimberg */
1432f1d4ef7dSSagi Grimberg return 0;
1433f1d4ef7dSSagi Grimberg }
1434f1d4ef7dSSagi Grimberg
1435f1d4ef7dSSagi Grimberg port = cm_id->context;
1436d8f7750aSSagi Grimberg
1437d8f7750aSSagi Grimberg /*
1438d8f7750aSSagi Grimberg * This is a listener cm_id. Make sure that
1439d8f7750aSSagi Grimberg * future remove_port won't invoke a double
1440d8f7750aSSagi Grimberg * cm_id destroy. Use atomic xchg to make sure
1441d8f7750aSSagi Grimberg * we don't compete with remove_port.
1442d8f7750aSSagi Grimberg */
1443*a032e4f6SSagi Grimberg if (xchg(&port->cm_id, NULL) != cm_id)
1444d8f7750aSSagi Grimberg return 0;
1445d8f7750aSSagi Grimberg
1446d8f7750aSSagi Grimberg /*
1447d8f7750aSSagi Grimberg * We need to return 1 so that the core will destroy
1448d8f7750aSSagi Grimberg * its own ID. What a great API design..
1449d8f7750aSSagi Grimberg */
1450d8f7750aSSagi Grimberg return 1;
1451d8f7750aSSagi Grimberg }
1452d8f7750aSSagi Grimberg
14538f000cacSChristoph Hellwig static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
14548f000cacSChristoph Hellwig struct rdma_cm_event *event)
14558f000cacSChristoph Hellwig {
14568f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue = NULL;
14578f000cacSChristoph Hellwig int ret = 0;
14588f000cacSChristoph Hellwig
14598f000cacSChristoph Hellwig if (cm_id->qp)
14608f000cacSChristoph Hellwig queue = cm_id->qp->qp_context;
14618f000cacSChristoph Hellwig
14628f000cacSChristoph Hellwig pr_debug("%s (%d): status %d id %p\n",
14638f000cacSChristoph Hellwig rdma_event_msg(event->event), event->event,
14648f000cacSChristoph Hellwig event->status, cm_id);
14658f000cacSChristoph Hellwig
14668f000cacSChristoph Hellwig switch (event->event) {
14678f000cacSChristoph Hellwig case RDMA_CM_EVENT_CONNECT_REQUEST:
14688f000cacSChristoph Hellwig ret = nvmet_rdma_queue_connect(cm_id, event);
14698f000cacSChristoph Hellwig break;
14708f000cacSChristoph Hellwig case RDMA_CM_EVENT_ESTABLISHED:
14718f000cacSChristoph Hellwig nvmet_rdma_queue_established(queue);
14728f000cacSChristoph Hellwig break;
14738f000cacSChristoph Hellwig case RDMA_CM_EVENT_ADDR_CHANGE:
1474*a032e4f6SSagi Grimberg if (!queue) {
1475*a032e4f6SSagi Grimberg struct nvmet_rdma_port *port = cm_id->context;
1476*a032e4f6SSagi Grimberg
1477*a032e4f6SSagi Grimberg schedule_delayed_work(&port->repair_work, 0);
1478*a032e4f6SSagi Grimberg break;
1479*a032e4f6SSagi Grimberg }
1480*a032e4f6SSagi Grimberg /* FALLTHROUGH */
14818f000cacSChristoph Hellwig case RDMA_CM_EVENT_DISCONNECTED:
14828f000cacSChristoph Hellwig case RDMA_CM_EVENT_TIMEWAIT_EXIT:
14838f000cacSChristoph Hellwig nvmet_rdma_queue_disconnect(queue);
1484d8f7750aSSagi Grimberg break;
1485d8f7750aSSagi Grimberg case RDMA_CM_EVENT_DEVICE_REMOVAL:
1486d8f7750aSSagi Grimberg ret = nvmet_rdma_device_removal(cm_id, queue);
14878f000cacSChristoph Hellwig break;
14888f000cacSChristoph Hellwig case RDMA_CM_EVENT_REJECTED:
1489512fb1b3SSteve Wise pr_debug("Connection rejected: %s\n",
1490512fb1b3SSteve Wise rdma_reject_msg(cm_id, event->status));
1491512fb1b3SSteve Wise /* FALLTHROUGH */
14928f000cacSChristoph Hellwig case RDMA_CM_EVENT_UNREACHABLE:
14938f000cacSChristoph Hellwig case RDMA_CM_EVENT_CONNECT_ERROR:
14948f000cacSChristoph Hellwig nvmet_rdma_queue_connect_fail(cm_id, queue);
14958f000cacSChristoph Hellwig break;
14968f000cacSChristoph Hellwig default:
14978f000cacSChristoph Hellwig pr_err("received unrecognized RDMA CM event %d\n",
14988f000cacSChristoph Hellwig event->event);
14998f000cacSChristoph Hellwig break;
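	/*
	 * A non-zero ret tells the RDMA CM to destroy this cm_id itself,
	 * e.g. after a failed connect request or on listener device removal.
	 */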
15008f000cacSChristoph Hellwig } 15018f000cacSChristoph Hellwig 15028f000cacSChristoph Hellwig return ret; 15038f000cacSChristoph Hellwig } 15048f000cacSChristoph Hellwig 15058f000cacSChristoph Hellwig static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) 15068f000cacSChristoph Hellwig { 15078f000cacSChristoph Hellwig struct nvmet_rdma_queue *queue; 15088f000cacSChristoph Hellwig 15098f000cacSChristoph Hellwig restart: 15108f000cacSChristoph Hellwig mutex_lock(&nvmet_rdma_queue_mutex); 15118f000cacSChristoph Hellwig list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { 15128f000cacSChristoph Hellwig if (queue->nvme_sq.ctrl == ctrl) { 15138f000cacSChristoph Hellwig list_del_init(&queue->queue_list); 15148f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 15158f000cacSChristoph Hellwig 15168f000cacSChristoph Hellwig __nvmet_rdma_queue_disconnect(queue); 15178f000cacSChristoph Hellwig goto restart; 15188f000cacSChristoph Hellwig } 15198f000cacSChristoph Hellwig } 15208f000cacSChristoph Hellwig mutex_unlock(&nvmet_rdma_queue_mutex); 15218f000cacSChristoph Hellwig } 15228f000cacSChristoph Hellwig 1523*a032e4f6SSagi Grimberg static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port) 15248f000cacSChristoph Hellwig { 1525*a032e4f6SSagi Grimberg struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL); 1526*a032e4f6SSagi Grimberg 1527*a032e4f6SSagi Grimberg if (cm_id) 1528*a032e4f6SSagi Grimberg rdma_destroy_id(cm_id); 1529*a032e4f6SSagi Grimberg } 1530*a032e4f6SSagi Grimberg 1531*a032e4f6SSagi Grimberg static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port) 1532*a032e4f6SSagi Grimberg { 1533*a032e4f6SSagi Grimberg struct sockaddr *addr = (struct sockaddr *)&port->addr; 15348f000cacSChristoph Hellwig struct rdma_cm_id *cm_id; 15358f000cacSChristoph Hellwig int ret; 15368f000cacSChristoph Hellwig 15378f000cacSChristoph Hellwig cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port, 15388f000cacSChristoph Hellwig RDMA_PS_TCP, IB_QPT_RC); 15398f000cacSChristoph Hellwig if (IS_ERR(cm_id)) { 15408f000cacSChristoph Hellwig pr_err("CM ID creation failed\n"); 15418f000cacSChristoph Hellwig return PTR_ERR(cm_id); 15428f000cacSChristoph Hellwig } 15438f000cacSChristoph Hellwig 1544670c2a3aSSagi Grimberg /* 1545670c2a3aSSagi Grimberg * Allow both IPv4 and IPv6 sockets to bind a single port 1546670c2a3aSSagi Grimberg * at the same time. 
1547670c2a3aSSagi Grimberg */ 1548670c2a3aSSagi Grimberg ret = rdma_set_afonly(cm_id, 1); 15498f000cacSChristoph Hellwig if (ret) { 1550670c2a3aSSagi Grimberg pr_err("rdma_set_afonly failed (%d)\n", ret); 1551670c2a3aSSagi Grimberg goto out_destroy_id; 1552670c2a3aSSagi Grimberg } 1553670c2a3aSSagi Grimberg 1554*a032e4f6SSagi Grimberg ret = rdma_bind_addr(cm_id, addr); 1555670c2a3aSSagi Grimberg if (ret) { 1556*a032e4f6SSagi Grimberg pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret); 15578f000cacSChristoph Hellwig goto out_destroy_id; 15588f000cacSChristoph Hellwig } 15598f000cacSChristoph Hellwig 15608f000cacSChristoph Hellwig ret = rdma_listen(cm_id, 128); 15618f000cacSChristoph Hellwig if (ret) { 1562*a032e4f6SSagi Grimberg pr_err("listening to %pISpcs failed (%d)\n", addr, ret); 15638f000cacSChristoph Hellwig goto out_destroy_id; 15648f000cacSChristoph Hellwig } 15658f000cacSChristoph Hellwig 1566*a032e4f6SSagi Grimberg port->cm_id = cm_id; 15678f000cacSChristoph Hellwig return 0; 15688f000cacSChristoph Hellwig 15698f000cacSChristoph Hellwig out_destroy_id: 15708f000cacSChristoph Hellwig rdma_destroy_id(cm_id); 15718f000cacSChristoph Hellwig return ret; 15728f000cacSChristoph Hellwig } 15738f000cacSChristoph Hellwig 1574*a032e4f6SSagi Grimberg static void nvmet_rdma_repair_port_work(struct work_struct *w) 15758f000cacSChristoph Hellwig { 1576*a032e4f6SSagi Grimberg struct nvmet_rdma_port *port = container_of(to_delayed_work(w), 1577*a032e4f6SSagi Grimberg struct nvmet_rdma_port, repair_work); 1578*a032e4f6SSagi Grimberg int ret; 15798f000cacSChristoph Hellwig 1580*a032e4f6SSagi Grimberg nvmet_rdma_disable_port(port); 1581*a032e4f6SSagi Grimberg ret = nvmet_rdma_enable_port(port); 1582*a032e4f6SSagi Grimberg if (ret) 1583*a032e4f6SSagi Grimberg schedule_delayed_work(&port->repair_work, 5 * HZ); 1584*a032e4f6SSagi Grimberg } 1585*a032e4f6SSagi Grimberg 1586*a032e4f6SSagi Grimberg static int nvmet_rdma_add_port(struct nvmet_port *nport) 1587*a032e4f6SSagi Grimberg { 1588*a032e4f6SSagi Grimberg struct nvmet_rdma_port *port; 1589*a032e4f6SSagi Grimberg __kernel_sa_family_t af; 1590*a032e4f6SSagi Grimberg int ret; 1591*a032e4f6SSagi Grimberg 1592*a032e4f6SSagi Grimberg port = kzalloc(sizeof(*port), GFP_KERNEL); 1593*a032e4f6SSagi Grimberg if (!port) 1594*a032e4f6SSagi Grimberg return -ENOMEM; 1595*a032e4f6SSagi Grimberg 1596*a032e4f6SSagi Grimberg nport->priv = port; 1597*a032e4f6SSagi Grimberg port->nport = nport; 1598*a032e4f6SSagi Grimberg INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work); 1599*a032e4f6SSagi Grimberg 1600*a032e4f6SSagi Grimberg switch (nport->disc_addr.adrfam) { 1601*a032e4f6SSagi Grimberg case NVMF_ADDR_FAMILY_IP4: 1602*a032e4f6SSagi Grimberg af = AF_INET; 1603*a032e4f6SSagi Grimberg break; 1604*a032e4f6SSagi Grimberg case NVMF_ADDR_FAMILY_IP6: 1605*a032e4f6SSagi Grimberg af = AF_INET6; 1606*a032e4f6SSagi Grimberg break; 1607*a032e4f6SSagi Grimberg default: 1608*a032e4f6SSagi Grimberg pr_err("address family %d not supported\n", 1609*a032e4f6SSagi Grimberg nport->disc_addr.adrfam); 1610*a032e4f6SSagi Grimberg ret = -EINVAL; 1611*a032e4f6SSagi Grimberg goto out_free_port; 1612*a032e4f6SSagi Grimberg } 1613*a032e4f6SSagi Grimberg 1614*a032e4f6SSagi Grimberg if (nport->inline_data_size < 0) { 1615*a032e4f6SSagi Grimberg nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE; 1616*a032e4f6SSagi Grimberg } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) { 1617*a032e4f6SSagi Grimberg pr_warn("inline_data_size %u is too 
large, reducing to %u\n", 1618*a032e4f6SSagi Grimberg nport->inline_data_size, 1619*a032e4f6SSagi Grimberg NVMET_RDMA_MAX_INLINE_DATA_SIZE); 1620*a032e4f6SSagi Grimberg nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; 1621*a032e4f6SSagi Grimberg } 1622*a032e4f6SSagi Grimberg 1623*a032e4f6SSagi Grimberg ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, 1624*a032e4f6SSagi Grimberg nport->disc_addr.trsvcid, &port->addr); 1625*a032e4f6SSagi Grimberg if (ret) { 1626*a032e4f6SSagi Grimberg pr_err("malformed ip/port passed: %s:%s\n", 1627*a032e4f6SSagi Grimberg nport->disc_addr.traddr, nport->disc_addr.trsvcid); 1628*a032e4f6SSagi Grimberg goto out_free_port; 1629*a032e4f6SSagi Grimberg } 1630*a032e4f6SSagi Grimberg 1631*a032e4f6SSagi Grimberg ret = nvmet_rdma_enable_port(port); 1632*a032e4f6SSagi Grimberg if (ret) 1633*a032e4f6SSagi Grimberg goto out_free_port; 1634*a032e4f6SSagi Grimberg 1635*a032e4f6SSagi Grimberg pr_info("enabling port %d (%pISpcs)\n", 1636*a032e4f6SSagi Grimberg le16_to_cpu(nport->disc_addr.portid), 1637*a032e4f6SSagi Grimberg (struct sockaddr *)&port->addr); 1638*a032e4f6SSagi Grimberg 1639*a032e4f6SSagi Grimberg return 0; 1640*a032e4f6SSagi Grimberg 1641*a032e4f6SSagi Grimberg out_free_port: 1642*a032e4f6SSagi Grimberg kfree(port); 1643*a032e4f6SSagi Grimberg return ret; 1644*a032e4f6SSagi Grimberg } 1645*a032e4f6SSagi Grimberg 1646*a032e4f6SSagi Grimberg static void nvmet_rdma_remove_port(struct nvmet_port *nport) 1647*a032e4f6SSagi Grimberg { 1648*a032e4f6SSagi Grimberg struct nvmet_rdma_port *port = nport->priv; 1649*a032e4f6SSagi Grimberg 1650*a032e4f6SSagi Grimberg cancel_delayed_work_sync(&port->repair_work); 1651*a032e4f6SSagi Grimberg nvmet_rdma_disable_port(port); 1652*a032e4f6SSagi Grimberg kfree(port); 16538f000cacSChristoph Hellwig } 16548f000cacSChristoph Hellwig 16554c652685SSagi Grimberg static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, 1656*a032e4f6SSagi Grimberg struct nvmet_port *nport, char *traddr) 16574c652685SSagi Grimberg { 1658*a032e4f6SSagi Grimberg struct nvmet_rdma_port *port = nport->priv; 1659*a032e4f6SSagi Grimberg struct rdma_cm_id *cm_id = port->cm_id; 16604c652685SSagi Grimberg 16614c652685SSagi Grimberg if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { 16624c652685SSagi Grimberg struct nvmet_rdma_rsp *rsp = 16634c652685SSagi Grimberg container_of(req, struct nvmet_rdma_rsp, req); 16644c652685SSagi Grimberg struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; 16654c652685SSagi Grimberg struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; 16664c652685SSagi Grimberg 16674c652685SSagi Grimberg sprintf(traddr, "%pISc", addr); 16684c652685SSagi Grimberg } else { 1669*a032e4f6SSagi Grimberg memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); 16704c652685SSagi Grimberg } 16714c652685SSagi Grimberg } 16724c652685SSagi Grimberg 1673ec6d20e1SMax Gurtovoy static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) 1674ec6d20e1SMax Gurtovoy { 1675ec6d20e1SMax Gurtovoy return NVMET_RDMA_MAX_MDTS; 1676ec6d20e1SMax Gurtovoy } 1677ec6d20e1SMax Gurtovoy 1678e929f06dSChristoph Hellwig static const struct nvmet_fabrics_ops nvmet_rdma_ops = { 16798f000cacSChristoph Hellwig .owner = THIS_MODULE, 16808f000cacSChristoph Hellwig .type = NVMF_TRTYPE_RDMA, 16818f000cacSChristoph Hellwig .msdbd = 1, 16828f000cacSChristoph Hellwig .has_keyed_sgls = 1, 16838f000cacSChristoph Hellwig .add_port = nvmet_rdma_add_port, 16848f000cacSChristoph Hellwig .remove_port = nvmet_rdma_remove_port, 
16858f000cacSChristoph Hellwig .queue_response = nvmet_rdma_queue_response, 16868f000cacSChristoph Hellwig .delete_ctrl = nvmet_rdma_delete_ctrl, 16874c652685SSagi Grimberg .disc_traddr = nvmet_rdma_disc_port_addr, 1688ec6d20e1SMax Gurtovoy .get_mdts = nvmet_rdma_get_mdts, 16898f000cacSChristoph Hellwig }; 16908f000cacSChristoph Hellwig 1691f1d4ef7dSSagi Grimberg static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) 1692f1d4ef7dSSagi Grimberg { 169343b92fd2SIsrael Rukshin struct nvmet_rdma_queue *queue, *tmp; 1694a3dd7d00SMax Gurtovoy struct nvmet_rdma_device *ndev; 1695a3dd7d00SMax Gurtovoy bool found = false; 1696f1d4ef7dSSagi Grimberg 1697a3dd7d00SMax Gurtovoy mutex_lock(&device_list_mutex); 1698a3dd7d00SMax Gurtovoy list_for_each_entry(ndev, &device_list, entry) { 1699a3dd7d00SMax Gurtovoy if (ndev->device == ib_device) { 1700a3dd7d00SMax Gurtovoy found = true; 1701a3dd7d00SMax Gurtovoy break; 1702a3dd7d00SMax Gurtovoy } 1703a3dd7d00SMax Gurtovoy } 1704a3dd7d00SMax Gurtovoy mutex_unlock(&device_list_mutex); 1705a3dd7d00SMax Gurtovoy 1706a3dd7d00SMax Gurtovoy if (!found) 1707a3dd7d00SMax Gurtovoy return; 1708a3dd7d00SMax Gurtovoy 1709a3dd7d00SMax Gurtovoy /* 1710a3dd7d00SMax Gurtovoy * IB Device that is used by nvmet controllers is being removed, 1711a3dd7d00SMax Gurtovoy * delete all queues using this device. 1712a3dd7d00SMax Gurtovoy */ 1713f1d4ef7dSSagi Grimberg mutex_lock(&nvmet_rdma_queue_mutex); 171443b92fd2SIsrael Rukshin list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, 171543b92fd2SIsrael Rukshin queue_list) { 1716f1d4ef7dSSagi Grimberg if (queue->dev->device != ib_device) 1717f1d4ef7dSSagi Grimberg continue; 1718f1d4ef7dSSagi Grimberg 1719f1d4ef7dSSagi Grimberg pr_info("Removing queue %d\n", queue->idx); 172043b92fd2SIsrael Rukshin list_del_init(&queue->queue_list); 1721f1d4ef7dSSagi Grimberg __nvmet_rdma_queue_disconnect(queue); 1722f1d4ef7dSSagi Grimberg } 1723f1d4ef7dSSagi Grimberg mutex_unlock(&nvmet_rdma_queue_mutex); 1724f1d4ef7dSSagi Grimberg 1725f1d4ef7dSSagi Grimberg flush_scheduled_work(); 1726f1d4ef7dSSagi Grimberg } 1727f1d4ef7dSSagi Grimberg 1728f1d4ef7dSSagi Grimberg static struct ib_client nvmet_rdma_ib_client = { 1729f1d4ef7dSSagi Grimberg .name = "nvmet_rdma", 1730f1d4ef7dSSagi Grimberg .remove = nvmet_rdma_remove_one 1731f1d4ef7dSSagi Grimberg }; 1732f1d4ef7dSSagi Grimberg 17338f000cacSChristoph Hellwig static int __init nvmet_rdma_init(void) 17348f000cacSChristoph Hellwig { 1735f1d4ef7dSSagi Grimberg int ret; 1736f1d4ef7dSSagi Grimberg 1737f1d4ef7dSSagi Grimberg ret = ib_register_client(&nvmet_rdma_ib_client); 1738f1d4ef7dSSagi Grimberg if (ret) 1739f1d4ef7dSSagi Grimberg return ret; 1740f1d4ef7dSSagi Grimberg 1741f1d4ef7dSSagi Grimberg ret = nvmet_register_transport(&nvmet_rdma_ops); 1742f1d4ef7dSSagi Grimberg if (ret) 1743f1d4ef7dSSagi Grimberg goto err_ib_client; 1744f1d4ef7dSSagi Grimberg 1745f1d4ef7dSSagi Grimberg return 0; 1746f1d4ef7dSSagi Grimberg 1747f1d4ef7dSSagi Grimberg err_ib_client: 1748f1d4ef7dSSagi Grimberg ib_unregister_client(&nvmet_rdma_ib_client); 1749f1d4ef7dSSagi Grimberg return ret; 17508f000cacSChristoph Hellwig } 17518f000cacSChristoph Hellwig 17528f000cacSChristoph Hellwig static void __exit nvmet_rdma_exit(void) 17538f000cacSChristoph Hellwig { 17548f000cacSChristoph Hellwig nvmet_unregister_transport(&nvmet_rdma_ops); 1755f1d4ef7dSSagi Grimberg ib_unregister_client(&nvmet_rdma_ib_client); 1756cb4876e8SSagi Grimberg WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); 17578f000cacSChristoph 
Hellwig ida_destroy(&nvmet_rdma_queue_ida); 17588f000cacSChristoph Hellwig } 17598f000cacSChristoph Hellwig 17608f000cacSChristoph Hellwig module_init(nvmet_rdma_init); 17618f000cacSChristoph Hellwig module_exit(nvmet_rdma_exit); 17628f000cacSChristoph Hellwig 17638f000cacSChristoph Hellwig MODULE_LICENSE("GPL v2"); 17648f000cacSChristoph Hellwig MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */ 1765