xref: /openbmc/linux/drivers/nvme/target/rdma.c (revision 23f96d1f15a70e2e8ba5449d1c77b634426c4b80)
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE

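/*
 * One RECV buffer: sge[0] holds the 64-byte command capsule, and sge[1]
 * (I/O queues only) holds up to a page of inline data received together
 * with the SQE.
 */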
struct nvmet_rdma_cmd {
	struct ib_sge		sge[2];
	struct ib_cqe		cqe;
	struct ib_recv_wr	wr;
	struct scatterlist	inline_sg;
	struct page		*inline_page;
	struct nvme_command	*nvme_cmd;
	struct nvmet_rdma_queue	*queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

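/*
 * Per-exchange context: pairs the received command with the response SEND,
 * the optional RDMA READ/WRITE context (rw) and the nvmet request itself.
 */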
struct nvmet_rdma_rsp {
	struct ib_sge		send_sge;
	struct ib_cqe		send_cqe;
	struct ib_send_wr	send_wr;

	struct nvmet_rdma_cmd	*cmd;
	struct nvmet_rdma_queue	*queue;

	struct ib_cqe		read_cqe;
	struct rdma_rw_ctx	rw;

	struct nvmet_req	req;

	u8			n_rdma;
	u32			flags;
	u32			invalidate_rkey;

	struct list_head	wait_list;
	struct list_head	free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

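/*
 * "Data in" means data still has to be pulled from host memory with an
 * RDMA READ before execution; "data out" means data is pushed back to the
 * host with RDMA WRITEs before the response SEND. Inline data needs neither.
 */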
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

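/*
 * Set up one RECV command buffer: DMA-map the command capsule for sge[0]
 * and, for I/O queues, an inline data page for sge[1].
 */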
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
}

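/*
 * Re-run commands that were parked because the send queue ran out of work
 * request slots; stop again as soon as one of them still cannot be posted.
 */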
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

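/*
 * Completion path common to SEND-done and error handling: return the send
 * queue credits, tear down any rdma_rw context, free a dynamically allocated
 * SGL, and retry commands that were waiting for WR slots.
 */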
static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		sgl_free(rsp->req.sg);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We haven't set up the controller yet in case of an admin
		 * connect error, so just disconnect and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
	}
}

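/*
 * Queue the NVMe completion: repost the RECV buffer first, then post the
 * response SEND, chaining any RDMA WRITE WRs for data out in front of it.
 * SEND_WITH_INV is used when the host asked for remote key invalidation.
 */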
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, NULL)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	nvmet_req_execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

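/*
 * Inline SGL: the data already arrived in the RECV buffer's inline page;
 * for this descriptor subtype the SGL address field carries the offset
 * into that page.
 */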
static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

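/*
 * Keyed SGL: allocate a local scatterlist for the transfer and set up an
 * rdma_rw context against the host's (addr, key). rdma_rw_ctx_init() returns
 * the number of work requests the transfer will consume, tracked in n_rdma.
 */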
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	/* no data command? */
	if (!len)
		return 0;

	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
	if (!rsp->req.sg)
		return NVME_SC_INTERNAL;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->req.transfer_len += len;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		nvmet_req_execute(&rsp->req);
	}

	return true;
}

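/*
 * Start processing a freshly received command: sync the DMA buffers, parse
 * the SGL, and either execute it now or park it on the WR wait list if the
 * send queue cannot accommodate the required work requests yet.
 */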
static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

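/*
 * Optional shared receive queue (use_srq=1): one pool of RECV command
 * buffers is shared by all queues on the device instead of per-queue
 * command arrays. The 4095-entry size appears to be a placeholder, as the
 * XXX note below indicates it still needs tuning.
 */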
static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

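/*
 * Create the CQ and RC QP for a queue. The CQ reserves one entry per RECV
 * plus two per send-queue slot (the RDMA READ/WRITE completion and the
 * response SEND), with one extra entry, and the send/recv queues each get
 * an extra WR for draining.
 */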
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp *qp = queue->cm_id->qp;

	ib_drain_qp(qp);
	rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(qp);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

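/*
 * The connect request's RDMA/CM private data carries the NVMe over Fabrics
 * connect parameters (record format, queue ID, host SQ/RQ sizes) that are
 * used below to size the target-side queues.
 */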
9508f000cacSChristoph Hellwig static int
9518f000cacSChristoph Hellwig nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
9528f000cacSChristoph Hellwig 				struct nvmet_rdma_queue *queue)
9538f000cacSChristoph Hellwig {
9548f000cacSChristoph Hellwig 	struct nvme_rdma_cm_req *req;
9558f000cacSChristoph Hellwig 
9568f000cacSChristoph Hellwig 	req = (struct nvme_rdma_cm_req *)conn->private_data;
9578f000cacSChristoph Hellwig 	if (!req || conn->private_data_len == 0)
9588f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_LEN;
9598f000cacSChristoph Hellwig 
9608f000cacSChristoph Hellwig 	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
9618f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_RECFMT;
9628f000cacSChristoph Hellwig 
9638f000cacSChristoph Hellwig 	queue->host_qid = le16_to_cpu(req->qid);
9648f000cacSChristoph Hellwig 
9658f000cacSChristoph Hellwig 	/*
966b825b44cSJay Freyensee 	 * req->hsqsize corresponds to our recv queue size plus 1
9678f000cacSChristoph Hellwig 	 * req->hrqsize corresponds to our send queue size
9688f000cacSChristoph Hellwig 	 */
969b825b44cSJay Freyensee 	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
9708f000cacSChristoph Hellwig 	queue->send_queue_size = le16_to_cpu(req->hrqsize);
9718f000cacSChristoph Hellwig 
9727aa1f427SSagi Grimberg 	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
9738f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_HSQSIZE;
9748f000cacSChristoph Hellwig 
9758f000cacSChristoph Hellwig 	/* XXX: Should we enforce some kind of max for IO queues? */
9768f000cacSChristoph Hellwig 
9778f000cacSChristoph Hellwig 	return 0;
9788f000cacSChristoph Hellwig }
9798f000cacSChristoph Hellwig 
9808f000cacSChristoph Hellwig static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
9818f000cacSChristoph Hellwig 				enum nvme_rdma_cm_status status)
9828f000cacSChristoph Hellwig {
9838f000cacSChristoph Hellwig 	struct nvme_rdma_cm_rej rej;
9848f000cacSChristoph Hellwig 
9857a01a6eaSMax Gurtovoy 	pr_debug("rejecting connect request: status %d (%s)\n",
9867a01a6eaSMax Gurtovoy 		 status, nvme_rdma_cm_msg(status));
9877a01a6eaSMax Gurtovoy 
9888f000cacSChristoph Hellwig 	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
9898f000cacSChristoph Hellwig 	rej.sts = cpu_to_le16(status);
9908f000cacSChristoph Hellwig 
9918f000cacSChristoph Hellwig 	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
9928f000cacSChristoph Hellwig }
9938f000cacSChristoph Hellwig 
9948f000cacSChristoph Hellwig static struct nvmet_rdma_queue *
9958f000cacSChristoph Hellwig nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
9968f000cacSChristoph Hellwig 		struct rdma_cm_id *cm_id,
9978f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
9988f000cacSChristoph Hellwig {
9998f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
10008f000cacSChristoph Hellwig 	int ret;
10018f000cacSChristoph Hellwig 
10028f000cacSChristoph Hellwig 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10038f000cacSChristoph Hellwig 	if (!queue) {
10048f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
10058f000cacSChristoph Hellwig 		goto out_reject;
10068f000cacSChristoph Hellwig 	}
10078f000cacSChristoph Hellwig 
10088f000cacSChristoph Hellwig 	ret = nvmet_sq_init(&queue->nvme_sq);
100970d4281cSBart Van Assche 	if (ret) {
101070d4281cSBart Van Assche 		ret = NVME_RDMA_CM_NO_RSC;
10118f000cacSChristoph Hellwig 		goto out_free_queue;
101270d4281cSBart Van Assche 	}
10138f000cacSChristoph Hellwig 
10148f000cacSChristoph Hellwig 	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
10158f000cacSChristoph Hellwig 	if (ret)
10168f000cacSChristoph Hellwig 		goto out_destroy_sq;
10178f000cacSChristoph Hellwig 
10188f000cacSChristoph Hellwig 	/*
10198f000cacSChristoph Hellwig 	 * Schedules the actual release because calling rdma_destroy_id from
10208f000cacSChristoph Hellwig 	 * inside a CM callback would trigger a deadlock. (great API design..)
10218f000cacSChristoph Hellwig 	 */
10228f000cacSChristoph Hellwig 	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
10238f000cacSChristoph Hellwig 	queue->dev = ndev;
10248f000cacSChristoph Hellwig 	queue->cm_id = cm_id;
10258f000cacSChristoph Hellwig 
10268f000cacSChristoph Hellwig 	spin_lock_init(&queue->state_lock);
10278f000cacSChristoph Hellwig 	queue->state = NVMET_RDMA_Q_CONNECTING;
10288f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->rsp_wait_list);
10298f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
10308f000cacSChristoph Hellwig 	spin_lock_init(&queue->rsp_wr_wait_lock);
10318f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->free_rsps);
10328f000cacSChristoph Hellwig 	spin_lock_init(&queue->rsps_lock);
1033766dbb17SSagi Grimberg 	INIT_LIST_HEAD(&queue->queue_list);
10348f000cacSChristoph Hellwig 
10358f000cacSChristoph Hellwig 	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
10368f000cacSChristoph Hellwig 	if (queue->idx < 0) {
10378f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
10386ccaeb56SChristophe JAILLET 		goto out_destroy_sq;
10398f000cacSChristoph Hellwig 	}
10408f000cacSChristoph Hellwig 
10418f000cacSChristoph Hellwig 	ret = nvmet_rdma_alloc_rsps(queue);
10428f000cacSChristoph Hellwig 	if (ret) {
10438f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
10448f000cacSChristoph Hellwig 		goto out_ida_remove;
10458f000cacSChristoph Hellwig 	}
10468f000cacSChristoph Hellwig 
10478f000cacSChristoph Hellwig 	if (!ndev->srq) {
10488f000cacSChristoph Hellwig 		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
10498f000cacSChristoph Hellwig 				queue->recv_queue_size,
10508f000cacSChristoph Hellwig 				!queue->host_qid);
10518f000cacSChristoph Hellwig 		if (IS_ERR(queue->cmds)) {
10528f000cacSChristoph Hellwig 			ret = NVME_RDMA_CM_NO_RSC;
10538f000cacSChristoph Hellwig 			goto out_free_responses;
10548f000cacSChristoph Hellwig 		}
10558f000cacSChristoph Hellwig 	}
10568f000cacSChristoph Hellwig 
10578f000cacSChristoph Hellwig 	ret = nvmet_rdma_create_queue_ib(queue);
10588f000cacSChristoph Hellwig 	if (ret) {
10598f000cacSChristoph Hellwig 		pr_err("%s: creating RDMA queue failed (%d).\n",
10608f000cacSChristoph Hellwig 			__func__, ret);
10618f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
10628f000cacSChristoph Hellwig 		goto out_free_cmds;
10638f000cacSChristoph Hellwig 	}
10648f000cacSChristoph Hellwig 
10658f000cacSChristoph Hellwig 	return queue;
10668f000cacSChristoph Hellwig 
10678f000cacSChristoph Hellwig out_free_cmds:
10688f000cacSChristoph Hellwig 	if (!ndev->srq) {
10698f000cacSChristoph Hellwig 		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
10708f000cacSChristoph Hellwig 				queue->recv_queue_size,
10718f000cacSChristoph Hellwig 				!queue->host_qid);
10728f000cacSChristoph Hellwig 	}
10738f000cacSChristoph Hellwig out_free_responses:
10748f000cacSChristoph Hellwig 	nvmet_rdma_free_rsps(queue);
10758f000cacSChristoph Hellwig out_ida_remove:
10768f000cacSChristoph Hellwig 	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
10778f000cacSChristoph Hellwig out_destroy_sq:
10788f000cacSChristoph Hellwig 	nvmet_sq_destroy(&queue->nvme_sq);
10798f000cacSChristoph Hellwig out_free_queue:
10808f000cacSChristoph Hellwig 	kfree(queue);
10818f000cacSChristoph Hellwig out_reject:
10828f000cacSChristoph Hellwig 	nvmet_rdma_cm_reject(cm_id, ret);
10838f000cacSChristoph Hellwig 	return NULL;
10848f000cacSChristoph Hellwig }
10858f000cacSChristoph Hellwig 
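/*
 * QP async event handler: forward IB_EVENT_COMM_EST to the RDMA CM so the
 * connection can complete even if the first request arrives before the RTU,
 * and just log any other QP event.
 */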
10868f000cacSChristoph Hellwig static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
10878f000cacSChristoph Hellwig {
10888f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue = priv;
10898f000cacSChristoph Hellwig 
10908f000cacSChristoph Hellwig 	switch (event->event) {
10918f000cacSChristoph Hellwig 	case IB_EVENT_COMM_EST:
10928f000cacSChristoph Hellwig 		rdma_notify(queue->cm_id, event->event);
10938f000cacSChristoph Hellwig 		break;
10948f000cacSChristoph Hellwig 	default:
1095675796beSMax Gurtovoy 		pr_err("received IB QP event: %s (%d)\n",
1096675796beSMax Gurtovoy 		       ib_event_msg(event->event), event->event);
10978f000cacSChristoph Hellwig 		break;
10988f000cacSChristoph Hellwig 	}
10998f000cacSChristoph Hellwig }
11008f000cacSChristoph Hellwig 
11018f000cacSChristoph Hellwig static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
11028f000cacSChristoph Hellwig 		struct nvmet_rdma_queue *queue,
11038f000cacSChristoph Hellwig 		struct rdma_conn_param *p)
11048f000cacSChristoph Hellwig {
11058f000cacSChristoph Hellwig 	struct rdma_conn_param  param = { };
11068f000cacSChristoph Hellwig 	struct nvme_rdma_cm_rep priv = { };
11078f000cacSChristoph Hellwig 	int ret = -ENOMEM;
11088f000cacSChristoph Hellwig 
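	/*
	 * An rnr_retry_count of 7 means retry indefinitely on
	 * receiver-not-ready NAKs; the requested initiator depth is
	 * clamped to what this device can initiate for RDMA reads/atomics.
	 */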
11098f000cacSChristoph Hellwig 	param.rnr_retry_count = 7;
11108f000cacSChristoph Hellwig 	param.flow_control = 1;
11118f000cacSChristoph Hellwig 	param.initiator_depth = min_t(u8, p->initiator_depth,
11128f000cacSChristoph Hellwig 		queue->dev->device->attrs.max_qp_init_rd_atom);
11138f000cacSChristoph Hellwig 	param.private_data = &priv;
11148f000cacSChristoph Hellwig 	param.private_data_len = sizeof(priv);
11158f000cacSChristoph Hellwig 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
11168f000cacSChristoph Hellwig 	priv.crqsize = cpu_to_le16(queue->recv_queue_size);
11178f000cacSChristoph Hellwig 
11188f000cacSChristoph Hellwig 	ret = rdma_accept(cm_id, &param);
11198f000cacSChristoph Hellwig 	if (ret)
11208f000cacSChristoph Hellwig 		pr_err("rdma_accept failed (error code = %d)\n", ret);
11218f000cacSChristoph Hellwig 
11228f000cacSChristoph Hellwig 	return ret;
11238f000cacSChristoph Hellwig }
11248f000cacSChristoph Hellwig 
11258f000cacSChristoph Hellwig static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
11268f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
11278f000cacSChristoph Hellwig {
11288f000cacSChristoph Hellwig 	struct nvmet_rdma_device *ndev;
11298f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
11308f000cacSChristoph Hellwig 	int ret = -EINVAL;
11318f000cacSChristoph Hellwig 
11328f000cacSChristoph Hellwig 	ndev = nvmet_rdma_find_get_device(cm_id);
11338f000cacSChristoph Hellwig 	if (!ndev) {
11348f000cacSChristoph Hellwig 		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
11358f000cacSChristoph Hellwig 		return -ECONNREFUSED;
11368f000cacSChristoph Hellwig 	}
11378f000cacSChristoph Hellwig 
11388f000cacSChristoph Hellwig 	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
11398f000cacSChristoph Hellwig 	if (!queue) {
11408f000cacSChristoph Hellwig 		ret = -ENOMEM;
11418f000cacSChristoph Hellwig 		goto put_device;
11428f000cacSChristoph Hellwig 	}
11438f000cacSChristoph Hellwig 	queue->port = cm_id->context;
11448f000cacSChristoph Hellwig 
1145777dc823SSagi Grimberg 	if (queue->host_qid == 0) {
1146777dc823SSagi Grimberg 		/* Let inflight controller teardown complete */
1147777dc823SSagi Grimberg 		flush_scheduled_work();
1148777dc823SSagi Grimberg 	}
1149777dc823SSagi Grimberg 
11508f000cacSChristoph Hellwig 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1151e1a2ee24SIsrael Rukshin 	if (ret) {
1152e1a2ee24SIsrael Rukshin 		schedule_work(&queue->release_work);
1153e1a2ee24SIsrael Rukshin 		/* Destroying rdma_cm id is not needed here */
1154e1a2ee24SIsrael Rukshin 		return 0;
1155e1a2ee24SIsrael Rukshin 	}
11568f000cacSChristoph Hellwig 
11578f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
11588f000cacSChristoph Hellwig 	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
11598f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
11608f000cacSChristoph Hellwig 
11618f000cacSChristoph Hellwig 	return 0;
11628f000cacSChristoph Hellwig 
11638f000cacSChristoph Hellwig put_device:
11648f000cacSChristoph Hellwig 	kref_put(&ndev->ref, nvmet_rdma_free_dev);
11658f000cacSChristoph Hellwig 
11668f000cacSChristoph Hellwig 	return ret;
11678f000cacSChristoph Hellwig }
11688f000cacSChristoph Hellwig 
11698f000cacSChristoph Hellwig static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
11708f000cacSChristoph Hellwig {
11718f000cacSChristoph Hellwig 	unsigned long flags;
11728f000cacSChristoph Hellwig 
11738f000cacSChristoph Hellwig 	spin_lock_irqsave(&queue->state_lock, flags);
11748f000cacSChristoph Hellwig 	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
11758f000cacSChristoph Hellwig 		pr_warn("trying to establish a connected queue\n");
11768f000cacSChristoph Hellwig 		goto out_unlock;
11778f000cacSChristoph Hellwig 	}
11788f000cacSChristoph Hellwig 	queue->state = NVMET_RDMA_Q_LIVE;
11798f000cacSChristoph Hellwig 
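	/* Handle commands that arrived while the queue was still connecting. */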
11808f000cacSChristoph Hellwig 	while (!list_empty(&queue->rsp_wait_list)) {
11818f000cacSChristoph Hellwig 		struct nvmet_rdma_rsp *cmd;
11828f000cacSChristoph Hellwig 
11838f000cacSChristoph Hellwig 		cmd = list_first_entry(&queue->rsp_wait_list,
11848f000cacSChristoph Hellwig 					struct nvmet_rdma_rsp, wait_list);
11858f000cacSChristoph Hellwig 		list_del(&cmd->wait_list);
11868f000cacSChristoph Hellwig 
11878f000cacSChristoph Hellwig 		spin_unlock_irqrestore(&queue->state_lock, flags);
11888f000cacSChristoph Hellwig 		nvmet_rdma_handle_command(queue, cmd);
11898f000cacSChristoph Hellwig 		spin_lock_irqsave(&queue->state_lock, flags);
11908f000cacSChristoph Hellwig 	}
11918f000cacSChristoph Hellwig 
11928f000cacSChristoph Hellwig out_unlock:
11938f000cacSChristoph Hellwig 	spin_unlock_irqrestore(&queue->state_lock, flags);
11948f000cacSChristoph Hellwig }
11958f000cacSChristoph Hellwig 
11968f000cacSChristoph Hellwig static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
11978f000cacSChristoph Hellwig {
11988f000cacSChristoph Hellwig 	bool disconnect = false;
11998f000cacSChristoph Hellwig 	unsigned long flags;
12008f000cacSChristoph Hellwig 
12018f000cacSChristoph Hellwig 	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
12028f000cacSChristoph Hellwig 
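	/*
	 * Only the first transition out of CONNECTING/LIVE performs the
	 * disconnect and schedules the deferred queue release.
	 */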
12038f000cacSChristoph Hellwig 	spin_lock_irqsave(&queue->state_lock, flags);
12048f000cacSChristoph Hellwig 	switch (queue->state) {
12058f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_CONNECTING:
12068f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_LIVE:
12078f000cacSChristoph Hellwig 		queue->state = NVMET_RDMA_Q_DISCONNECTING;
1208d8f7750aSSagi Grimberg 		disconnect = true;
12098f000cacSChristoph Hellwig 		break;
12108f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_DISCONNECTING:
12118f000cacSChristoph Hellwig 		break;
12128f000cacSChristoph Hellwig 	}
12138f000cacSChristoph Hellwig 	spin_unlock_irqrestore(&queue->state_lock, flags);
12148f000cacSChristoph Hellwig 
12158f000cacSChristoph Hellwig 	if (disconnect) {
12168f000cacSChristoph Hellwig 		rdma_disconnect(queue->cm_id);
12178f000cacSChristoph Hellwig 		schedule_work(&queue->release_work);
12188f000cacSChristoph Hellwig 	}
12198f000cacSChristoph Hellwig }
12208f000cacSChristoph Hellwig 
12218f000cacSChristoph Hellwig static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
12228f000cacSChristoph Hellwig {
12238f000cacSChristoph Hellwig 	bool disconnect = false;
12248f000cacSChristoph Hellwig 
12258f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
12268f000cacSChristoph Hellwig 	if (!list_empty(&queue->queue_list)) {
12278f000cacSChristoph Hellwig 		list_del_init(&queue->queue_list);
12288f000cacSChristoph Hellwig 		disconnect = true;
12298f000cacSChristoph Hellwig 	}
12308f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
12318f000cacSChristoph Hellwig 
12328f000cacSChristoph Hellwig 	if (disconnect)
12338f000cacSChristoph Hellwig 		__nvmet_rdma_queue_disconnect(queue);
12348f000cacSChristoph Hellwig }
12358f000cacSChristoph Hellwig 
12368f000cacSChristoph Hellwig static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
12378f000cacSChristoph Hellwig 		struct nvmet_rdma_queue *queue)
12388f000cacSChristoph Hellwig {
12398f000cacSChristoph Hellwig 	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
12408f000cacSChristoph Hellwig 
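	/*
	 * The queue may already be on nvmet_rdma_queue_list; unlink it
	 * before scheduling the release work.
	 */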
1241766dbb17SSagi Grimberg 	mutex_lock(&nvmet_rdma_queue_mutex);
1242766dbb17SSagi Grimberg 	if (!list_empty(&queue->queue_list))
1243766dbb17SSagi Grimberg 		list_del_init(&queue->queue_list);
1244766dbb17SSagi Grimberg 	mutex_unlock(&nvmet_rdma_queue_mutex);
1245766dbb17SSagi Grimberg 
1246766dbb17SSagi Grimberg 	pr_err("failed to connect queue %d\n", queue->idx);
12478f000cacSChristoph Hellwig 	schedule_work(&queue->release_work);
12488f000cacSChristoph Hellwig }
12498f000cacSChristoph Hellwig 
1250d8f7750aSSagi Grimberg /**
1251d8f7750aSSagi Grimberg  * nvmet_rdma_device_removal() - Handle RDMA device removal
1252f1d4ef7dSSagi Grimberg  * @cm_id:	rdma_cm id, used for nvmet port
1253d8f7750aSSagi Grimberg  * @queue:      nvmet rdma queue (cm id qp_context)
1254d8f7750aSSagi Grimberg  *
1255d8f7750aSSagi Grimberg  * DEVICE_REMOVAL event notifies us that the RDMA device is about
1256f1d4ef7dSSagi Grimberg  * to be unplugged. Note that this event can be generated on a normal
1257f1d4ef7dSSagi Grimberg  * queue cm_id or on a device-bound listener cm_id (in which case
1258f1d4ef7dSSagi Grimberg  * queue will be NULL).
1259d8f7750aSSagi Grimberg  *
1260f1d4ef7dSSagi Grimberg  * We registered an ib_client to handle device removal for queues,
1261f1d4ef7dSSagi Grimberg  * so we only need to handle the listening port cm_ids. In this case
1262d8f7750aSSagi Grimberg  * we nullify the priv to prevent double cm_id destruction and destroying
1263d8f7750aSSagi Grimberg  * the cm_id implicitly by returning a non-zero rc to the callout.
1264d8f7750aSSagi Grimberg  */
1265d8f7750aSSagi Grimberg static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1266d8f7750aSSagi Grimberg 		struct nvmet_rdma_queue *queue)
1267d8f7750aSSagi Grimberg {
1268f1d4ef7dSSagi Grimberg 	struct nvmet_port *port;
1269d8f7750aSSagi Grimberg 
1270f1d4ef7dSSagi Grimberg 	if (queue) {
1271f1d4ef7dSSagi Grimberg 		/*
1272f1d4ef7dSSagi Grimberg 		 * This is a queue cm_id. We have registered
1273f1d4ef7dSSagi Grimberg 		 * an ib_client to handle queue removal,
1274f1d4ef7dSSagi Grimberg 		 * so don't interfere and just return.
1275f1d4ef7dSSagi Grimberg 		 */
1276f1d4ef7dSSagi Grimberg 		return 0;
1277f1d4ef7dSSagi Grimberg 	}
1278f1d4ef7dSSagi Grimberg 
1279f1d4ef7dSSagi Grimberg 	port = cm_id->context;
1280d8f7750aSSagi Grimberg 
1281d8f7750aSSagi Grimberg 	/*
1282d8f7750aSSagi Grimberg 	 * This is a listener cm_id. Make sure that
1283d8f7750aSSagi Grimberg 	 * future remove_port won't invoke a double
1284d8f7750aSSagi Grimberg 	 * cm_id destroy. Use atomic xchg to make sure
1285d8f7750aSSagi Grimberg 	 * we don't compete with remove_port.
1286d8f7750aSSagi Grimberg 	 */
1287d8f7750aSSagi Grimberg 	if (xchg(&port->priv, NULL) != cm_id)
1288d8f7750aSSagi Grimberg 		return 0;
1289d8f7750aSSagi Grimberg 
1290d8f7750aSSagi Grimberg 	/*
1291d8f7750aSSagi Grimberg 	 * We need to return 1 so that the core will destroy
1292d8f7750aSSagi Grimberg 	 * its own ID.  What a great API design..
1293d8f7750aSSagi Grimberg 	 */
1294d8f7750aSSagi Grimberg 	return 1;
1295d8f7750aSSagi Grimberg }
1296d8f7750aSSagi Grimberg 
12978f000cacSChristoph Hellwig static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
12988f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
12998f000cacSChristoph Hellwig {
13008f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue = NULL;
13018f000cacSChristoph Hellwig 	int ret = 0;
13028f000cacSChristoph Hellwig 
13038f000cacSChristoph Hellwig 	if (cm_id->qp)
13048f000cacSChristoph Hellwig 		queue = cm_id->qp->qp_context;
13058f000cacSChristoph Hellwig 
13068f000cacSChristoph Hellwig 	pr_debug("%s (%d): status %d id %p\n",
13078f000cacSChristoph Hellwig 		rdma_event_msg(event->event), event->event,
13088f000cacSChristoph Hellwig 		event->status, cm_id);
13098f000cacSChristoph Hellwig 
13108f000cacSChristoph Hellwig 	switch (event->event) {
13118f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_CONNECT_REQUEST:
13128f000cacSChristoph Hellwig 		ret = nvmet_rdma_queue_connect(cm_id, event);
13138f000cacSChristoph Hellwig 		break;
13148f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_ESTABLISHED:
13158f000cacSChristoph Hellwig 		nvmet_rdma_queue_established(queue);
13168f000cacSChristoph Hellwig 		break;
13178f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_ADDR_CHANGE:
13188f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_DISCONNECTED:
13198f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
13208f000cacSChristoph Hellwig 		nvmet_rdma_queue_disconnect(queue);
1321d8f7750aSSagi Grimberg 		break;
1322d8f7750aSSagi Grimberg 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
1323d8f7750aSSagi Grimberg 		ret = nvmet_rdma_device_removal(cm_id, queue);
13248f000cacSChristoph Hellwig 		break;
13258f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_REJECTED:
1326512fb1b3SSteve Wise 		pr_debug("Connection rejected: %s\n",
1327512fb1b3SSteve Wise 			 rdma_reject_msg(cm_id, event->status));
1328512fb1b3SSteve Wise 		/* FALLTHROUGH */
13298f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_UNREACHABLE:
13308f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_CONNECT_ERROR:
13318f000cacSChristoph Hellwig 		nvmet_rdma_queue_connect_fail(cm_id, queue);
13328f000cacSChristoph Hellwig 		break;
13338f000cacSChristoph Hellwig 	default:
13348f000cacSChristoph Hellwig 		pr_err("received unrecognized RDMA CM event %d\n",
13358f000cacSChristoph Hellwig 			event->event);
13368f000cacSChristoph Hellwig 		break;
13378f000cacSChristoph Hellwig 	}
13388f000cacSChristoph Hellwig 
13398f000cacSChristoph Hellwig 	return ret;
13408f000cacSChristoph Hellwig }
13418f000cacSChristoph Hellwig 
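/*
 * Disconnect every queue owned by the controller being deleted.  The list
 * scan restarts after each disconnect because the mutex is dropped while a
 * queue is torn down.
 */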
13428f000cacSChristoph Hellwig static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
13438f000cacSChristoph Hellwig {
13448f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
13458f000cacSChristoph Hellwig 
13468f000cacSChristoph Hellwig restart:
13478f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
13488f000cacSChristoph Hellwig 	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
13498f000cacSChristoph Hellwig 		if (queue->nvme_sq.ctrl == ctrl) {
13508f000cacSChristoph Hellwig 			list_del_init(&queue->queue_list);
13518f000cacSChristoph Hellwig 			mutex_unlock(&nvmet_rdma_queue_mutex);
13528f000cacSChristoph Hellwig 
13538f000cacSChristoph Hellwig 			__nvmet_rdma_queue_disconnect(queue);
13548f000cacSChristoph Hellwig 			goto restart;
13558f000cacSChristoph Hellwig 		}
13568f000cacSChristoph Hellwig 	}
13578f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
13588f000cacSChristoph Hellwig }
13598f000cacSChristoph Hellwig 
13608f000cacSChristoph Hellwig static int nvmet_rdma_add_port(struct nvmet_port *port)
13618f000cacSChristoph Hellwig {
13628f000cacSChristoph Hellwig 	struct rdma_cm_id *cm_id;
1363670c2a3aSSagi Grimberg 	struct sockaddr_storage addr = { };
1364670c2a3aSSagi Grimberg 	__kernel_sa_family_t af;
13658f000cacSChristoph Hellwig 	int ret;
13668f000cacSChristoph Hellwig 
13678f000cacSChristoph Hellwig 	switch (port->disc_addr.adrfam) {
13688f000cacSChristoph Hellwig 	case NVMF_ADDR_FAMILY_IP4:
1369670c2a3aSSagi Grimberg 		af = AF_INET;
1370670c2a3aSSagi Grimberg 		break;
1371670c2a3aSSagi Grimberg 	case NVMF_ADDR_FAMILY_IP6:
1372670c2a3aSSagi Grimberg 		af = AF_INET6;
13738f000cacSChristoph Hellwig 		break;
13748f000cacSChristoph Hellwig 	default:
13758f000cacSChristoph Hellwig 		pr_err("address family %d not supported\n",
13768f000cacSChristoph Hellwig 				port->disc_addr.adrfam);
13778f000cacSChristoph Hellwig 		return -EINVAL;
13788f000cacSChristoph Hellwig 	}
13798f000cacSChristoph Hellwig 
1380670c2a3aSSagi Grimberg 	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
1381670c2a3aSSagi Grimberg 			port->disc_addr.trsvcid, &addr);
1382670c2a3aSSagi Grimberg 	if (ret) {
1383670c2a3aSSagi Grimberg 		pr_err("malformed ip/port passed: %s:%s\n",
1384670c2a3aSSagi Grimberg 			port->disc_addr.traddr, port->disc_addr.trsvcid);
13858f000cacSChristoph Hellwig 		return ret;
1386670c2a3aSSagi Grimberg 	}
13878f000cacSChristoph Hellwig 
13888f000cacSChristoph Hellwig 	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
13898f000cacSChristoph Hellwig 			RDMA_PS_TCP, IB_QPT_RC);
13908f000cacSChristoph Hellwig 	if (IS_ERR(cm_id)) {
13918f000cacSChristoph Hellwig 		pr_err("CM ID creation failed\n");
13928f000cacSChristoph Hellwig 		return PTR_ERR(cm_id);
13938f000cacSChristoph Hellwig 	}
13948f000cacSChristoph Hellwig 
1395670c2a3aSSagi Grimberg 	/*
1396670c2a3aSSagi Grimberg 	 * Allow both IPv4 and IPv6 sockets to bind a single port
1397670c2a3aSSagi Grimberg 	 * at the same time.
1398670c2a3aSSagi Grimberg 	 */
1399670c2a3aSSagi Grimberg 	ret = rdma_set_afonly(cm_id, 1);
14008f000cacSChristoph Hellwig 	if (ret) {
1401670c2a3aSSagi Grimberg 		pr_err("rdma_set_afonly failed (%d)\n", ret);
1402670c2a3aSSagi Grimberg 		goto out_destroy_id;
1403670c2a3aSSagi Grimberg 	}
1404670c2a3aSSagi Grimberg 
1405670c2a3aSSagi Grimberg 	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
1406670c2a3aSSagi Grimberg 	if (ret) {
1407670c2a3aSSagi Grimberg 		pr_err("binding CM ID to %pISpcs failed (%d)\n",
1408670c2a3aSSagi Grimberg 			(struct sockaddr *)&addr, ret);
14098f000cacSChristoph Hellwig 		goto out_destroy_id;
14108f000cacSChristoph Hellwig 	}
14118f000cacSChristoph Hellwig 
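	/* Start listening with a backlog of 128 pending connect requests. */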
14128f000cacSChristoph Hellwig 	ret = rdma_listen(cm_id, 128);
14138f000cacSChristoph Hellwig 	if (ret) {
1414670c2a3aSSagi Grimberg 		pr_err("listening to %pISpcs failed (%d)\n",
1415670c2a3aSSagi Grimberg 			(struct sockaddr *)&addr, ret);
14168f000cacSChristoph Hellwig 		goto out_destroy_id;
14178f000cacSChristoph Hellwig 	}
14188f000cacSChristoph Hellwig 
1419670c2a3aSSagi Grimberg 	pr_info("enabling port %d (%pISpcs)\n",
1420670c2a3aSSagi Grimberg 		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
14218f000cacSChristoph Hellwig 	port->priv = cm_id;
14228f000cacSChristoph Hellwig 	return 0;
14238f000cacSChristoph Hellwig 
14248f000cacSChristoph Hellwig out_destroy_id:
14258f000cacSChristoph Hellwig 	rdma_destroy_id(cm_id);
14268f000cacSChristoph Hellwig 	return ret;
14278f000cacSChristoph Hellwig }
14288f000cacSChristoph Hellwig 
14298f000cacSChristoph Hellwig static void nvmet_rdma_remove_port(struct nvmet_port *port)
14308f000cacSChristoph Hellwig {
1431d8f7750aSSagi Grimberg 	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
14328f000cacSChristoph Hellwig 
1433d8f7750aSSagi Grimberg 	if (cm_id)
14348f000cacSChristoph Hellwig 		rdma_destroy_id(cm_id);
14358f000cacSChristoph Hellwig }
14368f000cacSChristoph Hellwig 
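/*
 * If the port was bound to a wildcard address, report the address the
 * connection actually arrived on as the discovery traddr; otherwise report
 * the configured address.
 */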
14374c652685SSagi Grimberg static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
14384c652685SSagi Grimberg 		struct nvmet_port *port, char *traddr)
14394c652685SSagi Grimberg {
14404c652685SSagi Grimberg 	struct rdma_cm_id *cm_id = port->priv;
14414c652685SSagi Grimberg 
14424c652685SSagi Grimberg 	if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
14434c652685SSagi Grimberg 		struct nvmet_rdma_rsp *rsp =
14444c652685SSagi Grimberg 			container_of(req, struct nvmet_rdma_rsp, req);
14454c652685SSagi Grimberg 		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
14464c652685SSagi Grimberg 		struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
14474c652685SSagi Grimberg 
14484c652685SSagi Grimberg 		sprintf(traddr, "%pISc", addr);
14494c652685SSagi Grimberg 	} else {
14504c652685SSagi Grimberg 		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
14514c652685SSagi Grimberg 	}
14524c652685SSagi Grimberg }
14534c652685SSagi Grimberg 
1454e929f06dSChristoph Hellwig static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
14558f000cacSChristoph Hellwig 	.owner			= THIS_MODULE,
14568f000cacSChristoph Hellwig 	.type			= NVMF_TRTYPE_RDMA,
14578f000cacSChristoph Hellwig 	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
14588f000cacSChristoph Hellwig 	.msdbd			= 1,
14598f000cacSChristoph Hellwig 	.has_keyed_sgls		= 1,
14608f000cacSChristoph Hellwig 	.add_port		= nvmet_rdma_add_port,
14618f000cacSChristoph Hellwig 	.remove_port		= nvmet_rdma_remove_port,
14628f000cacSChristoph Hellwig 	.queue_response		= nvmet_rdma_queue_response,
14638f000cacSChristoph Hellwig 	.delete_ctrl		= nvmet_rdma_delete_ctrl,
14644c652685SSagi Grimberg 	.disc_traddr		= nvmet_rdma_disc_port_addr,
14658f000cacSChristoph Hellwig };
14668f000cacSChristoph Hellwig 
1467f1d4ef7dSSagi Grimberg static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
1468f1d4ef7dSSagi Grimberg {
146943b92fd2SIsrael Rukshin 	struct nvmet_rdma_queue *queue, *tmp;
1470a3dd7d00SMax Gurtovoy 	struct nvmet_rdma_device *ndev;
1471a3dd7d00SMax Gurtovoy 	bool found = false;
1472f1d4ef7dSSagi Grimberg 
1473a3dd7d00SMax Gurtovoy 	mutex_lock(&device_list_mutex);
1474a3dd7d00SMax Gurtovoy 	list_for_each_entry(ndev, &device_list, entry) {
1475a3dd7d00SMax Gurtovoy 		if (ndev->device == ib_device) {
1476a3dd7d00SMax Gurtovoy 			found = true;
1477a3dd7d00SMax Gurtovoy 			break;
1478a3dd7d00SMax Gurtovoy 		}
1479a3dd7d00SMax Gurtovoy 	}
1480a3dd7d00SMax Gurtovoy 	mutex_unlock(&device_list_mutex);
1481a3dd7d00SMax Gurtovoy 
1482a3dd7d00SMax Gurtovoy 	if (!found)
1483a3dd7d00SMax Gurtovoy 		return;
1484a3dd7d00SMax Gurtovoy 
1485a3dd7d00SMax Gurtovoy 	/*
1486a3dd7d00SMax Gurtovoy 	 * An IB device used by nvmet controllers is being removed;
1487a3dd7d00SMax Gurtovoy 	 * delete all queues using this device.
1488a3dd7d00SMax Gurtovoy 	 */
1489f1d4ef7dSSagi Grimberg 	mutex_lock(&nvmet_rdma_queue_mutex);
149043b92fd2SIsrael Rukshin 	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
149143b92fd2SIsrael Rukshin 				 queue_list) {
1492f1d4ef7dSSagi Grimberg 		if (queue->dev->device != ib_device)
1493f1d4ef7dSSagi Grimberg 			continue;
1494f1d4ef7dSSagi Grimberg 
1495f1d4ef7dSSagi Grimberg 		pr_info("Removing queue %d\n", queue->idx);
149643b92fd2SIsrael Rukshin 		list_del_init(&queue->queue_list);
1497f1d4ef7dSSagi Grimberg 		__nvmet_rdma_queue_disconnect(queue);
1498f1d4ef7dSSagi Grimberg 	}
1499f1d4ef7dSSagi Grimberg 	mutex_unlock(&nvmet_rdma_queue_mutex);
1500f1d4ef7dSSagi Grimberg 
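	/* Wait for the queue release work scheduled above to complete. */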
1501f1d4ef7dSSagi Grimberg 	flush_scheduled_work();
1502f1d4ef7dSSagi Grimberg }
1503f1d4ef7dSSagi Grimberg 
1504f1d4ef7dSSagi Grimberg static struct ib_client nvmet_rdma_ib_client = {
1505f1d4ef7dSSagi Grimberg 	.name   = "nvmet_rdma",
1506f1d4ef7dSSagi Grimberg 	.remove = nvmet_rdma_remove_one
1507f1d4ef7dSSagi Grimberg };
1508f1d4ef7dSSagi Grimberg 
15098f000cacSChristoph Hellwig static int __init nvmet_rdma_init(void)
15108f000cacSChristoph Hellwig {
1511f1d4ef7dSSagi Grimberg 	int ret;
1512f1d4ef7dSSagi Grimberg 
1513f1d4ef7dSSagi Grimberg 	ret = ib_register_client(&nvmet_rdma_ib_client);
1514f1d4ef7dSSagi Grimberg 	if (ret)
1515f1d4ef7dSSagi Grimberg 		return ret;
1516f1d4ef7dSSagi Grimberg 
1517f1d4ef7dSSagi Grimberg 	ret = nvmet_register_transport(&nvmet_rdma_ops);
1518f1d4ef7dSSagi Grimberg 	if (ret)
1519f1d4ef7dSSagi Grimberg 		goto err_ib_client;
1520f1d4ef7dSSagi Grimberg 
1521f1d4ef7dSSagi Grimberg 	return 0;
1522f1d4ef7dSSagi Grimberg 
1523f1d4ef7dSSagi Grimberg err_ib_client:
1524f1d4ef7dSSagi Grimberg 	ib_unregister_client(&nvmet_rdma_ib_client);
1525f1d4ef7dSSagi Grimberg 	return ret;
15268f000cacSChristoph Hellwig }
15278f000cacSChristoph Hellwig 
15288f000cacSChristoph Hellwig static void __exit nvmet_rdma_exit(void)
15298f000cacSChristoph Hellwig {
15308f000cacSChristoph Hellwig 	nvmet_unregister_transport(&nvmet_rdma_ops);
1531f1d4ef7dSSagi Grimberg 	ib_unregister_client(&nvmet_rdma_ib_client);
1532cb4876e8SSagi Grimberg 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
15338f000cacSChristoph Hellwig 	ida_destroy(&nvmet_rdma_queue_ida);
15348f000cacSChristoph Hellwig }
15358f000cacSChristoph Hellwig 
15368f000cacSChristoph Hellwig module_init(nvmet_rdma_init);
15378f000cacSChristoph Hellwig module_exit(nvmet_rdma_exit);
15388f000cacSChristoph Hellwig 
15398f000cacSChristoph Hellwig MODULE_LICENSE("GPL v2");
15408f000cacSChristoph Hellwig MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
1541