xref: /openbmc/linux/drivers/nvme/target/rdma.c (revision c6e3f13398123a008cd2ee28f93510b113a32791)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS			8
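/*
 * A worked example of the cap this implies: MDTS is expressed in units
 * of mpsmin, so under the 4KB assumption above the largest transfer is
 * (1 << NVMET_RDMA_MAX_MDTS) * 4KB = 256 * 4KB = 1MB.
 */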

struct nvmet_rdma_srq;

struct nvmet_rdma_cmd {
	struct ib_sge		sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe		cqe;
	struct ib_recv_wr	wr;
	struct scatterlist	inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command     *nvme_cmd;
	struct nvmet_rdma_queue	*queue;
	struct nvmet_rdma_srq   *nsrq;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge		send_sge;
	struct ib_cqe		send_cqe;
	struct ib_send_wr	send_wr;

	struct nvmet_rdma_cmd	*cmd;
	struct nvmet_rdma_queue	*queue;

	struct ib_cqe		read_cqe;
	struct rdma_rw_ctx	rw;

	struct nvmet_req	req;

	bool			allocated;
	u8			n_rdma;
	u32			flags;
	u32			invalidate_rkey;

	struct list_head	wait_list;
	struct list_head	free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct ib_qp		*qp;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	struct nvmet_rdma_srq   *nsrq;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			comp_vector;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_port {
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	struct rdma_cm_id	*cm_id;
	struct delayed_work	repair_work;
};

struct nvmet_rdma_srq {
	struct ib_srq            *srq;
	struct nvmet_rdma_cmd    *cmds;
	struct nvmet_rdma_device *ndev;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct nvmet_rdma_srq	**srqs;
	int			srq_count;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
	int			inline_data_size;
	int			inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static int srq_size_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops srq_size_ops = {
	.set = srq_size_set,
	.get = param_get_int,
};

static int nvmet_rdma_srq_size = 1024;
module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, must be >= 256 (default: 1024)");
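/*
 * Example (hypothetical invocation; the module is normally built as
 * nvmet-rdma.ko):
 *
 *	modprobe nvmet-rdma use_srq=1 srq_size=2048
 *
 * srq_size is writable at runtime (0644) but srq_size_set() below
 * rejects values under 256; use_srq is read-only after load (0444).
 */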

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

static int srq_size_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 256)
		return -EINVAL;

	return param_set_int(val, kp);
}

static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}
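/*
 * num_pages() is DIV_ROUND_UP(len, PAGE_SIZE) for len > 0. A worked
 * example with 4KB pages: len = 16384 yields 1 + ((16383 & PAGE_MASK)
 * >> PAGE_SHIFT) = 1 + 3 = 4 pages; len = 1 yields 1 page.
 */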

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

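/*
 * Take a rsp from the queue's pre-allocated free list. Under memory
 * pressure the list can run empty while responses are still in flight;
 * in that case fall back to an on-demand allocation and mark it, so
 * that nvmet_rdma_put_rsp() frees it instead of returning it to the
 * free list.
 */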
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

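/*
 * Inline data layout of a command: sge[0] always maps the 64-byte NVMe
 * command itself, while sge[1..inline_page_count] each map one page
 * that can receive up to inline_data_size bytes of in-capsule write
 * data in total. The same pages are exposed to the core through
 * c->inline_sg.
 */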
static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

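/*
 * Pre-allocate twice as many rsps as recv commands: a response can
 * still be in flight (e.g. its RDMA SEND not yet completed) after its
 * recv buffer has been reposted, so 1:1 sizing could run dry. Should
 * the free list empty out anyway, nvmet_rdma_get_rsp() falls back to
 * dynamic allocation.
 */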
static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (cmd->nsrq)
		ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

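/*
 * Commands that could not reserve enough send-queue credits were
 * parked on rsp_wr_wait_list by nvmet_rdma_handle_command(). Re-run
 * them in FIFO order now that credits have been returned, stopping at
 * the first one that still does not fit.
 */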
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgls(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case
		 * of an admin connect error, so just disconnect
		 * and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	rsp->req.execute(&rsp->req);
}

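/*
 * Map the first num_pages(len) pre-allocated inline pages as the data
 * SGL. A worked example with 4KB pages, len = 6000 and off = 16: two
 * entries are built, sg[0] = {offset 16, length 4080} and
 * sg[1] = {offset 0, length 1920}, together covering the 6000
 * in-capsule bytes.
 */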
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

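/*
 * A keyed SGL describes a buffer in host memory: addr/key name the
 * remote region, and the data moves via RDMA READ (for host-to-target
 * writes) or RDMA WRITE (for target-to-host reads). rdma_rw_ctx_init()
 * returns the number of send WRs the transfer will consume, which is
 * accumulated in n_rdma and charged against sq_wr_avail before the
 * command is executed.
 */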
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	ret = nvmet_req_alloc_sgls(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (unlikely(ret < 0))
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

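/*
 * Each command costs one SEND WR for the response plus n_rdma WRs for
 * any RDMA READ/WRITE transfer, all reserved up front against
 * sq_wr_avail. If the send queue is too busy, return false so that the
 * caller parks the command on rsp_wr_wait_list until credits are
 * released by nvmet_rdma_release_rsp().
 */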
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * We get here only under memory pressure: silently
		 * drop the command and let the host retry, as we
		 * cannot even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{
	nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
			     false);
	ib_destroy_srq(nsrq->srq);

	kfree(nsrq);
}

static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
{
	int i;

	if (!ndev->srqs)
		return;

	for (i = 0; i < ndev->srq_count; i++)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);

	kfree(ndev->srqs);
}

static struct nvmet_rdma_srq *
nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	size_t srq_size = ndev->srq_size;
	struct nvmet_rdma_srq *nsrq;
	struct ib_srq *srq;
	int ret, i;

	nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
	if (!nsrq)
		return ERR_PTR(-ENOMEM);

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto out_free;
	}

	nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(nsrq->cmds)) {
		ret = PTR_ERR(nsrq->cmds);
		goto out_destroy_srq;
	}

	nsrq->srq = srq;
	nsrq->ndev = ndev;

	for (i = 0; i < srq_size; i++) {
		nsrq->cmds[i].nsrq = nsrq;
		ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return nsrq;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
out_free:
	kfree(nsrq);
	return ERR_PTR(ret);
}

static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
{
	int i, ret;

	if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
			     nvmet_rdma_srq_size);
	ndev->srq_count = min(ndev->device->num_comp_vectors,
			      ndev->device->attrs.max_srq);

	ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
	if (!ndev->srqs)
		return -ENOMEM;

	for (i = 0; i < ndev->srq_count; i++) {
		ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
		if (IS_ERR(ndev->srqs[i])) {
			ret = PTR_ERR(ndev->srqs[i]);
			goto err_srq;
		}
	}

	return 0;

err_srq:
	while (--i >= 0)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);
	kfree(ndev->srqs);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srqs(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_port *nport = port->nport;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(nport->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			nport->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		nport->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = nport->inline_data_size;
	ndev->inline_page_count = inline_page_count;
	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srqs(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_cqe, ret, i, factor;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
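	/*
	 * A worked example: with recv_queue_size = 128 and
	 * send_queue_size = 128, nr_cqe = 128 + 2 * 128 = 384, and the CQ
	 * below is sized to nr_cqe + 1; the extra entry matches the drain
	 * WR reserved on the QP further down.
	 */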
10778f000cacSChristoph Hellwig 
10788f000cacSChristoph Hellwig 	queue->cq = ib_alloc_cq(ndev->device, queue,
1079b0012dd3SMax Gurtovoy 			nr_cqe + 1, queue->comp_vector,
10808f000cacSChristoph Hellwig 			IB_POLL_WORKQUEUE);
10818f000cacSChristoph Hellwig 	if (IS_ERR(queue->cq)) {
10828f000cacSChristoph Hellwig 		ret = PTR_ERR(queue->cq);
10838f000cacSChristoph Hellwig 		pr_err("failed to create CQ cqe= %d ret= %d\n",
10848f000cacSChristoph Hellwig 		       nr_cqe + 1, ret);
10858f000cacSChristoph Hellwig 		goto out;
10868f000cacSChristoph Hellwig 	}
10878f000cacSChristoph Hellwig 
10888f000cacSChristoph Hellwig 	memset(&qp_attr, 0, sizeof(qp_attr));
10898f000cacSChristoph Hellwig 	qp_attr.qp_context = queue;
10908f000cacSChristoph Hellwig 	qp_attr.event_handler = nvmet_rdma_qp_event;
10918f000cacSChristoph Hellwig 	qp_attr.send_cq = queue->cq;
10928f000cacSChristoph Hellwig 	qp_attr.recv_cq = queue->cq;
10938f000cacSChristoph Hellwig 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
10948f000cacSChristoph Hellwig 	qp_attr.qp_type = IB_QPT_RC;
10958f000cacSChristoph Hellwig 	/* +1 for drain */
10968f000cacSChristoph Hellwig 	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
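	/*
	 * An R/W context may need several MRs to map a maximally-sized
	 * (MDTS) transfer on this device, so scale max_rdma_ctxs by that
	 * factor.
	 */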
1097c363f249SMax Gurtovoy 	factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
1098c363f249SMax Gurtovoy 				   1 << NVMET_RDMA_MAX_MDTS);
1099c363f249SMax Gurtovoy 	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
11008f000cacSChristoph Hellwig 	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
110133023fb8SSteve Wise 					ndev->device->attrs.max_send_sge);
11028f000cacSChristoph Hellwig 
1103b0012dd3SMax Gurtovoy 	if (queue->nsrq) {
1104b0012dd3SMax Gurtovoy 		qp_attr.srq = queue->nsrq->srq;
11058f000cacSChristoph Hellwig 	} else {
11068f000cacSChristoph Hellwig 		/* +1 for drain */
11078f000cacSChristoph Hellwig 		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
11080d5ee2b2SSteve Wise 		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
11098f000cacSChristoph Hellwig 	}
11108f000cacSChristoph Hellwig 
11118f000cacSChristoph Hellwig 	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
11128f000cacSChristoph Hellwig 	if (ret) {
11138f000cacSChristoph Hellwig 		pr_err("failed to create_qp ret= %d\n", ret);
11148f000cacSChristoph Hellwig 		goto err_destroy_cq;
11158f000cacSChristoph Hellwig 	}
111621f90243SIsrael Rukshin 	queue->qp = queue->cm_id->qp;
11178f000cacSChristoph Hellwig 
11188f000cacSChristoph Hellwig 	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
11198f000cacSChristoph Hellwig 
11208f000cacSChristoph Hellwig 	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
11218f000cacSChristoph Hellwig 		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
11228f000cacSChristoph Hellwig 		 qp_attr.cap.max_send_wr, queue->cm_id);
11238f000cacSChristoph Hellwig 
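	/* Without an SRQ, each queue pre-posts its own receive buffers. */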
1124b0012dd3SMax Gurtovoy 	if (!queue->nsrq) {
11258f000cacSChristoph Hellwig 		for (i = 0; i < queue->recv_queue_size; i++) {
11268f000cacSChristoph Hellwig 			queue->cmds[i].queue = queue;
112720209384SMax Gurtovoy 			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
112820209384SMax Gurtovoy 			if (ret)
112920209384SMax Gurtovoy 				goto err_destroy_qp;
11308f000cacSChristoph Hellwig 		}
11318f000cacSChristoph Hellwig 	}
11328f000cacSChristoph Hellwig 
11338f000cacSChristoph Hellwig out:
11348f000cacSChristoph Hellwig 	return ret;
11358f000cacSChristoph Hellwig 
113620209384SMax Gurtovoy err_destroy_qp:
113720209384SMax Gurtovoy 	rdma_destroy_qp(queue->cm_id);
11388f000cacSChristoph Hellwig err_destroy_cq:
11398f000cacSChristoph Hellwig 	ib_free_cq(queue->cq);
11408f000cacSChristoph Hellwig 	goto out;
11418f000cacSChristoph Hellwig }
11428f000cacSChristoph Hellwig 
11438f000cacSChristoph Hellwig static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
11448f000cacSChristoph Hellwig {
114521f90243SIsrael Rukshin 	ib_drain_qp(queue->qp);
114621f90243SIsrael Rukshin 	if (queue->cm_id)
1147e1a2ee24SIsrael Rukshin 		rdma_destroy_id(queue->cm_id);
114821f90243SIsrael Rukshin 	ib_destroy_qp(queue->qp);
11498f000cacSChristoph Hellwig 	ib_free_cq(queue->cq);
11508f000cacSChristoph Hellwig }
11518f000cacSChristoph Hellwig 
11528f000cacSChristoph Hellwig static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
11538f000cacSChristoph Hellwig {
1154424125a0SSagi Grimberg 	pr_debug("freeing queue %d\n", queue->idx);
11558f000cacSChristoph Hellwig 
11568f000cacSChristoph Hellwig 	nvmet_sq_destroy(&queue->nvme_sq);
11578f000cacSChristoph Hellwig 
11588f000cacSChristoph Hellwig 	nvmet_rdma_destroy_queue_ib(queue);
1159b0012dd3SMax Gurtovoy 	if (!queue->nsrq) {
11608f000cacSChristoph Hellwig 		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
11618f000cacSChristoph Hellwig 				queue->recv_queue_size,
11628f000cacSChristoph Hellwig 				!queue->host_qid);
11638f000cacSChristoph Hellwig 	}
11648f000cacSChristoph Hellwig 	nvmet_rdma_free_rsps(queue);
11658f000cacSChristoph Hellwig 	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
11668f000cacSChristoph Hellwig 	kfree(queue);
11678f000cacSChristoph Hellwig }
11688f000cacSChristoph Hellwig 
11698f000cacSChristoph Hellwig static void nvmet_rdma_release_queue_work(struct work_struct *w)
11708f000cacSChristoph Hellwig {
11718f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue =
11728f000cacSChristoph Hellwig 		container_of(w, struct nvmet_rdma_queue, release_work);
11738f000cacSChristoph Hellwig 	struct nvmet_rdma_device *dev = queue->dev;
11748f000cacSChristoph Hellwig 
11758f000cacSChristoph Hellwig 	nvmet_rdma_free_queue(queue);
1176d8f7750aSSagi Grimberg 
11778f000cacSChristoph Hellwig 	kref_put(&dev->ref, nvmet_rdma_free_dev);
11788f000cacSChristoph Hellwig }
11798f000cacSChristoph Hellwig 
11808f000cacSChristoph Hellwig static int
11818f000cacSChristoph Hellwig nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
11828f000cacSChristoph Hellwig 				struct nvmet_rdma_queue *queue)
11838f000cacSChristoph Hellwig {
11848f000cacSChristoph Hellwig 	struct nvme_rdma_cm_req *req;
11858f000cacSChristoph Hellwig 
11868f000cacSChristoph Hellwig 	req = (struct nvme_rdma_cm_req *)conn->private_data;
11878f000cacSChristoph Hellwig 	if (!req || conn->private_data_len == 0)
11888f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_LEN;
11898f000cacSChristoph Hellwig 
11908f000cacSChristoph Hellwig 	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
11918f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_RECFMT;
11928f000cacSChristoph Hellwig 
11938f000cacSChristoph Hellwig 	queue->host_qid = le16_to_cpu(req->qid);
11948f000cacSChristoph Hellwig 
11958f000cacSChristoph Hellwig 	/*
1196b825b44cSJay Freyensee 	 * req->hsqsize is a 0's based value, so our recv queue size is
11978f000cacSChristoph Hellwig 	 * req->hsqsize + 1; req->hrqsize corresponds to our send queue size.
11988f000cacSChristoph Hellwig 	 */
1199b825b44cSJay Freyensee 	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
12008f000cacSChristoph Hellwig 	queue->send_queue_size = le16_to_cpu(req->hrqsize);
12018f000cacSChristoph Hellwig 
12027aa1f427SSagi Grimberg 	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
12038f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_HSQSIZE;
12048f000cacSChristoph Hellwig 
12058f000cacSChristoph Hellwig 	/* XXX: Should we enforce some kind of max for IO queues? */
12068f000cacSChristoph Hellwig 
12078f000cacSChristoph Hellwig 	return 0;
12088f000cacSChristoph Hellwig }
12098f000cacSChristoph Hellwig 
12108f000cacSChristoph Hellwig static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
12118f000cacSChristoph Hellwig 				enum nvme_rdma_cm_status status)
12128f000cacSChristoph Hellwig {
12138f000cacSChristoph Hellwig 	struct nvme_rdma_cm_rej rej;
12148f000cacSChristoph Hellwig 
12157a01a6eaSMax Gurtovoy 	pr_debug("rejecting connect request: status %d (%s)\n",
12167a01a6eaSMax Gurtovoy 		 status, nvme_rdma_cm_msg(status));
12177a01a6eaSMax Gurtovoy 
12188f000cacSChristoph Hellwig 	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
12198f000cacSChristoph Hellwig 	rej.sts = cpu_to_le16(status);
12208f000cacSChristoph Hellwig 
12218f000cacSChristoph Hellwig 	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
12228f000cacSChristoph Hellwig }
12238f000cacSChristoph Hellwig 
12248f000cacSChristoph Hellwig static struct nvmet_rdma_queue *
12258f000cacSChristoph Hellwig nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
12268f000cacSChristoph Hellwig 		struct rdma_cm_id *cm_id,
12278f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
12288f000cacSChristoph Hellwig {
12298f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
12308f000cacSChristoph Hellwig 	int ret;
12318f000cacSChristoph Hellwig 
12328f000cacSChristoph Hellwig 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
12338f000cacSChristoph Hellwig 	if (!queue) {
12348f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
12358f000cacSChristoph Hellwig 		goto out_reject;
12368f000cacSChristoph Hellwig 	}
12378f000cacSChristoph Hellwig 
12388f000cacSChristoph Hellwig 	ret = nvmet_sq_init(&queue->nvme_sq);
123970d4281cSBart Van Assche 	if (ret) {
124070d4281cSBart Van Assche 		ret = NVME_RDMA_CM_NO_RSC;
12418f000cacSChristoph Hellwig 		goto out_free_queue;
124270d4281cSBart Van Assche 	}
12438f000cacSChristoph Hellwig 
12448f000cacSChristoph Hellwig 	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
12458f000cacSChristoph Hellwig 	if (ret)
12468f000cacSChristoph Hellwig 		goto out_destroy_sq;
12478f000cacSChristoph Hellwig 
12488f000cacSChristoph Hellwig 	/*
12498f000cacSChristoph Hellwig 	 * Schedules the actual release because calling rdma_destroy_id from
12508f000cacSChristoph Hellwig 	 * inside a CM callback would trigger a deadlock. (great API design..)
12518f000cacSChristoph Hellwig 	 */
12528f000cacSChristoph Hellwig 	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
12538f000cacSChristoph Hellwig 	queue->dev = ndev;
12548f000cacSChristoph Hellwig 	queue->cm_id = cm_id;
12558f000cacSChristoph Hellwig 
12568f000cacSChristoph Hellwig 	spin_lock_init(&queue->state_lock);
12578f000cacSChristoph Hellwig 	queue->state = NVMET_RDMA_Q_CONNECTING;
12588f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->rsp_wait_list);
12598f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
12608f000cacSChristoph Hellwig 	spin_lock_init(&queue->rsp_wr_wait_lock);
12618f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->free_rsps);
12628f000cacSChristoph Hellwig 	spin_lock_init(&queue->rsps_lock);
1263766dbb17SSagi Grimberg 	INIT_LIST_HEAD(&queue->queue_list);
12648f000cacSChristoph Hellwig 
12658f000cacSChristoph Hellwig 	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
12668f000cacSChristoph Hellwig 	if (queue->idx < 0) {
12678f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
12686ccaeb56SChristophe JAILLET 		goto out_destroy_sq;
12698f000cacSChristoph Hellwig 	}
12708f000cacSChristoph Hellwig 
1271b0012dd3SMax Gurtovoy 	/*
1272b0012dd3SMax Gurtovoy 	 * Spread the io queues across completion vectors,
1273b0012dd3SMax Gurtovoy 	 * but still keep all admin queues on vector 0.
1274b0012dd3SMax Gurtovoy 	 */
1275b0012dd3SMax Gurtovoy 	queue->comp_vector = !queue->host_qid ? 0 :
1276b0012dd3SMax Gurtovoy 		queue->idx % ndev->device->num_comp_vectors;
1277b0012dd3SMax Gurtovoy 
12798f000cacSChristoph Hellwig 	ret = nvmet_rdma_alloc_rsps(queue);
12808f000cacSChristoph Hellwig 	if (ret) {
12818f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
12828f000cacSChristoph Hellwig 		goto out_ida_remove;
12838f000cacSChristoph Hellwig 	}
12848f000cacSChristoph Hellwig 
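	/*
	 * With SRQs, receive buffers are shared device-wide: map this queue
	 * onto one of the pre-allocated SRQs by completion vector instead
	 * of allocating its own command buffers.
	 */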
1285b0012dd3SMax Gurtovoy 	if (ndev->srqs) {
1286b0012dd3SMax Gurtovoy 		queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
1287b0012dd3SMax Gurtovoy 	} else {
12888f000cacSChristoph Hellwig 		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
12898f000cacSChristoph Hellwig 				queue->recv_queue_size,
12908f000cacSChristoph Hellwig 				!queue->host_qid);
12918f000cacSChristoph Hellwig 		if (IS_ERR(queue->cmds)) {
12928f000cacSChristoph Hellwig 			ret = NVME_RDMA_CM_NO_RSC;
12938f000cacSChristoph Hellwig 			goto out_free_responses;
12948f000cacSChristoph Hellwig 		}
12958f000cacSChristoph Hellwig 	}
12968f000cacSChristoph Hellwig 
12978f000cacSChristoph Hellwig 	ret = nvmet_rdma_create_queue_ib(queue);
12988f000cacSChristoph Hellwig 	if (ret) {
12998f000cacSChristoph Hellwig 		pr_err("%s: creating RDMA queue failed (%d).\n",
13008f000cacSChristoph Hellwig 			__func__, ret);
13018f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
13028f000cacSChristoph Hellwig 		goto out_free_cmds;
13038f000cacSChristoph Hellwig 	}
13048f000cacSChristoph Hellwig 
13058f000cacSChristoph Hellwig 	return queue;
13068f000cacSChristoph Hellwig 
13078f000cacSChristoph Hellwig out_free_cmds:
1308b0012dd3SMax Gurtovoy 	if (!queue->nsrq) {
13098f000cacSChristoph Hellwig 		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
13108f000cacSChristoph Hellwig 				queue->recv_queue_size,
13118f000cacSChristoph Hellwig 				!queue->host_qid);
13128f000cacSChristoph Hellwig 	}
13138f000cacSChristoph Hellwig out_free_responses:
13148f000cacSChristoph Hellwig 	nvmet_rdma_free_rsps(queue);
13158f000cacSChristoph Hellwig out_ida_remove:
13168f000cacSChristoph Hellwig 	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
13178f000cacSChristoph Hellwig out_destroy_sq:
13188f000cacSChristoph Hellwig 	nvmet_sq_destroy(&queue->nvme_sq);
13198f000cacSChristoph Hellwig out_free_queue:
13208f000cacSChristoph Hellwig 	kfree(queue);
13218f000cacSChristoph Hellwig out_reject:
13228f000cacSChristoph Hellwig 	nvmet_rdma_cm_reject(cm_id, ret);
13238f000cacSChristoph Hellwig 	return NULL;
13248f000cacSChristoph Hellwig }
13258f000cacSChristoph Hellwig 
13268f000cacSChristoph Hellwig static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
13278f000cacSChristoph Hellwig {
13288f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue = priv;
13298f000cacSChristoph Hellwig 
13308f000cacSChristoph Hellwig 	switch (event->event) {
13318f000cacSChristoph Hellwig 	case IB_EVENT_COMM_EST:
13328f000cacSChristoph Hellwig 		rdma_notify(queue->cm_id, event->event);
13338f000cacSChristoph Hellwig 		break;
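	/* Typically generated when a QP attached to an SRQ enters the error state */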
1334b0012dd3SMax Gurtovoy 	case IB_EVENT_QP_LAST_WQE_REACHED:
1335b0012dd3SMax Gurtovoy 		pr_debug("received last WQE reached event for queue=0x%p\n",
1336b0012dd3SMax Gurtovoy 			 queue);
1337b0012dd3SMax Gurtovoy 		break;
13388f000cacSChristoph Hellwig 	default:
1339675796beSMax Gurtovoy 		pr_err("received IB QP event: %s (%d)\n",
1340675796beSMax Gurtovoy 		       ib_event_msg(event->event), event->event);
13418f000cacSChristoph Hellwig 		break;
13428f000cacSChristoph Hellwig 	}
13438f000cacSChristoph Hellwig }
13448f000cacSChristoph Hellwig 
13458f000cacSChristoph Hellwig static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
13468f000cacSChristoph Hellwig 		struct nvmet_rdma_queue *queue,
13478f000cacSChristoph Hellwig 		struct rdma_conn_param *p)
13488f000cacSChristoph Hellwig {
13498f000cacSChristoph Hellwig 	struct rdma_conn_param  param = { };
13508f000cacSChristoph Hellwig 	struct nvme_rdma_cm_rep priv = { };
13518f000cacSChristoph Hellwig 	int ret = -ENOMEM;
13528f000cacSChristoph Hellwig 
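	/* rnr_retry_count of 7 is the special IB value meaning "retry forever" */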
13538f000cacSChristoph Hellwig 	param.rnr_retry_count = 7;
13548f000cacSChristoph Hellwig 	param.flow_control = 1;
13558f000cacSChristoph Hellwig 	param.initiator_depth = min_t(u8, p->initiator_depth,
13568f000cacSChristoph Hellwig 		queue->dev->device->attrs.max_qp_init_rd_atom);
13578f000cacSChristoph Hellwig 	param.private_data = &priv;
13588f000cacSChristoph Hellwig 	param.private_data_len = sizeof(priv);
13598f000cacSChristoph Hellwig 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
13608f000cacSChristoph Hellwig 	priv.crqsize = cpu_to_le16(queue->recv_queue_size);
13618f000cacSChristoph Hellwig 
13628f000cacSChristoph Hellwig 	ret = rdma_accept(cm_id, &param);
13638f000cacSChristoph Hellwig 	if (ret)
13648f000cacSChristoph Hellwig 		pr_err("rdma_accept failed (error code = %d)\n", ret);
13658f000cacSChristoph Hellwig 
13668f000cacSChristoph Hellwig 	return ret;
13678f000cacSChristoph Hellwig }
13688f000cacSChristoph Hellwig 
13698f000cacSChristoph Hellwig static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
13708f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
13718f000cacSChristoph Hellwig {
1372a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port = cm_id->context;
13738f000cacSChristoph Hellwig 	struct nvmet_rdma_device *ndev;
13748f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
13758f000cacSChristoph Hellwig 	int ret = -EINVAL;
13768f000cacSChristoph Hellwig 
13778f000cacSChristoph Hellwig 	ndev = nvmet_rdma_find_get_device(cm_id);
13788f000cacSChristoph Hellwig 	if (!ndev) {
13798f000cacSChristoph Hellwig 		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
13808f000cacSChristoph Hellwig 		return -ECONNREFUSED;
13818f000cacSChristoph Hellwig 	}
13828f000cacSChristoph Hellwig 
13838f000cacSChristoph Hellwig 	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
13848f000cacSChristoph Hellwig 	if (!queue) {
13858f000cacSChristoph Hellwig 		ret = -ENOMEM;
13868f000cacSChristoph Hellwig 		goto put_device;
13878f000cacSChristoph Hellwig 	}
1388a032e4f6SSagi Grimberg 	queue->port = port->nport;
13898f000cacSChristoph Hellwig 
1390777dc823SSagi Grimberg 	if (queue->host_qid == 0) {
1391777dc823SSagi Grimberg 		/* Let inflight controller teardown complete */
1392d39aa497SChristoph Hellwig 		flush_scheduled_work();
1393777dc823SSagi Grimberg 	}
1394777dc823SSagi Grimberg 
13958f000cacSChristoph Hellwig 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1396e1a2ee24SIsrael Rukshin 	if (ret) {
139721f90243SIsrael Rukshin 		/*
139821f90243SIsrael Rukshin 		 * Don't destroy the cm_id in free path, as we implicitly
139921f90243SIsrael Rukshin 		 * destroy the cm_id here with non-zero ret code.
140021f90243SIsrael Rukshin 		 */
140121f90243SIsrael Rukshin 		queue->cm_id = NULL;
140221f90243SIsrael Rukshin 		goto free_queue;
1403e1a2ee24SIsrael Rukshin 	}
14048f000cacSChristoph Hellwig 
14058f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
14068f000cacSChristoph Hellwig 	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
14078f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
14088f000cacSChristoph Hellwig 
14098f000cacSChristoph Hellwig 	return 0;
14108f000cacSChristoph Hellwig 
141121f90243SIsrael Rukshin free_queue:
141221f90243SIsrael Rukshin 	nvmet_rdma_free_queue(queue);
14138f000cacSChristoph Hellwig put_device:
14148f000cacSChristoph Hellwig 	kref_put(&ndev->ref, nvmet_rdma_free_dev);
14158f000cacSChristoph Hellwig 
14168f000cacSChristoph Hellwig 	return ret;
14178f000cacSChristoph Hellwig }
14188f000cacSChristoph Hellwig 
14198f000cacSChristoph Hellwig static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
14208f000cacSChristoph Hellwig {
14218f000cacSChristoph Hellwig 	unsigned long flags;
14228f000cacSChristoph Hellwig 
14238f000cacSChristoph Hellwig 	spin_lock_irqsave(&queue->state_lock, flags);
14248f000cacSChristoph Hellwig 	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
14258f000cacSChristoph Hellwig 		pr_warn("trying to establish a connected queue\n");
14268f000cacSChristoph Hellwig 		goto out_unlock;
14278f000cacSChristoph Hellwig 	}
14288f000cacSChristoph Hellwig 	queue->state = NVMET_RDMA_Q_LIVE;
14298f000cacSChristoph Hellwig 
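	/* Process any commands that arrived while the queue was still connecting. */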
14308f000cacSChristoph Hellwig 	while (!list_empty(&queue->rsp_wait_list)) {
14318f000cacSChristoph Hellwig 		struct nvmet_rdma_rsp *cmd;
14328f000cacSChristoph Hellwig 
14338f000cacSChristoph Hellwig 		cmd = list_first_entry(&queue->rsp_wait_list,
14348f000cacSChristoph Hellwig 					struct nvmet_rdma_rsp, wait_list);
14358f000cacSChristoph Hellwig 		list_del(&cmd->wait_list);
14368f000cacSChristoph Hellwig 
14378f000cacSChristoph Hellwig 		spin_unlock_irqrestore(&queue->state_lock, flags);
14388f000cacSChristoph Hellwig 		nvmet_rdma_handle_command(queue, cmd);
14398f000cacSChristoph Hellwig 		spin_lock_irqsave(&queue->state_lock, flags);
14408f000cacSChristoph Hellwig 	}
14418f000cacSChristoph Hellwig 
14428f000cacSChristoph Hellwig out_unlock:
14438f000cacSChristoph Hellwig 	spin_unlock_irqrestore(&queue->state_lock, flags);
14448f000cacSChristoph Hellwig }
14458f000cacSChristoph Hellwig 
14468f000cacSChristoph Hellwig static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
14478f000cacSChristoph Hellwig {
14488f000cacSChristoph Hellwig 	bool disconnect = false;
14498f000cacSChristoph Hellwig 	unsigned long flags;
14508f000cacSChristoph Hellwig 
14518f000cacSChristoph Hellwig 	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
14528f000cacSChristoph Hellwig 
14538f000cacSChristoph Hellwig 	spin_lock_irqsave(&queue->state_lock, flags);
14548f000cacSChristoph Hellwig 	switch (queue->state) {
14558f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_CONNECTING:
14568f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_LIVE:
14578f000cacSChristoph Hellwig 		queue->state = NVMET_RDMA_Q_DISCONNECTING;
1458d8f7750aSSagi Grimberg 		disconnect = true;
14598f000cacSChristoph Hellwig 		break;
14608f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_DISCONNECTING:
14618f000cacSChristoph Hellwig 		break;
14628f000cacSChristoph Hellwig 	}
14638f000cacSChristoph Hellwig 	spin_unlock_irqrestore(&queue->state_lock, flags);
14648f000cacSChristoph Hellwig 
14658f000cacSChristoph Hellwig 	if (disconnect) {
14668f000cacSChristoph Hellwig 		rdma_disconnect(queue->cm_id);
1467d39aa497SChristoph Hellwig 		schedule_work(&queue->release_work);
14688f000cacSChristoph Hellwig 	}
14698f000cacSChristoph Hellwig }
14708f000cacSChristoph Hellwig 
14718f000cacSChristoph Hellwig static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
14728f000cacSChristoph Hellwig {
14738f000cacSChristoph Hellwig 	bool disconnect = false;
14748f000cacSChristoph Hellwig 
14758f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
14768f000cacSChristoph Hellwig 	if (!list_empty(&queue->queue_list)) {
14778f000cacSChristoph Hellwig 		list_del_init(&queue->queue_list);
14788f000cacSChristoph Hellwig 		disconnect = true;
14798f000cacSChristoph Hellwig 	}
14808f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
14818f000cacSChristoph Hellwig 
14828f000cacSChristoph Hellwig 	if (disconnect)
14838f000cacSChristoph Hellwig 		__nvmet_rdma_queue_disconnect(queue);
14848f000cacSChristoph Hellwig }
14858f000cacSChristoph Hellwig 
14868f000cacSChristoph Hellwig static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
14878f000cacSChristoph Hellwig 		struct nvmet_rdma_queue *queue)
14888f000cacSChristoph Hellwig {
14898f000cacSChristoph Hellwig 	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
14908f000cacSChristoph Hellwig 
1491766dbb17SSagi Grimberg 	mutex_lock(&nvmet_rdma_queue_mutex);
1492766dbb17SSagi Grimberg 	if (!list_empty(&queue->queue_list))
1493766dbb17SSagi Grimberg 		list_del_init(&queue->queue_list);
1494766dbb17SSagi Grimberg 	mutex_unlock(&nvmet_rdma_queue_mutex);
1495766dbb17SSagi Grimberg 
1496766dbb17SSagi Grimberg 	pr_err("failed to connect queue %d\n", queue->idx);
1497d39aa497SChristoph Hellwig 	schedule_work(&queue->release_work);
14988f000cacSChristoph Hellwig }
14998f000cacSChristoph Hellwig 
1500d8f7750aSSagi Grimberg /**
1501d8f7750aSSagi Grimberg  * nvme_rdma_device_removal() - Handle RDMA device removal
1502f1d4ef7dSSagi Grimberg  * @cm_id:	rdma_cm id, used for nvmet port
1503d8f7750aSSagi Grimberg  * @queue:      nvmet rdma queue (cm id qp_context)
1504d8f7750aSSagi Grimberg  *
1505d8f7750aSSagi Grimberg  * DEVICE_REMOVAL event notifies us that the RDMA device is about
1506f1d4ef7dSSagi Grimberg  * to unplug. Note that this event can be generated on a normal
1507f1d4ef7dSSagi Grimberg  * queue cm_id and/or a device-bound listener cm_id (in which case
1508f1d4ef7dSSagi Grimberg  * queue will be NULL).
1509d8f7750aSSagi Grimberg  *
1510f1d4ef7dSSagi Grimberg  * We registered an ib_client to handle device removal for queues,
1511f1d4ef7dSSagi Grimberg  * so we only need to handle the listening port cm_ids. In this case
1512d8f7750aSSagi Grimberg  * we nullify the priv to prevent double cm_id destruction, and destroy
1513d8f7750aSSagi Grimberg  * the cm_id implicitly by returning a non-zero rc to the callout.
1514d8f7750aSSagi Grimberg  */
1515d8f7750aSSagi Grimberg static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1516d8f7750aSSagi Grimberg 		struct nvmet_rdma_queue *queue)
1517d8f7750aSSagi Grimberg {
1518a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port;
1519d8f7750aSSagi Grimberg 
1520f1d4ef7dSSagi Grimberg 	if (queue) {
1521f1d4ef7dSSagi Grimberg 		/*
1522f1d4ef7dSSagi Grimberg 		 * This is a queue cm_id. We have registered
1523f1d4ef7dSSagi Grimberg 		 * an ib_client to handle queue removal,
1524f1d4ef7dSSagi Grimberg 		 * so don't interfere and just return.
1525f1d4ef7dSSagi Grimberg 		 */
1526f1d4ef7dSSagi Grimberg 		return 0;
1527f1d4ef7dSSagi Grimberg 	}
1528f1d4ef7dSSagi Grimberg 
1529f1d4ef7dSSagi Grimberg 	port = cm_id->context;
1530d8f7750aSSagi Grimberg 
1531d8f7750aSSagi Grimberg 	/*
1532d8f7750aSSagi Grimberg 	 * This is a listener cm_id. Make sure that
1533d8f7750aSSagi Grimberg 	 * future remove_port won't invoke a double
1534d8f7750aSSagi Grimberg 	 * cm_id destroy. use atomic xchg to make sure
1535d8f7750aSSagi Grimberg 	 * we don't compete with remove_port.
1536d8f7750aSSagi Grimberg 	 */
1537a032e4f6SSagi Grimberg 	if (xchg(&port->cm_id, NULL) != cm_id)
1538d8f7750aSSagi Grimberg 		return 0;
1539d8f7750aSSagi Grimberg 
1540d8f7750aSSagi Grimberg 	/*
1541d8f7750aSSagi Grimberg 	 * We need to return 1 so that the core will destroy
1542d8f7750aSSagi Grimberg 	 * its own ID.  What a great API design..
1543d8f7750aSSagi Grimberg 	 */
1544d8f7750aSSagi Grimberg 	return 1;
1545d8f7750aSSagi Grimberg }
1546d8f7750aSSagi Grimberg 
15478f000cacSChristoph Hellwig static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
15488f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
15498f000cacSChristoph Hellwig {
15508f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue = NULL;
15518f000cacSChristoph Hellwig 	int ret = 0;
15528f000cacSChristoph Hellwig 
15538f000cacSChristoph Hellwig 	if (cm_id->qp)
15548f000cacSChristoph Hellwig 		queue = cm_id->qp->qp_context;
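	/* cm_ids without a QP (e.g. listeners) leave queue == NULL here. */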
15558f000cacSChristoph Hellwig 
15568f000cacSChristoph Hellwig 	pr_debug("%s (%d): status %d id %p\n",
15578f000cacSChristoph Hellwig 		rdma_event_msg(event->event), event->event,
15588f000cacSChristoph Hellwig 		event->status, cm_id);
15598f000cacSChristoph Hellwig 
15608f000cacSChristoph Hellwig 	switch (event->event) {
15618f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_CONNECT_REQUEST:
15628f000cacSChristoph Hellwig 		ret = nvmet_rdma_queue_connect(cm_id, event);
15638f000cacSChristoph Hellwig 		break;
15648f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_ESTABLISHED:
15658f000cacSChristoph Hellwig 		nvmet_rdma_queue_established(queue);
15668f000cacSChristoph Hellwig 		break;
15678f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_ADDR_CHANGE:
1568a032e4f6SSagi Grimberg 		if (!queue) {
1569a032e4f6SSagi Grimberg 			struct nvmet_rdma_port *port = cm_id->context;
1570a032e4f6SSagi Grimberg 
1571a032e4f6SSagi Grimberg 			schedule_delayed_work(&port->repair_work, 0);
1572a032e4f6SSagi Grimberg 			break;
1573a032e4f6SSagi Grimberg 		}
1574a032e4f6SSagi Grimberg 		/* FALLTHROUGH */
15758f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_DISCONNECTED:
15768f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
15778f000cacSChristoph Hellwig 		nvmet_rdma_queue_disconnect(queue);
1578d8f7750aSSagi Grimberg 		break;
1579d8f7750aSSagi Grimberg 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
1580d8f7750aSSagi Grimberg 		ret = nvmet_rdma_device_removal(cm_id, queue);
15818f000cacSChristoph Hellwig 		break;
15828f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_REJECTED:
1583512fb1b3SSteve Wise 		pr_debug("Connection rejected: %s\n",
1584512fb1b3SSteve Wise 			 rdma_reject_msg(cm_id, event->status));
1585512fb1b3SSteve Wise 		/* FALLTHROUGH */
15868f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_UNREACHABLE:
15878f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_CONNECT_ERROR:
15888f000cacSChristoph Hellwig 		nvmet_rdma_queue_connect_fail(cm_id, queue);
15898f000cacSChristoph Hellwig 		break;
15908f000cacSChristoph Hellwig 	default:
15918f000cacSChristoph Hellwig 		pr_err("received unrecognized RDMA CM event %d\n",
15928f000cacSChristoph Hellwig 			event->event);
15938f000cacSChristoph Hellwig 		break;
15948f000cacSChristoph Hellwig 	}
15958f000cacSChristoph Hellwig 
15968f000cacSChristoph Hellwig 	return ret;
15978f000cacSChristoph Hellwig }
15988f000cacSChristoph Hellwig 
15998f000cacSChristoph Hellwig static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
16008f000cacSChristoph Hellwig {
16018f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
16028f000cacSChristoph Hellwig 
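	/*
	 * Disconnecting a queue requires dropping the mutex, which
	 * invalidates the iteration, so rescan from the top after
	 * each disconnect.
	 */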
16038f000cacSChristoph Hellwig restart:
16048f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
16058f000cacSChristoph Hellwig 	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
16068f000cacSChristoph Hellwig 		if (queue->nvme_sq.ctrl == ctrl) {
16078f000cacSChristoph Hellwig 			list_del_init(&queue->queue_list);
16088f000cacSChristoph Hellwig 			mutex_unlock(&nvmet_rdma_queue_mutex);
16098f000cacSChristoph Hellwig 
16108f000cacSChristoph Hellwig 			__nvmet_rdma_queue_disconnect(queue);
16118f000cacSChristoph Hellwig 			goto restart;
16128f000cacSChristoph Hellwig 		}
16138f000cacSChristoph Hellwig 	}
16148f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
16158f000cacSChristoph Hellwig }
16168f000cacSChristoph Hellwig 
1617a032e4f6SSagi Grimberg static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
16188f000cacSChristoph Hellwig {
1619a032e4f6SSagi Grimberg 	struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
1620a032e4f6SSagi Grimberg 
1621a032e4f6SSagi Grimberg 	if (cm_id)
1622a032e4f6SSagi Grimberg 		rdma_destroy_id(cm_id);
1623a032e4f6SSagi Grimberg }
1624a032e4f6SSagi Grimberg 
1625a032e4f6SSagi Grimberg static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
1626a032e4f6SSagi Grimberg {
1627a032e4f6SSagi Grimberg 	struct sockaddr *addr = (struct sockaddr *)&port->addr;
16288f000cacSChristoph Hellwig 	struct rdma_cm_id *cm_id;
16298f000cacSChristoph Hellwig 	int ret;
16308f000cacSChristoph Hellwig 
16318f000cacSChristoph Hellwig 	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
16328f000cacSChristoph Hellwig 			RDMA_PS_TCP, IB_QPT_RC);
16338f000cacSChristoph Hellwig 	if (IS_ERR(cm_id)) {
16348f000cacSChristoph Hellwig 		pr_err("CM ID creation failed\n");
16358f000cacSChristoph Hellwig 		return PTR_ERR(cm_id);
16368f000cacSChristoph Hellwig 	}
16378f000cacSChristoph Hellwig 
1638670c2a3aSSagi Grimberg 	/*
1639670c2a3aSSagi Grimberg 	 * Allow both IPv4 and IPv6 sockets to bind a single port
1640670c2a3aSSagi Grimberg 	 * at the same time.
1641670c2a3aSSagi Grimberg 	 */
1642670c2a3aSSagi Grimberg 	ret = rdma_set_afonly(cm_id, 1);
16438f000cacSChristoph Hellwig 	if (ret) {
1644670c2a3aSSagi Grimberg 		pr_err("rdma_set_afonly failed (%d)\n", ret);
1645670c2a3aSSagi Grimberg 		goto out_destroy_id;
1646670c2a3aSSagi Grimberg 	}
1647670c2a3aSSagi Grimberg 
1648a032e4f6SSagi Grimberg 	ret = rdma_bind_addr(cm_id, addr);
1649670c2a3aSSagi Grimberg 	if (ret) {
1650a032e4f6SSagi Grimberg 		pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
16518f000cacSChristoph Hellwig 		goto out_destroy_id;
16528f000cacSChristoph Hellwig 	}
16538f000cacSChristoph Hellwig 
16548f000cacSChristoph Hellwig 	ret = rdma_listen(cm_id, 128);
16558f000cacSChristoph Hellwig 	if (ret) {
1656a032e4f6SSagi Grimberg 		pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
16578f000cacSChristoph Hellwig 		goto out_destroy_id;
16588f000cacSChristoph Hellwig 	}
16598f000cacSChristoph Hellwig 
1660a032e4f6SSagi Grimberg 	port->cm_id = cm_id;
16618f000cacSChristoph Hellwig 	return 0;
16628f000cacSChristoph Hellwig 
16638f000cacSChristoph Hellwig out_destroy_id:
16648f000cacSChristoph Hellwig 	rdma_destroy_id(cm_id);
16658f000cacSChristoph Hellwig 	return ret;
16668f000cacSChristoph Hellwig }
16678f000cacSChristoph Hellwig 
1668a032e4f6SSagi Grimberg static void nvmet_rdma_repair_port_work(struct work_struct *w)
16698f000cacSChristoph Hellwig {
1670a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
1671a032e4f6SSagi Grimberg 			struct nvmet_rdma_port, repair_work);
1672a032e4f6SSagi Grimberg 	int ret;
16738f000cacSChristoph Hellwig 
1674a032e4f6SSagi Grimberg 	nvmet_rdma_disable_port(port);
1675a032e4f6SSagi Grimberg 	ret = nvmet_rdma_enable_port(port);
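	/* If re-enabling failed, retry the repair in 5 seconds. */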
1676a032e4f6SSagi Grimberg 	if (ret)
1677a032e4f6SSagi Grimberg 		schedule_delayed_work(&port->repair_work, 5 * HZ);
1678a032e4f6SSagi Grimberg }
1679a032e4f6SSagi Grimberg 
1680a032e4f6SSagi Grimberg static int nvmet_rdma_add_port(struct nvmet_port *nport)
1681a032e4f6SSagi Grimberg {
1682a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port;
1683a032e4f6SSagi Grimberg 	__kernel_sa_family_t af;
1684a032e4f6SSagi Grimberg 	int ret;
1685a032e4f6SSagi Grimberg 
1686a032e4f6SSagi Grimberg 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1687a032e4f6SSagi Grimberg 	if (!port)
1688a032e4f6SSagi Grimberg 		return -ENOMEM;
1689a032e4f6SSagi Grimberg 
1690a032e4f6SSagi Grimberg 	nport->priv = port;
1691a032e4f6SSagi Grimberg 	port->nport = nport;
1692a032e4f6SSagi Grimberg 	INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
1693a032e4f6SSagi Grimberg 
1694a032e4f6SSagi Grimberg 	switch (nport->disc_addr.adrfam) {
1695a032e4f6SSagi Grimberg 	case NVMF_ADDR_FAMILY_IP4:
1696a032e4f6SSagi Grimberg 		af = AF_INET;
1697a032e4f6SSagi Grimberg 		break;
1698a032e4f6SSagi Grimberg 	case NVMF_ADDR_FAMILY_IP6:
1699a032e4f6SSagi Grimberg 		af = AF_INET6;
1700a032e4f6SSagi Grimberg 		break;
1701a032e4f6SSagi Grimberg 	default:
1702a032e4f6SSagi Grimberg 		pr_err("address family %d not supported\n",
1703a032e4f6SSagi Grimberg 			nport->disc_addr.adrfam);
1704a032e4f6SSagi Grimberg 		ret = -EINVAL;
1705a032e4f6SSagi Grimberg 		goto out_free_port;
1706a032e4f6SSagi Grimberg 	}
1707a032e4f6SSagi Grimberg 
1708a032e4f6SSagi Grimberg 	if (nport->inline_data_size < 0) {
1709a032e4f6SSagi Grimberg 		nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
1710a032e4f6SSagi Grimberg 	} else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
1711a032e4f6SSagi Grimberg 		pr_warn("inline_data_size %u is too large, reducing to %u\n",
1712a032e4f6SSagi Grimberg 			nport->inline_data_size,
1713a032e4f6SSagi Grimberg 			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
1714a032e4f6SSagi Grimberg 		nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
1715a032e4f6SSagi Grimberg 	}
1716a032e4f6SSagi Grimberg 
1717a032e4f6SSagi Grimberg 	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1718a032e4f6SSagi Grimberg 			nport->disc_addr.trsvcid, &port->addr);
1719a032e4f6SSagi Grimberg 	if (ret) {
1720a032e4f6SSagi Grimberg 		pr_err("malformed ip/port passed: %s:%s\n",
1721a032e4f6SSagi Grimberg 			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1722a032e4f6SSagi Grimberg 		goto out_free_port;
1723a032e4f6SSagi Grimberg 	}
1724a032e4f6SSagi Grimberg 
1725a032e4f6SSagi Grimberg 	ret = nvmet_rdma_enable_port(port);
1726a032e4f6SSagi Grimberg 	if (ret)
1727a032e4f6SSagi Grimberg 		goto out_free_port;
1728a032e4f6SSagi Grimberg 
1729a032e4f6SSagi Grimberg 	pr_info("enabling port %d (%pISpcs)\n",
1730a032e4f6SSagi Grimberg 		le16_to_cpu(nport->disc_addr.portid),
1731a032e4f6SSagi Grimberg 		(struct sockaddr *)&port->addr);
1732a032e4f6SSagi Grimberg 
1733a032e4f6SSagi Grimberg 	return 0;
1734a032e4f6SSagi Grimberg 
1735a032e4f6SSagi Grimberg out_free_port:
1736a032e4f6SSagi Grimberg 	kfree(port);
1737a032e4f6SSagi Grimberg 	return ret;
1738a032e4f6SSagi Grimberg }
1739a032e4f6SSagi Grimberg 
1740a032e4f6SSagi Grimberg static void nvmet_rdma_remove_port(struct nvmet_port *nport)
1741a032e4f6SSagi Grimberg {
1742a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port = nport->priv;
1743a032e4f6SSagi Grimberg 
1744a032e4f6SSagi Grimberg 	cancel_delayed_work_sync(&port->repair_work);
1745a032e4f6SSagi Grimberg 	nvmet_rdma_disable_port(port);
1746a032e4f6SSagi Grimberg 	kfree(port);
17478f000cacSChristoph Hellwig }
17488f000cacSChristoph Hellwig 
17494c652685SSagi Grimberg static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
1750a032e4f6SSagi Grimberg 		struct nvmet_port *nport, char *traddr)
17514c652685SSagi Grimberg {
1752a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port = nport->priv;
1753a032e4f6SSagi Grimberg 	struct rdma_cm_id *cm_id = port->cm_id;
17544c652685SSagi Grimberg 
17554c652685SSagi Grimberg 	if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
17564c652685SSagi Grimberg 		struct nvmet_rdma_rsp *rsp =
17574c652685SSagi Grimberg 			container_of(req, struct nvmet_rdma_rsp, req);
17584c652685SSagi Grimberg 		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
17594c652685SSagi Grimberg 		struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
17604c652685SSagi Grimberg 
17614c652685SSagi Grimberg 		sprintf(traddr, "%pISc", addr);
17624c652685SSagi Grimberg 	} else {
1763a032e4f6SSagi Grimberg 		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
17644c652685SSagi Grimberg 	}
17654c652685SSagi Grimberg }
17664c652685SSagi Grimberg 
1767ec6d20e1SMax Gurtovoy static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
1768ec6d20e1SMax Gurtovoy {
1769ec6d20e1SMax Gurtovoy 	return NVMET_RDMA_MAX_MDTS;
1770ec6d20e1SMax Gurtovoy }
1771ec6d20e1SMax Gurtovoy 
1772e929f06dSChristoph Hellwig static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
17738f000cacSChristoph Hellwig 	.owner			= THIS_MODULE,
17748f000cacSChristoph Hellwig 	.type			= NVMF_TRTYPE_RDMA,
17758f000cacSChristoph Hellwig 	.msdbd			= 1,
17768f000cacSChristoph Hellwig 	.has_keyed_sgls		= 1,
17778f000cacSChristoph Hellwig 	.add_port		= nvmet_rdma_add_port,
17788f000cacSChristoph Hellwig 	.remove_port		= nvmet_rdma_remove_port,
17798f000cacSChristoph Hellwig 	.queue_response		= nvmet_rdma_queue_response,
17808f000cacSChristoph Hellwig 	.delete_ctrl		= nvmet_rdma_delete_ctrl,
17814c652685SSagi Grimberg 	.disc_traddr		= nvmet_rdma_disc_port_addr,
1782ec6d20e1SMax Gurtovoy 	.get_mdts		= nvmet_rdma_get_mdts,
17838f000cacSChristoph Hellwig };
17848f000cacSChristoph Hellwig 
1785f1d4ef7dSSagi Grimberg static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
1786f1d4ef7dSSagi Grimberg {
178743b92fd2SIsrael Rukshin 	struct nvmet_rdma_queue *queue, *tmp;
1788a3dd7d00SMax Gurtovoy 	struct nvmet_rdma_device *ndev;
1789a3dd7d00SMax Gurtovoy 	bool found = false;
1790f1d4ef7dSSagi Grimberg 
1791a3dd7d00SMax Gurtovoy 	mutex_lock(&device_list_mutex);
1792a3dd7d00SMax Gurtovoy 	list_for_each_entry(ndev, &device_list, entry) {
1793a3dd7d00SMax Gurtovoy 		if (ndev->device == ib_device) {
1794a3dd7d00SMax Gurtovoy 			found = true;
1795a3dd7d00SMax Gurtovoy 			break;
1796a3dd7d00SMax Gurtovoy 		}
1797a3dd7d00SMax Gurtovoy 	}
1798a3dd7d00SMax Gurtovoy 	mutex_unlock(&device_list_mutex);
1799a3dd7d00SMax Gurtovoy 
1800a3dd7d00SMax Gurtovoy 	if (!found)
1801a3dd7d00SMax Gurtovoy 		return;
1802a3dd7d00SMax Gurtovoy 
1803a3dd7d00SMax Gurtovoy 	/*
1804a3dd7d00SMax Gurtovoy 	 * IB Device that is used by nvmet controllers is being removed,
1805a3dd7d00SMax Gurtovoy 	 * delete all queues using this device.
1806a3dd7d00SMax Gurtovoy 	 */
1807f1d4ef7dSSagi Grimberg 	mutex_lock(&nvmet_rdma_queue_mutex);
180843b92fd2SIsrael Rukshin 	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
180943b92fd2SIsrael Rukshin 				 queue_list) {
1810f1d4ef7dSSagi Grimberg 		if (queue->dev->device != ib_device)
1811f1d4ef7dSSagi Grimberg 			continue;
1812f1d4ef7dSSagi Grimberg 
1813f1d4ef7dSSagi Grimberg 		pr_info("Removing queue %d\n", queue->idx);
181443b92fd2SIsrael Rukshin 		list_del_init(&queue->queue_list);
1815f1d4ef7dSSagi Grimberg 		__nvmet_rdma_queue_disconnect(queue);
1816f1d4ef7dSSagi Grimberg 	}
1817f1d4ef7dSSagi Grimberg 	mutex_unlock(&nvmet_rdma_queue_mutex);
1818f1d4ef7dSSagi Grimberg 
1819f1d4ef7dSSagi Grimberg 	flush_scheduled_work();
1820f1d4ef7dSSagi Grimberg }
1821f1d4ef7dSSagi Grimberg 
1822f1d4ef7dSSagi Grimberg static struct ib_client nvmet_rdma_ib_client = {
1823f1d4ef7dSSagi Grimberg 	.name   = "nvmet_rdma",
1824f1d4ef7dSSagi Grimberg 	.remove = nvmet_rdma_remove_one
1825f1d4ef7dSSagi Grimberg };
1826f1d4ef7dSSagi Grimberg 
18278f000cacSChristoph Hellwig static int __init nvmet_rdma_init(void)
18288f000cacSChristoph Hellwig {
1829f1d4ef7dSSagi Grimberg 	int ret;
1830f1d4ef7dSSagi Grimberg 
1831f1d4ef7dSSagi Grimberg 	ret = ib_register_client(&nvmet_rdma_ib_client);
1832f1d4ef7dSSagi Grimberg 	if (ret)
1833f1d4ef7dSSagi Grimberg 		return ret;
1834f1d4ef7dSSagi Grimberg 
1835f1d4ef7dSSagi Grimberg 	ret = nvmet_register_transport(&nvmet_rdma_ops);
1836f1d4ef7dSSagi Grimberg 	if (ret)
1837f1d4ef7dSSagi Grimberg 		goto err_ib_client;
1838f1d4ef7dSSagi Grimberg 
1839f1d4ef7dSSagi Grimberg 	return 0;
1840f1d4ef7dSSagi Grimberg 
1841f1d4ef7dSSagi Grimberg err_ib_client:
1842f1d4ef7dSSagi Grimberg 	ib_unregister_client(&nvmet_rdma_ib_client);
1843f1d4ef7dSSagi Grimberg 	return ret;
18448f000cacSChristoph Hellwig }
18458f000cacSChristoph Hellwig 
18468f000cacSChristoph Hellwig static void __exit nvmet_rdma_exit(void)
18478f000cacSChristoph Hellwig {
18488f000cacSChristoph Hellwig 	nvmet_unregister_transport(&nvmet_rdma_ops);
1849f1d4ef7dSSagi Grimberg 	ib_unregister_client(&nvmet_rdma_ib_client);
1850cb4876e8SSagi Grimberg 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
18518f000cacSChristoph Hellwig 	ida_destroy(&nvmet_rdma_queue_ida);
18528f000cacSChristoph Hellwig }
18538f000cacSChristoph Hellwig 
18548f000cacSChristoph Hellwig module_init(nvmet_rdma_init);
18558f000cacSChristoph Hellwig module_exit(nvmet_rdma_exit);
18568f000cacSChristoph Hellwig 
18578f000cacSChristoph Hellwig MODULE_LICENSE("GPL v2");
18588f000cacSChristoph Hellwig MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
1859