xref: /openbmc/linux/drivers/nvme/target/rdma.c (revision 495758bb)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/blk-integrity.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
#include <rdma/ib_cm.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS			8
#define NVMET_RDMA_MAX_METADATA_MDTS		5
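
/*
 * MDTS is a power-of-two multiplier of mpsmin, so with 4KB pages the
 * values above cap data transfers at 2^8 * 4KB = 1MB, and at
 * 2^5 * 4KB = 128KB when end-to-end protection information is used.
 */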

struct nvmet_rdma_srq;

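/*
 * Per-RECV command context: sge[0] receives the 64-byte NVMe command
 * itself, sge[1..NVMET_RDMA_MAX_INLINE_SGE] receive any inline data.
 */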
struct nvmet_rdma_cmd {
	struct ib_sge		sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe		cqe;
	struct ib_recv_wr	wr;
	struct scatterlist	inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command     *nvme_cmd;
	struct nvmet_rdma_queue	*queue;
	struct nvmet_rdma_srq   *nsrq;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

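/*
 * Per-response context, normally taken from the queue's pre-allocated
 * free_rsps pool; ->allocated marks the rare responses kzalloc'ed on
 * demand when the pool runs empty under memory pressure.
 */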
struct nvmet_rdma_rsp {
	struct ib_sge		send_sge;
	struct ib_cqe		send_cqe;
	struct ib_send_wr	send_wr;

	struct nvmet_rdma_cmd	*cmd;
	struct nvmet_rdma_queue	*queue;

	struct ib_cqe		read_cqe;
	struct ib_cqe		write_cqe;
	struct rdma_rw_ctx	rw;

	struct nvmet_req	req;

	bool			allocated;
	u8			n_rdma;
	u32			flags;
	u32			invalidate_rkey;

	struct list_head	wait_list;
	struct list_head	free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct ib_qp		*qp;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	struct nvmet_rdma_srq   *nsrq;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			comp_vector;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_port {
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	struct rdma_cm_id	*cm_id;
	struct delayed_work	repair_work;
};

struct nvmet_rdma_srq {
	struct ib_srq            *srq;
	struct nvmet_rdma_cmd    *cmds;
	struct nvmet_rdma_device *ndev;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct nvmet_rdma_srq	**srqs;
	int			srq_count;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
	int			inline_data_size;
	int			inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static int srq_size_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops srq_size_ops = {
	.set = srq_size_set,
	.get = param_get_int,
};

static int nvmet_rdma_srq_size = 1024;
module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
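/*
 * Both knobs live under /sys/module/nvmet_rdma/parameters/; e.g., with
 * hypothetical values: modprobe nvmet-rdma use_srq=Y srq_size=2048
 */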

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

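/* Reject values below the documented minimum of 256 before handing off. */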
static int srq_size_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 256)
		return -EINVAL;

	return param_set_int(val, kp);
}

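/*
 * Number of pages needed to hold len bytes, i.e. equivalent to
 * DIV_ROUND_UP(len, PAGE_SIZE) for len >= 1.
 */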
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

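/*
 * An RDMA READ is needed to pull host-to-controller data that did not
 * arrive inline; an RDMA WRITE is needed to push controller-to-host
 * data for commands that completed without error.
 */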
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

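/*
 * Pop a response off the queue's free list.  Under memory pressure the
 * list may be empty, in which case we fall back to an on-demand
 * allocation that nvmet_rdma_put_rsp() frees again.
 */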
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

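/*
 * Inline write data lands in per-command pages mapped behind sge[1..].
 * Both helpers are no-ops when the device's inline_data_size is zero
 * (and nvmet_rdma_alloc_cmd skips them entirely for admin queues).
 */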
static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

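/*
 * Each response owns a DMA-mapped NVMe CQE for the response SEND, plus
 * completion handlers for the RDMA READ (data-in) and RDMA WRITE
 * (data-out) phases.
 */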
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	if (ib_dma_pci_p2p_dma_supported(ndev->device))
		r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	/* Data Out / RDMA WRITE */
	r->write_cqe.done = nvmet_rdma_write_data_done;

	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (cmd->nsrq)
		ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

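/*
 * Retry commands that were parked on rsp_wr_wait_list because the send
 * queue had no free work requests; stop at the first command that
 * still doesn't fit so ordering is preserved.
 */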
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

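/*
 * Translate an IB signature (T10-PI) error reported by the signature
 * MR into the matching NVMe protection-information status code.
 */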
static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;
	u16 status = 0;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		return NVME_SC_INVALID_PI;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			status = NVME_SC_APPTAG_CHECK;
			break;
		}
		pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	}

	return status;
}

static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
		struct nvme_command *cmd, struct ib_sig_domain *domain,
		u16 control, u8 pi_type)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = 1 << bi->interval_exp;
	domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
	if (control & NVME_RW_PRINFO_PRCHK_REF)
		domain->sig.dif.ref_remap = true;

	domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
	domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
	domain->sig.dif.app_escape = true;
	if (pi_type == NVME_NS_DPS_PI_TYPE3)
		domain->sig.dif.ref_escape = true;
}

static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
				     struct ib_sig_attrs *sig_attrs)
{
	struct nvme_command *cmd = req->cmd;
	u16 control = le16_to_cpu(cmd->rw.control);
	u8 pi_type = req->ns->pi_type;
	struct blk_integrity *bi;

	bi = bdev_get_integrity(req->ns->bdev);

	memset(sig_attrs, 0, sizeof(*sig_attrs));

	if (control & NVME_RW_PRINFO_PRACT) {
		/* for WRITE_INSERT/READ_STRIP no wire domain */
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
		/* Clear the PRACT bit since HCA will generate/verify the PI */
		control &= ~NVME_RW_PRINFO_PRACT;
		cmd->rw.control = cpu_to_le16(control);
		/* PI is added by the HW */
		req->transfer_len += req->metadata_len;
	} else {
		/* for WRITE_PASS/READ_PASS both wire/memory domains exist */
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
					  pi_type);
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
	}

	if (control & NVME_RW_PRINFO_PRCHK_REF)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
	if (control & NVME_RW_PRINFO_PRCHK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (control & NVME_RW_PRINFO_PRCHK_APP)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
}

static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
				  struct ib_sig_attrs *sig_attrs)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;
	int ret;

	if (req->metadata_len)
		ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
			addr, key, nvmet_data_dir(req));
	else
		ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
				       req->sg, req->sg_cnt, 0, addr, key,
				       nvmet_data_dir(req));

	return ret;
}

static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;

	if (req->metadata_len)
		rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt,
			nvmet_data_dir(req));
	else
		rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
				    req->sg, req->sg_cnt, nvmet_data_dir(req));
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma)
		nvmet_rdma_rw_ctx_destroy(rsp);

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgls(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case of an admin
		 * connect error, so just disconnect and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

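/*
 * Queue the response: without PI, data-out RDMA WRITEs are chained
 * directly in front of the SEND that carries the NVMe CQE; with PI,
 * the WRITE completes through write_cqe and the SEND is posted from
 * nvmet_rdma_write_data_done() after the signature check.
 */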
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp)) {
		if (rsp->req.metadata_len)
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, &rsp->write_cqe, NULL);
		else
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, NULL, &rsp->send_wr);
	} else {
		first_wr = &rsp->send_wr;
	}

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	u16 status = 0;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (rsp->req.metadata_len)
		status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(status))
		nvmet_req_complete(&rsp->req, status);
	else
		rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u16 status;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
				ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	/*
	 * Upon RDMA completion check the signature status
	 * - if succeeded send good NVMe response
	 * - if failed send bad NVMe response with appropriate error
	 */
	status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	if (unlikely(status))
		rsp->req.cqe->status = cpu_to_le16(status << 1);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	struct ib_sig_attrs sig_attrs;
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	if (rsp->req.metadata_len)
		nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);

	ret = nvmet_req_alloc_sgls(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
	if (unlikely(ret < 0))
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

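/*
 * Dispatch on the SGL descriptor type in the command's dptr: offset
 * (inline) descriptors map onto the pre-posted inline pages, keyed
 * descriptors set up an rdma_rw context against the host's rkey.
 */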
static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

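/*
 * Reserve 1 + n_rdma send-queue slots (the response SEND plus any RDMA
 * READ work requests).  Returns false without executing if the send
 * queue is exhausted so the caller can park the command on the
 * rsp_wr_wait_list.
 */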
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

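/*
 * RECV completion: every capsule must carry at least a full 64-byte
 * NVMe command.  Commands that arrive while the queue is still
 * connecting are parked on rsp_wait_list until the queue goes live.
 */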
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
10348407879cSSagi Grimberg 		/*
10358407879cSSagi Grimberg 		 * We get here only under memory pressure;
10368407879cSSagi Grimberg 		 * silently drop and have the host retry,
10378407879cSSagi Grimberg 		 * as we can't even fail it.
10388407879cSSagi Grimberg 		 */
10398407879cSSagi Grimberg 		nvmet_rdma_post_recv(queue->dev, cmd);
10408407879cSSagi Grimberg 		return;
10418407879cSSagi Grimberg 	}
10428d61413dSSagi Grimberg 	rsp->queue = queue;
10438f000cacSChristoph Hellwig 	rsp->cmd = cmd;
10448f000cacSChristoph Hellwig 	rsp->flags = 0;
10458f000cacSChristoph Hellwig 	rsp->req.cmd = cmd->nvme_cmd;
10468d61413dSSagi Grimberg 	rsp->req.port = queue->port;
10478d61413dSSagi Grimberg 	rsp->n_rdma = 0;
10488f000cacSChristoph Hellwig 
10498f000cacSChristoph Hellwig 	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
10508f000cacSChristoph Hellwig 		unsigned long flags;
10518f000cacSChristoph Hellwig 
10528f000cacSChristoph Hellwig 		spin_lock_irqsave(&queue->state_lock, flags);
10538f000cacSChristoph Hellwig 		if (queue->state == NVMET_RDMA_Q_CONNECTING)
10548f000cacSChristoph Hellwig 			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
10558f000cacSChristoph Hellwig 		else
10568f000cacSChristoph Hellwig 			nvmet_rdma_put_rsp(rsp);
10578f000cacSChristoph Hellwig 		spin_unlock_irqrestore(&queue->state_lock, flags);
10588f000cacSChristoph Hellwig 		return;
10598f000cacSChristoph Hellwig 	}
10608f000cacSChristoph Hellwig 
10618f000cacSChristoph Hellwig 	nvmet_rdma_handle_command(queue, rsp);
10628f000cacSChristoph Hellwig }
10638f000cacSChristoph Hellwig 
1064b0012dd3SMax Gurtovoy static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
10658f000cacSChristoph Hellwig {
1066b0012dd3SMax Gurtovoy 	nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
1067b0012dd3SMax Gurtovoy 			     false);
1068b0012dd3SMax Gurtovoy 	ib_destroy_srq(nsrq->srq);
10698f000cacSChristoph Hellwig 
1070b0012dd3SMax Gurtovoy 	kfree(nsrq);
10718f000cacSChristoph Hellwig }
10728f000cacSChristoph Hellwig 
1073b0012dd3SMax Gurtovoy static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
1074b0012dd3SMax Gurtovoy {
1075b0012dd3SMax Gurtovoy 	int i;
1076b0012dd3SMax Gurtovoy 
1077b0012dd3SMax Gurtovoy 	if (!ndev->srqs)
1078b0012dd3SMax Gurtovoy 		return;
1079b0012dd3SMax Gurtovoy 
1080b0012dd3SMax Gurtovoy 	for (i = 0; i < ndev->srq_count; i++)
1081b0012dd3SMax Gurtovoy 		nvmet_rdma_destroy_srq(ndev->srqs[i]);
1082b0012dd3SMax Gurtovoy 
1083b0012dd3SMax Gurtovoy 	kfree(ndev->srqs);
1084b0012dd3SMax Gurtovoy }
1085b0012dd3SMax Gurtovoy 
1086b0012dd3SMax Gurtovoy static struct nvmet_rdma_srq *
1087b0012dd3SMax Gurtovoy nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
10888f000cacSChristoph Hellwig {
10898f000cacSChristoph Hellwig 	struct ib_srq_init_attr srq_attr = { NULL, };
1090b0012dd3SMax Gurtovoy 	size_t srq_size = ndev->srq_size;
1091b0012dd3SMax Gurtovoy 	struct nvmet_rdma_srq *nsrq;
10928f000cacSChristoph Hellwig 	struct ib_srq *srq;
10938f000cacSChristoph Hellwig 	int ret, i;
10948f000cacSChristoph Hellwig 
1095b0012dd3SMax Gurtovoy 	nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
1096b0012dd3SMax Gurtovoy 	if (!nsrq)
1097b0012dd3SMax Gurtovoy 		return ERR_PTR(-ENOMEM);
10988f000cacSChristoph Hellwig 
10998f000cacSChristoph Hellwig 	srq_attr.attr.max_wr = srq_size;
11000d5ee2b2SSteve Wise 	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
11018f000cacSChristoph Hellwig 	srq_attr.attr.srq_limit = 0;
11028f000cacSChristoph Hellwig 	srq_attr.srq_type = IB_SRQT_BASIC;
11038f000cacSChristoph Hellwig 	srq = ib_create_srq(ndev->pd, &srq_attr);
11048f000cacSChristoph Hellwig 	if (IS_ERR(srq)) {
1105b0012dd3SMax Gurtovoy 		ret = PTR_ERR(srq);
1106b0012dd3SMax Gurtovoy 		goto out_free;
1107b0012dd3SMax Gurtovoy 	}
1108b0012dd3SMax Gurtovoy 
1109b0012dd3SMax Gurtovoy 	nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
1110b0012dd3SMax Gurtovoy 	if (IS_ERR(nsrq->cmds)) {
1111b0012dd3SMax Gurtovoy 		ret = PTR_ERR(nsrq->cmds);
1112b0012dd3SMax Gurtovoy 		goto out_destroy_srq;
1113b0012dd3SMax Gurtovoy 	}
1114b0012dd3SMax Gurtovoy 
1115b0012dd3SMax Gurtovoy 	nsrq->srq = srq;
1116b0012dd3SMax Gurtovoy 	nsrq->ndev = ndev;
1117b0012dd3SMax Gurtovoy 
1118b0012dd3SMax Gurtovoy 	for (i = 0; i < srq_size; i++) {
1119b0012dd3SMax Gurtovoy 		nsrq->cmds[i].nsrq = nsrq;
1120b0012dd3SMax Gurtovoy 		ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
1121b0012dd3SMax Gurtovoy 		if (ret)
1122b0012dd3SMax Gurtovoy 			goto out_free_cmds;
1123b0012dd3SMax Gurtovoy 	}
1124b0012dd3SMax Gurtovoy 
1125b0012dd3SMax Gurtovoy 	return nsrq;
1126b0012dd3SMax Gurtovoy 
1127b0012dd3SMax Gurtovoy out_free_cmds:
1128b0012dd3SMax Gurtovoy 	nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
1129b0012dd3SMax Gurtovoy out_destroy_srq:
1130b0012dd3SMax Gurtovoy 	ib_destroy_srq(srq);
1131b0012dd3SMax Gurtovoy out_free:
1132b0012dd3SMax Gurtovoy 	kfree(nsrq);
1133b0012dd3SMax Gurtovoy 	return ERR_PTR(ret);
1134b0012dd3SMax Gurtovoy }
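/*
 * Each SRQ is created with one recv SGE for the 64-byte command capsule
 * plus one SGE per page of inline data (1 + ndev->inline_page_count), and
 * all srq_size receive buffers are posted before the SRQ is handed out.
 */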
1135b0012dd3SMax Gurtovoy 
1136b0012dd3SMax Gurtovoy static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
1137b0012dd3SMax Gurtovoy {
1138b0012dd3SMax Gurtovoy 	int i, ret;
1139b0012dd3SMax Gurtovoy 
1140b0012dd3SMax Gurtovoy 	if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
11418f000cacSChristoph Hellwig 		/*
11428f000cacSChristoph Hellwig 		 * If SRQs aren't supported, we just go ahead and use normal
11438f000cacSChristoph Hellwig 		 * non-shared receive queues.
11448f000cacSChristoph Hellwig 		 */
11458f000cacSChristoph Hellwig 		pr_info("SRQ requested but not supported.\n");
11468f000cacSChristoph Hellwig 		return 0;
11478f000cacSChristoph Hellwig 	}
11488f000cacSChristoph Hellwig 
1149b0012dd3SMax Gurtovoy 	ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
1150b0012dd3SMax Gurtovoy 			     nvmet_rdma_srq_size);
1151b0012dd3SMax Gurtovoy 	ndev->srq_count = min(ndev->device->num_comp_vectors,
1152b0012dd3SMax Gurtovoy 			      ndev->device->attrs.max_srq);
1153b0012dd3SMax Gurtovoy 
1154b0012dd3SMax Gurtovoy 	ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
1155b0012dd3SMax Gurtovoy 	if (!ndev->srqs)
1156b0012dd3SMax Gurtovoy 		return -ENOMEM;
1157b0012dd3SMax Gurtovoy 
1158b0012dd3SMax Gurtovoy 	for (i = 0; i < ndev->srq_count; i++) {
1159b0012dd3SMax Gurtovoy 		ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
1160b0012dd3SMax Gurtovoy 		if (IS_ERR(ndev->srqs[i])) {
1161b0012dd3SMax Gurtovoy 			ret = PTR_ERR(ndev->srqs[i]);
1162b0012dd3SMax Gurtovoy 			goto err_srq;
11638f000cacSChristoph Hellwig 		}
116420209384SMax Gurtovoy 	}
11658f000cacSChristoph Hellwig 
11668f000cacSChristoph Hellwig 	return 0;
11678f000cacSChristoph Hellwig 
1168b0012dd3SMax Gurtovoy err_srq:
1169b0012dd3SMax Gurtovoy 	while (--i >= 0)
1170b0012dd3SMax Gurtovoy 		nvmet_rdma_destroy_srq(ndev->srqs[i]);
1171b0012dd3SMax Gurtovoy 	kfree(ndev->srqs);
11728f000cacSChristoph Hellwig 	return ret;
11738f000cacSChristoph Hellwig }
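/*
 * SRQ sizing: srq_size is bounded by the device's max_srq_wr and the
 * nvmet_rdma_srq_size tunable, and one SRQ is created per completion
 * vector (capped by attrs.max_srq), so receive buffers are shared while
 * completion processing still spreads across vectors.
 */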
11748f000cacSChristoph Hellwig 
11758f000cacSChristoph Hellwig static void nvmet_rdma_free_dev(struct kref *ref)
11768f000cacSChristoph Hellwig {
11778f000cacSChristoph Hellwig 	struct nvmet_rdma_device *ndev =
11788f000cacSChristoph Hellwig 		container_of(ref, struct nvmet_rdma_device, ref);
11798f000cacSChristoph Hellwig 
11808f000cacSChristoph Hellwig 	mutex_lock(&device_list_mutex);
11818f000cacSChristoph Hellwig 	list_del(&ndev->entry);
11828f000cacSChristoph Hellwig 	mutex_unlock(&device_list_mutex);
11838f000cacSChristoph Hellwig 
1184b0012dd3SMax Gurtovoy 	nvmet_rdma_destroy_srqs(ndev);
11858f000cacSChristoph Hellwig 	ib_dealloc_pd(ndev->pd);
11868f000cacSChristoph Hellwig 
11878f000cacSChristoph Hellwig 	kfree(ndev);
11888f000cacSChristoph Hellwig }
11898f000cacSChristoph Hellwig 
11908f000cacSChristoph Hellwig static struct nvmet_rdma_device *
11918f000cacSChristoph Hellwig nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
11928f000cacSChristoph Hellwig {
1193a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port = cm_id->context;
1194a032e4f6SSagi Grimberg 	struct nvmet_port *nport = port->nport;
11958f000cacSChristoph Hellwig 	struct nvmet_rdma_device *ndev;
11960d5ee2b2SSteve Wise 	int inline_page_count;
11970d5ee2b2SSteve Wise 	int inline_sge_count;
11988f000cacSChristoph Hellwig 	int ret;
11998f000cacSChristoph Hellwig 
12008f000cacSChristoph Hellwig 	mutex_lock(&device_list_mutex);
12018f000cacSChristoph Hellwig 	list_for_each_entry(ndev, &device_list, entry) {
12028f000cacSChristoph Hellwig 		if (ndev->device->node_guid == cm_id->device->node_guid &&
12038f000cacSChristoph Hellwig 		    kref_get_unless_zero(&ndev->ref))
12048f000cacSChristoph Hellwig 			goto out_unlock;
12058f000cacSChristoph Hellwig 	}
12068f000cacSChristoph Hellwig 
12078f000cacSChristoph Hellwig 	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
12088f000cacSChristoph Hellwig 	if (!ndev)
12098f000cacSChristoph Hellwig 		goto out_err;
12108f000cacSChristoph Hellwig 
1211a032e4f6SSagi Grimberg 	inline_page_count = num_pages(nport->inline_data_size);
12120d5ee2b2SSteve Wise 	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
12130a3173a5SJason Gunthorpe 				cm_id->device->attrs.max_recv_sge) - 1;
12140d5ee2b2SSteve Wise 	if (inline_page_count > inline_sge_count) {
12150d5ee2b2SSteve Wise 		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
1216a032e4f6SSagi Grimberg 			nport->inline_data_size, cm_id->device->name,
12170d5ee2b2SSteve Wise 			inline_sge_count * PAGE_SIZE);
1218a032e4f6SSagi Grimberg 		nport->inline_data_size = inline_sge_count * PAGE_SIZE;
12190d5ee2b2SSteve Wise 		inline_page_count = inline_sge_count;
12200d5ee2b2SSteve Wise 	}
1221a032e4f6SSagi Grimberg 	ndev->inline_data_size = nport->inline_data_size;
12220d5ee2b2SSteve Wise 	ndev->inline_page_count = inline_page_count;
12237a846656SIsrael Rukshin 
1224e945c653SJason Gunthorpe 	if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
1225e945c653SJason Gunthorpe 				  IBK_INTEGRITY_HANDOVER)) {
12267a846656SIsrael Rukshin 		pr_warn("T10-PI is not supported by device %s. Disabling it\n",
12277a846656SIsrael Rukshin 			cm_id->device->name);
12287a846656SIsrael Rukshin 		nport->pi_enable = false;
12297a846656SIsrael Rukshin 	}
12307a846656SIsrael Rukshin 
12318f000cacSChristoph Hellwig 	ndev->device = cm_id->device;
12328f000cacSChristoph Hellwig 	kref_init(&ndev->ref);
12338f000cacSChristoph Hellwig 
1234ed082d36SChristoph Hellwig 	ndev->pd = ib_alloc_pd(ndev->device, 0);
12358f000cacSChristoph Hellwig 	if (IS_ERR(ndev->pd))
12368f000cacSChristoph Hellwig 		goto out_free_dev;
12378f000cacSChristoph Hellwig 
12388f000cacSChristoph Hellwig 	if (nvmet_rdma_use_srq) {
1239b0012dd3SMax Gurtovoy 		ret = nvmet_rdma_init_srqs(ndev);
12408f000cacSChristoph Hellwig 		if (ret)
12418f000cacSChristoph Hellwig 			goto out_free_pd;
12428f000cacSChristoph Hellwig 	}
12438f000cacSChristoph Hellwig 
12448f000cacSChristoph Hellwig 	list_add(&ndev->entry, &device_list);
12458f000cacSChristoph Hellwig out_unlock:
12468f000cacSChristoph Hellwig 	mutex_unlock(&device_list_mutex);
12478f000cacSChristoph Hellwig 	pr_debug("added %s.\n", ndev->device->name);
12488f000cacSChristoph Hellwig 	return ndev;
12498f000cacSChristoph Hellwig 
12508f000cacSChristoph Hellwig out_free_pd:
12518f000cacSChristoph Hellwig 	ib_dealloc_pd(ndev->pd);
12528f000cacSChristoph Hellwig out_free_dev:
12538f000cacSChristoph Hellwig 	kfree(ndev);
12548f000cacSChristoph Hellwig out_err:
12558f000cacSChristoph Hellwig 	mutex_unlock(&device_list_mutex);
12568f000cacSChristoph Hellwig 	return NULL;
12578f000cacSChristoph Hellwig }
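/*
 * Devices are cached in device_list keyed by node GUID, so ports and
 * queues on the same HCA share one PD (and one SRQ set).  The configured
 * inline data size is clamped to what the device's recv SGE limit allows
 * (one SGE is always reserved for the command capsule), and T10-PI is
 * quietly disabled when the device lacks IBK_INTEGRITY_HANDOVER.
 */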
12588f000cacSChristoph Hellwig 
12598f000cacSChristoph Hellwig static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
12608f000cacSChristoph Hellwig {
12618abd7e2aSChaitanya Kulkarni 	struct ib_qp_init_attr qp_attr = { };
12628f000cacSChristoph Hellwig 	struct nvmet_rdma_device *ndev = queue->dev;
1263b0012dd3SMax Gurtovoy 	int nr_cqe, ret, i, factor;
12648f000cacSChristoph Hellwig 
12658f000cacSChristoph Hellwig 	/*
12668f000cacSChristoph Hellwig 	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
12678f000cacSChristoph Hellwig 	 */
12688f000cacSChristoph Hellwig 	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
12698f000cacSChristoph Hellwig 
1270ca0f1a80SYamin Friedman 	queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
1271ca0f1a80SYamin Friedman 				   queue->comp_vector, IB_POLL_WORKQUEUE);
12728f000cacSChristoph Hellwig 	if (IS_ERR(queue->cq)) {
12738f000cacSChristoph Hellwig 		ret = PTR_ERR(queue->cq);
12748f000cacSChristoph Hellwig 		pr_err("failed to create CQ cqe= %d ret= %d\n",
12758f000cacSChristoph Hellwig 		       nr_cqe + 1, ret);
12768f000cacSChristoph Hellwig 		goto out;
12778f000cacSChristoph Hellwig 	}
12788f000cacSChristoph Hellwig 
12798f000cacSChristoph Hellwig 	qp_attr.qp_context = queue;
12808f000cacSChristoph Hellwig 	qp_attr.event_handler = nvmet_rdma_qp_event;
12818f000cacSChristoph Hellwig 	qp_attr.send_cq = queue->cq;
12828f000cacSChristoph Hellwig 	qp_attr.recv_cq = queue->cq;
12838f000cacSChristoph Hellwig 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
12848f000cacSChristoph Hellwig 	qp_attr.qp_type = IB_QPT_RC;
12858f000cacSChristoph Hellwig 	/* +1 for drain */
12868f000cacSChristoph Hellwig 	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
1287c363f249SMax Gurtovoy 	factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
1288c363f249SMax Gurtovoy 				   1 << NVMET_RDMA_MAX_MDTS);
1289c363f249SMax Gurtovoy 	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
12908f000cacSChristoph Hellwig 	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
129133023fb8SSteve Wise 					ndev->device->attrs.max_send_sge);
12928f000cacSChristoph Hellwig 
1293b0012dd3SMax Gurtovoy 	if (queue->nsrq) {
1294b0012dd3SMax Gurtovoy 		qp_attr.srq = queue->nsrq->srq;
12958f000cacSChristoph Hellwig 	} else {
12968f000cacSChristoph Hellwig 		/* +1 for drain */
12978f000cacSChristoph Hellwig 		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
12980d5ee2b2SSteve Wise 		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
12998f000cacSChristoph Hellwig 	}
13008f000cacSChristoph Hellwig 
1301b09160c3SIsrael Rukshin 	if (queue->port->pi_enable && queue->host_qid)
1302b09160c3SIsrael Rukshin 		qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
1303b09160c3SIsrael Rukshin 
13048f000cacSChristoph Hellwig 	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
13058f000cacSChristoph Hellwig 	if (ret) {
13068f000cacSChristoph Hellwig 		pr_err("failed to create_qp ret= %d\n", ret);
13078f000cacSChristoph Hellwig 		goto err_destroy_cq;
13088f000cacSChristoph Hellwig 	}
130921f90243SIsrael Rukshin 	queue->qp = queue->cm_id->qp;
13108f000cacSChristoph Hellwig 
13118f000cacSChristoph Hellwig 	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
13128f000cacSChristoph Hellwig 
13138f000cacSChristoph Hellwig 	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
13148f000cacSChristoph Hellwig 		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
13158f000cacSChristoph Hellwig 		 qp_attr.cap.max_send_wr, queue->cm_id);
13168f000cacSChristoph Hellwig 
1317b0012dd3SMax Gurtovoy 	if (!queue->nsrq) {
13188f000cacSChristoph Hellwig 		for (i = 0; i < queue->recv_queue_size; i++) {
13198f000cacSChristoph Hellwig 			queue->cmds[i].queue = queue;
132020209384SMax Gurtovoy 			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
132120209384SMax Gurtovoy 			if (ret)
132220209384SMax Gurtovoy 				goto err_destroy_qp;
13238f000cacSChristoph Hellwig 		}
13248f000cacSChristoph Hellwig 	}
13258f000cacSChristoph Hellwig 
13268f000cacSChristoph Hellwig out:
13278f000cacSChristoph Hellwig 	return ret;
13288f000cacSChristoph Hellwig 
132920209384SMax Gurtovoy err_destroy_qp:
133020209384SMax Gurtovoy 	rdma_destroy_qp(queue->cm_id);
13318f000cacSChristoph Hellwig err_destroy_cq:
1332ca0f1a80SYamin Friedman 	ib_cq_pool_put(queue->cq, nr_cqe + 1);
13338f000cacSChristoph Hellwig 	goto out;
13348f000cacSChristoph Hellwig }
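/*
 * CQ sizing example (illustrative numbers): with recv_queue_size = 128 and
 * send_queue_size = 128, nr_cqe = 128 + 2 * 128 = 384, and the pooled CQ
 * is sized for 385 entries to cover the drain WR.  max_rdma_ctxs is
 * likewise scaled by rdma_rw_mr_factor() for the largest transfer we
 * advertise, 1 << NVMET_RDMA_MAX_MDTS = 256 pages (1MB with 4KB pages).
 */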
13358f000cacSChristoph Hellwig 
13368f000cacSChristoph Hellwig static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
13378f000cacSChristoph Hellwig {
133821f90243SIsrael Rukshin 	ib_drain_qp(queue->qp);
133921f90243SIsrael Rukshin 	if (queue->cm_id)
1340e1a2ee24SIsrael Rukshin 		rdma_destroy_id(queue->cm_id);
134121f90243SIsrael Rukshin 	ib_destroy_qp(queue->qp);
1342ca0f1a80SYamin Friedman 	ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
1343ca0f1a80SYamin Friedman 		       queue->send_queue_size + 1);
13448f000cacSChristoph Hellwig }
13458f000cacSChristoph Hellwig 
13468f000cacSChristoph Hellwig static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
13478f000cacSChristoph Hellwig {
1348424125a0SSagi Grimberg 	pr_debug("freeing queue %d\n", queue->idx);
13498f000cacSChristoph Hellwig 
13508f000cacSChristoph Hellwig 	nvmet_sq_destroy(&queue->nvme_sq);
13518f000cacSChristoph Hellwig 
13528f000cacSChristoph Hellwig 	nvmet_rdma_destroy_queue_ib(queue);
1353b0012dd3SMax Gurtovoy 	if (!queue->nsrq) {
13548f000cacSChristoph Hellwig 		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
13558f000cacSChristoph Hellwig 				queue->recv_queue_size,
13568f000cacSChristoph Hellwig 				!queue->host_qid);
13578f000cacSChristoph Hellwig 	}
13588f000cacSChristoph Hellwig 	nvmet_rdma_free_rsps(queue);
13597c256639SSagi Grimberg 	ida_free(&nvmet_rdma_queue_ida, queue->idx);
13608f000cacSChristoph Hellwig 	kfree(queue);
13618f000cacSChristoph Hellwig }
13628f000cacSChristoph Hellwig 
13638f000cacSChristoph Hellwig static void nvmet_rdma_release_queue_work(struct work_struct *w)
13648f000cacSChristoph Hellwig {
13658f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue =
13668f000cacSChristoph Hellwig 		container_of(w, struct nvmet_rdma_queue, release_work);
13678f000cacSChristoph Hellwig 	struct nvmet_rdma_device *dev = queue->dev;
13688f000cacSChristoph Hellwig 
13698f000cacSChristoph Hellwig 	nvmet_rdma_free_queue(queue);
1370d8f7750aSSagi Grimberg 
13718f000cacSChristoph Hellwig 	kref_put(&dev->ref, nvmet_rdma_free_dev);
13728f000cacSChristoph Hellwig }
13738f000cacSChristoph Hellwig 
13748f000cacSChristoph Hellwig static int
13758f000cacSChristoph Hellwig nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
13768f000cacSChristoph Hellwig 				struct nvmet_rdma_queue *queue)
13778f000cacSChristoph Hellwig {
13788f000cacSChristoph Hellwig 	struct nvme_rdma_cm_req *req;
13798f000cacSChristoph Hellwig 
13808f000cacSChristoph Hellwig 	req = (struct nvme_rdma_cm_req *)conn->private_data;
13818f000cacSChristoph Hellwig 	if (!req || conn->private_data_len == 0)
13828f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_LEN;
13838f000cacSChristoph Hellwig 
13848f000cacSChristoph Hellwig 	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
13858f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_RECFMT;
13868f000cacSChristoph Hellwig 
13878f000cacSChristoph Hellwig 	queue->host_qid = le16_to_cpu(req->qid);
13888f000cacSChristoph Hellwig 
13898f000cacSChristoph Hellwig 	/*
1390b825b44cSJay Freyensee 	 * req->hsqsize is a 0's based value, so our recv queue size is hsqsize + 1;
13918f000cacSChristoph Hellwig 	 * req->hrqsize maps directly to our send queue size
13928f000cacSChristoph Hellwig 	 */
1393b825b44cSJay Freyensee 	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
13948f000cacSChristoph Hellwig 	queue->send_queue_size = le16_to_cpu(req->hrqsize);
13958f000cacSChristoph Hellwig 
13967aa1f427SSagi Grimberg 	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
13978f000cacSChristoph Hellwig 		return NVME_RDMA_CM_INVALID_HSQSIZE;
13988f000cacSChristoph Hellwig 
13998f000cacSChristoph Hellwig 	/* XXX: Should we enforce some kind of max for IO queues? */
14008f000cacSChristoph Hellwig 
14018f000cacSChristoph Hellwig 	return 0;
14028f000cacSChristoph Hellwig }
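/*
 * Example: a host connect request carrying hsqsize = 31 yields a 32-entry
 * receive queue, while hrqsize is used as-is for the send queue.  Admin
 * queue (qid 0) connects are rejected if the resulting depth exceeds
 * NVME_AQ_DEPTH.
 */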
14038f000cacSChristoph Hellwig 
14048f000cacSChristoph Hellwig static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
14058f000cacSChristoph Hellwig 				enum nvme_rdma_cm_status status)
14068f000cacSChristoph Hellwig {
14078f000cacSChristoph Hellwig 	struct nvme_rdma_cm_rej rej;
14088f000cacSChristoph Hellwig 
14097a01a6eaSMax Gurtovoy 	pr_debug("rejecting connect request: status %d (%s)\n",
14107a01a6eaSMax Gurtovoy 		 status, nvme_rdma_cm_msg(status));
14117a01a6eaSMax Gurtovoy 
14128f000cacSChristoph Hellwig 	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
14138f000cacSChristoph Hellwig 	rej.sts = cpu_to_le16(status);
14148f000cacSChristoph Hellwig 
14158094ba0aSLeon Romanovsky 	return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
14168094ba0aSLeon Romanovsky 			   IB_CM_REJ_CONSUMER_DEFINED);
14178f000cacSChristoph Hellwig }
14188f000cacSChristoph Hellwig 
14198f000cacSChristoph Hellwig static struct nvmet_rdma_queue *
14208f000cacSChristoph Hellwig nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
14218f000cacSChristoph Hellwig 		struct rdma_cm_id *cm_id,
14228f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
14238f000cacSChristoph Hellwig {
1424b09160c3SIsrael Rukshin 	struct nvmet_rdma_port *port = cm_id->context;
14258f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
14268f000cacSChristoph Hellwig 	int ret;
14278f000cacSChristoph Hellwig 
14288f000cacSChristoph Hellwig 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
14298f000cacSChristoph Hellwig 	if (!queue) {
14308f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
14318f000cacSChristoph Hellwig 		goto out_reject;
14328f000cacSChristoph Hellwig 	}
14338f000cacSChristoph Hellwig 
14348f000cacSChristoph Hellwig 	ret = nvmet_sq_init(&queue->nvme_sq);
143570d4281cSBart Van Assche 	if (ret) {
143670d4281cSBart Van Assche 		ret = NVME_RDMA_CM_NO_RSC;
14378f000cacSChristoph Hellwig 		goto out_free_queue;
143870d4281cSBart Van Assche 	}
14398f000cacSChristoph Hellwig 
14408f000cacSChristoph Hellwig 	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
14418f000cacSChristoph Hellwig 	if (ret)
14428f000cacSChristoph Hellwig 		goto out_destroy_sq;
14438f000cacSChristoph Hellwig 
14448f000cacSChristoph Hellwig 	/*
14458f000cacSChristoph Hellwig 	 * Schedules the actual release because calling rdma_destroy_id from
14468f000cacSChristoph Hellwig 	 * inside a CM callback would trigger a deadlock. (great API design..)
14478f000cacSChristoph Hellwig 	 */
14488f000cacSChristoph Hellwig 	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
14498f000cacSChristoph Hellwig 	queue->dev = ndev;
14508f000cacSChristoph Hellwig 	queue->cm_id = cm_id;
1451b09160c3SIsrael Rukshin 	queue->port = port->nport;
14528f000cacSChristoph Hellwig 
14538f000cacSChristoph Hellwig 	spin_lock_init(&queue->state_lock);
14548f000cacSChristoph Hellwig 	queue->state = NVMET_RDMA_Q_CONNECTING;
14558f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->rsp_wait_list);
14568f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
14578f000cacSChristoph Hellwig 	spin_lock_init(&queue->rsp_wr_wait_lock);
14588f000cacSChristoph Hellwig 	INIT_LIST_HEAD(&queue->free_rsps);
14598f000cacSChristoph Hellwig 	spin_lock_init(&queue->rsps_lock);
1460766dbb17SSagi Grimberg 	INIT_LIST_HEAD(&queue->queue_list);
14618f000cacSChristoph Hellwig 
14627c256639SSagi Grimberg 	queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
14638f000cacSChristoph Hellwig 	if (queue->idx < 0) {
14648f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
14656ccaeb56SChristophe JAILLET 		goto out_destroy_sq;
14668f000cacSChristoph Hellwig 	}
14678f000cacSChristoph Hellwig 
1468b0012dd3SMax Gurtovoy 	/*
1469b0012dd3SMax Gurtovoy 	 * Spread the io queues across completion vectors,
1469b0012dd3SMax Gurtovoy 	 * Spread the I/O queues across completion vectors,
1471b0012dd3SMax Gurtovoy 	 */
1472b0012dd3SMax Gurtovoy 	queue->comp_vector = !queue->host_qid ? 0 :
1473b0012dd3SMax Gurtovoy 		queue->idx % ndev->device->num_comp_vectors;
1474b0012dd3SMax Gurtovoy 
1475b0012dd3SMax Gurtovoy 
14768f000cacSChristoph Hellwig 	ret = nvmet_rdma_alloc_rsps(queue);
14778f000cacSChristoph Hellwig 	if (ret) {
14788f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
14798f000cacSChristoph Hellwig 		goto out_ida_remove;
14808f000cacSChristoph Hellwig 	}
14818f000cacSChristoph Hellwig 
1482b0012dd3SMax Gurtovoy 	if (ndev->srqs) {
1483b0012dd3SMax Gurtovoy 		queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
1484b0012dd3SMax Gurtovoy 	} else {
14858f000cacSChristoph Hellwig 		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
14868f000cacSChristoph Hellwig 				queue->recv_queue_size,
14878f000cacSChristoph Hellwig 				!queue->host_qid);
14888f000cacSChristoph Hellwig 		if (IS_ERR(queue->cmds)) {
14898f000cacSChristoph Hellwig 			ret = NVME_RDMA_CM_NO_RSC;
14908f000cacSChristoph Hellwig 			goto out_free_responses;
14918f000cacSChristoph Hellwig 		}
14928f000cacSChristoph Hellwig 	}
14938f000cacSChristoph Hellwig 
14948f000cacSChristoph Hellwig 	ret = nvmet_rdma_create_queue_ib(queue);
14958f000cacSChristoph Hellwig 	if (ret) {
14968f000cacSChristoph Hellwig 		pr_err("%s: creating RDMA queue failed (%d).\n",
14978f000cacSChristoph Hellwig 			__func__, ret);
14988f000cacSChristoph Hellwig 		ret = NVME_RDMA_CM_NO_RSC;
14998f000cacSChristoph Hellwig 		goto out_free_cmds;
15008f000cacSChristoph Hellwig 	}
15018f000cacSChristoph Hellwig 
15028f000cacSChristoph Hellwig 	return queue;
15038f000cacSChristoph Hellwig 
15048f000cacSChristoph Hellwig out_free_cmds:
1505b0012dd3SMax Gurtovoy 	if (!queue->nsrq) {
15068f000cacSChristoph Hellwig 		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
15078f000cacSChristoph Hellwig 				queue->recv_queue_size,
15088f000cacSChristoph Hellwig 				!queue->host_qid);
15098f000cacSChristoph Hellwig 	}
15108f000cacSChristoph Hellwig out_free_responses:
15118f000cacSChristoph Hellwig 	nvmet_rdma_free_rsps(queue);
15128f000cacSChristoph Hellwig out_ida_remove:
15137c256639SSagi Grimberg 	ida_free(&nvmet_rdma_queue_ida, queue->idx);
15148f000cacSChristoph Hellwig out_destroy_sq:
15158f000cacSChristoph Hellwig 	nvmet_sq_destroy(&queue->nvme_sq);
15168f000cacSChristoph Hellwig out_free_queue:
15178f000cacSChristoph Hellwig 	kfree(queue);
15188f000cacSChristoph Hellwig out_reject:
15198f000cacSChristoph Hellwig 	nvmet_rdma_cm_reject(cm_id, ret);
15208f000cacSChristoph Hellwig 	return NULL;
15218f000cacSChristoph Hellwig }
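/*
 * The admin queue (host_qid == 0) is pinned to completion vector 0; I/O
 * queues round-robin across vectors by queue index.  In SRQ mode the
 * queue simply borrows the SRQ matching its vector instead of allocating
 * per-queue receive commands, which is why the error unwind skips the
 * cmds array in that case.
 */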
15228f000cacSChristoph Hellwig 
15238f000cacSChristoph Hellwig static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
15248f000cacSChristoph Hellwig {
15258f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue = priv;
15268f000cacSChristoph Hellwig 
15278f000cacSChristoph Hellwig 	switch (event->event) {
15288f000cacSChristoph Hellwig 	case IB_EVENT_COMM_EST:
15298f000cacSChristoph Hellwig 		rdma_notify(queue->cm_id, event->event);
15308f000cacSChristoph Hellwig 		break;
1531b0012dd3SMax Gurtovoy 	case IB_EVENT_QP_LAST_WQE_REACHED:
1532b0012dd3SMax Gurtovoy 		pr_debug("received last WQE reached event for queue=0x%p\n",
1533b0012dd3SMax Gurtovoy 			 queue);
1534b0012dd3SMax Gurtovoy 		break;
15358f000cacSChristoph Hellwig 	default:
1536675796beSMax Gurtovoy 		pr_err("received IB QP event: %s (%d)\n",
1537675796beSMax Gurtovoy 		       ib_event_msg(event->event), event->event);
15388f000cacSChristoph Hellwig 		break;
15398f000cacSChristoph Hellwig 	}
15408f000cacSChristoph Hellwig }
15418f000cacSChristoph Hellwig 
15428f000cacSChristoph Hellwig static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
15438f000cacSChristoph Hellwig 		struct nvmet_rdma_queue *queue,
15448f000cacSChristoph Hellwig 		struct rdma_conn_param *p)
15458f000cacSChristoph Hellwig {
15468f000cacSChristoph Hellwig 	struct rdma_conn_param  param = { };
15478f000cacSChristoph Hellwig 	struct nvme_rdma_cm_rep priv = { };
15488f000cacSChristoph Hellwig 	int ret = -ENOMEM;
15498f000cacSChristoph Hellwig 
15508f000cacSChristoph Hellwig 	param.rnr_retry_count = 7;
15518f000cacSChristoph Hellwig 	param.flow_control = 1;
15528f000cacSChristoph Hellwig 	param.initiator_depth = min_t(u8, p->initiator_depth,
15538f000cacSChristoph Hellwig 		queue->dev->device->attrs.max_qp_init_rd_atom);
15548f000cacSChristoph Hellwig 	param.private_data = &priv;
15558f000cacSChristoph Hellwig 	param.private_data_len = sizeof(priv);
15568f000cacSChristoph Hellwig 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
15578f000cacSChristoph Hellwig 	priv.crqsize = cpu_to_le16(queue->recv_queue_size);
15588f000cacSChristoph Hellwig 
15598f000cacSChristoph Hellwig 	ret = rdma_accept(cm_id, &param);
15608f000cacSChristoph Hellwig 	if (ret)
15618f000cacSChristoph Hellwig 		pr_err("rdma_accept failed (error code = %d)\n", ret);
15628f000cacSChristoph Hellwig 
15638f000cacSChristoph Hellwig 	return ret;
15648f000cacSChristoph Hellwig }
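/*
 * The accept private data echoes recfmt 1.0 and advertises crqsize (our
 * receive queue depth) back to the host.  initiator_depth is clamped to
 * the device's max_qp_init_rd_atom so the target never initiates more
 * concurrent RDMA READs than the HCA supports, and rnr_retry_count = 7
 * requests infinite RNR retries.
 */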
15658f000cacSChristoph Hellwig 
15668f000cacSChristoph Hellwig static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
15678f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
15688f000cacSChristoph Hellwig {
15698f000cacSChristoph Hellwig 	struct nvmet_rdma_device *ndev;
15708f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
15718f000cacSChristoph Hellwig 	int ret = -EINVAL;
15728f000cacSChristoph Hellwig 
15738f000cacSChristoph Hellwig 	ndev = nvmet_rdma_find_get_device(cm_id);
15748f000cacSChristoph Hellwig 	if (!ndev) {
15758f000cacSChristoph Hellwig 		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
15768f000cacSChristoph Hellwig 		return -ECONNREFUSED;
15778f000cacSChristoph Hellwig 	}
15788f000cacSChristoph Hellwig 
15798f000cacSChristoph Hellwig 	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
15808f000cacSChristoph Hellwig 	if (!queue) {
15818f000cacSChristoph Hellwig 		ret = -ENOMEM;
15828f000cacSChristoph Hellwig 		goto put_device;
15838f000cacSChristoph Hellwig 	}
15848f000cacSChristoph Hellwig 
1585777dc823SSagi Grimberg 	if (queue->host_qid == 0) {
1586777dc823SSagi Grimberg 		/* Let inflight controller teardown complete */
15878832cf92SSagi Grimberg 		flush_workqueue(nvmet_wq);
1588777dc823SSagi Grimberg 	}
1589777dc823SSagi Grimberg 
15908f000cacSChristoph Hellwig 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1591e1a2ee24SIsrael Rukshin 	if (ret) {
159221f90243SIsrael Rukshin 		/*
159321f90243SIsrael Rukshin 		 * Don't destroy the cm_id in the free path, as we implicitly
159421f90243SIsrael Rukshin 		 * destroy it here by returning a non-zero ret code.
159521f90243SIsrael Rukshin 		 */
159621f90243SIsrael Rukshin 		queue->cm_id = NULL;
159721f90243SIsrael Rukshin 		goto free_queue;
1598e1a2ee24SIsrael Rukshin 	}
15998f000cacSChristoph Hellwig 
16008f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
16018f000cacSChristoph Hellwig 	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
16028f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
16038f000cacSChristoph Hellwig 
16048f000cacSChristoph Hellwig 	return 0;
16058f000cacSChristoph Hellwig 
160621f90243SIsrael Rukshin free_queue:
160721f90243SIsrael Rukshin 	nvmet_rdma_free_queue(queue);
16088f000cacSChristoph Hellwig put_device:
16098f000cacSChristoph Hellwig 	kref_put(&ndev->ref, nvmet_rdma_free_dev);
16108f000cacSChristoph Hellwig 
16118f000cacSChristoph Hellwig 	return ret;
16128f000cacSChristoph Hellwig }
16138f000cacSChristoph Hellwig 
16148f000cacSChristoph Hellwig static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
16158f000cacSChristoph Hellwig {
16168f000cacSChristoph Hellwig 	unsigned long flags;
16178f000cacSChristoph Hellwig 
16188f000cacSChristoph Hellwig 	spin_lock_irqsave(&queue->state_lock, flags);
16198f000cacSChristoph Hellwig 	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
16208f000cacSChristoph Hellwig 		pr_warn("trying to establish a connected queue\n");
16218f000cacSChristoph Hellwig 		goto out_unlock;
16228f000cacSChristoph Hellwig 	}
16238f000cacSChristoph Hellwig 	queue->state = NVMET_RDMA_Q_LIVE;
16248f000cacSChristoph Hellwig 
16258f000cacSChristoph Hellwig 	while (!list_empty(&queue->rsp_wait_list)) {
16268f000cacSChristoph Hellwig 		struct nvmet_rdma_rsp *cmd;
16278f000cacSChristoph Hellwig 
16288f000cacSChristoph Hellwig 		cmd = list_first_entry(&queue->rsp_wait_list,
16298f000cacSChristoph Hellwig 					struct nvmet_rdma_rsp, wait_list);
16308f000cacSChristoph Hellwig 		list_del(&cmd->wait_list);
16318f000cacSChristoph Hellwig 
16328f000cacSChristoph Hellwig 		spin_unlock_irqrestore(&queue->state_lock, flags);
16338f000cacSChristoph Hellwig 		nvmet_rdma_handle_command(queue, cmd);
16348f000cacSChristoph Hellwig 		spin_lock_irqsave(&queue->state_lock, flags);
16358f000cacSChristoph Hellwig 	}
16368f000cacSChristoph Hellwig 
16378f000cacSChristoph Hellwig out_unlock:
16388f000cacSChristoph Hellwig 	spin_unlock_irqrestore(&queue->state_lock, flags);
16398f000cacSChristoph Hellwig }
16408f000cacSChristoph Hellwig 
16418f000cacSChristoph Hellwig static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
16428f000cacSChristoph Hellwig {
16438f000cacSChristoph Hellwig 	bool disconnect = false;
16448f000cacSChristoph Hellwig 	unsigned long flags;
16458f000cacSChristoph Hellwig 
16468f000cacSChristoph Hellwig 	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
16478f000cacSChristoph Hellwig 
16488f000cacSChristoph Hellwig 	spin_lock_irqsave(&queue->state_lock, flags);
16498f000cacSChristoph Hellwig 	switch (queue->state) {
16508f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_CONNECTING:
16519ceb7863SIsrael Rukshin 		while (!list_empty(&queue->rsp_wait_list)) {
16529ceb7863SIsrael Rukshin 			struct nvmet_rdma_rsp *rsp;
16539ceb7863SIsrael Rukshin 
16549ceb7863SIsrael Rukshin 			rsp = list_first_entry(&queue->rsp_wait_list,
16559ceb7863SIsrael Rukshin 					       struct nvmet_rdma_rsp,
16569ceb7863SIsrael Rukshin 					       wait_list);
16579ceb7863SIsrael Rukshin 			list_del(&rsp->wait_list);
16589ceb7863SIsrael Rukshin 			nvmet_rdma_put_rsp(rsp);
16599ceb7863SIsrael Rukshin 		}
16609ceb7863SIsrael Rukshin 		fallthrough;
16618f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_LIVE:
16628f000cacSChristoph Hellwig 		queue->state = NVMET_RDMA_Q_DISCONNECTING;
1663d8f7750aSSagi Grimberg 		disconnect = true;
16648f000cacSChristoph Hellwig 		break;
16658f000cacSChristoph Hellwig 	case NVMET_RDMA_Q_DISCONNECTING:
16668f000cacSChristoph Hellwig 		break;
16678f000cacSChristoph Hellwig 	}
16688f000cacSChristoph Hellwig 	spin_unlock_irqrestore(&queue->state_lock, flags);
16698f000cacSChristoph Hellwig 
16708f000cacSChristoph Hellwig 	if (disconnect) {
16718f000cacSChristoph Hellwig 		rdma_disconnect(queue->cm_id);
16728832cf92SSagi Grimberg 		queue_work(nvmet_wq, &queue->release_work);
16738f000cacSChristoph Hellwig 	}
16748f000cacSChristoph Hellwig }
16758f000cacSChristoph Hellwig 
16768f000cacSChristoph Hellwig static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
16778f000cacSChristoph Hellwig {
16788f000cacSChristoph Hellwig 	bool disconnect = false;
16798f000cacSChristoph Hellwig 
16808f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
16818f000cacSChristoph Hellwig 	if (!list_empty(&queue->queue_list)) {
16828f000cacSChristoph Hellwig 		list_del_init(&queue->queue_list);
16838f000cacSChristoph Hellwig 		disconnect = true;
16848f000cacSChristoph Hellwig 	}
16858f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
16868f000cacSChristoph Hellwig 
16878f000cacSChristoph Hellwig 	if (disconnect)
16888f000cacSChristoph Hellwig 		__nvmet_rdma_queue_disconnect(queue);
16898f000cacSChristoph Hellwig }
16908f000cacSChristoph Hellwig 
16918f000cacSChristoph Hellwig static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
16928f000cacSChristoph Hellwig 		struct nvmet_rdma_queue *queue)
16938f000cacSChristoph Hellwig {
16948f000cacSChristoph Hellwig 	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
16958f000cacSChristoph Hellwig 
1696766dbb17SSagi Grimberg 	mutex_lock(&nvmet_rdma_queue_mutex);
1697766dbb17SSagi Grimberg 	if (!list_empty(&queue->queue_list))
1698766dbb17SSagi Grimberg 		list_del_init(&queue->queue_list);
1699766dbb17SSagi Grimberg 	mutex_unlock(&nvmet_rdma_queue_mutex);
1700766dbb17SSagi Grimberg 
1701766dbb17SSagi Grimberg 	pr_err("failed to connect queue %d\n", queue->idx);
17028832cf92SSagi Grimberg 	queue_work(nvmet_wq, &queue->release_work);
17038f000cacSChristoph Hellwig }
17048f000cacSChristoph Hellwig 
1705d8f7750aSSagi Grimberg /**
1706a8adf0cdSChaitanya Kulkarni  * nvmet_rdma_device_removal() - Handle RDMA device removal
1707f1d4ef7dSSagi Grimberg  * @cm_id:	rdma_cm id, used for nvmet port
1708d8f7750aSSagi Grimberg  * @queue:      nvmet rdma queue (cm id qp_context)
1709d8f7750aSSagi Grimberg  *
1710d8f7750aSSagi Grimberg  * The DEVICE_REMOVAL event notifies us that the RDMA device is about
1711f1d4ef7dSSagi Grimberg  * to unplug. Note that this event can be generated on a normal
1712f1d4ef7dSSagi Grimberg  * queue cm_id and/or a device-bound listener cm_id (in which case
1713f1d4ef7dSSagi Grimberg  * queue will be NULL).
1714d8f7750aSSagi Grimberg  *
1715f1d4ef7dSSagi Grimberg  * We registered an ib_client to handle device removal for queues,
1716f1d4ef7dSSagi Grimberg  * so we only need to handle the listening port cm_ids. In that case
1717d8f7750aSSagi Grimberg  * we nullify the priv to prevent double cm_id destruction, and destroy
1718d8f7750aSSagi Grimberg  * the cm_id implicitly by returning a non-zero rc to the callout.
1719d8f7750aSSagi Grimberg  */
1720d8f7750aSSagi Grimberg static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1721d8f7750aSSagi Grimberg 		struct nvmet_rdma_queue *queue)
1722d8f7750aSSagi Grimberg {
1723a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port;
1724d8f7750aSSagi Grimberg 
1725f1d4ef7dSSagi Grimberg 	if (queue) {
1726f1d4ef7dSSagi Grimberg 		/*
1727f1d4ef7dSSagi Grimberg 		 * This is a queue cm_id. We have registered
1728f1d4ef7dSSagi Grimberg 		 * an ib_client to handle queue removal,
1729f1d4ef7dSSagi Grimberg 		 * so don't interfere and just return.
1730f1d4ef7dSSagi Grimberg 		 */
1731f1d4ef7dSSagi Grimberg 		return 0;
1732f1d4ef7dSSagi Grimberg 	}
1733f1d4ef7dSSagi Grimberg 
1734f1d4ef7dSSagi Grimberg 	port = cm_id->context;
1735d8f7750aSSagi Grimberg 
1736d8f7750aSSagi Grimberg 	/*
1737d8f7750aSSagi Grimberg 	 * This is a listener cm_id. Make sure that
1738d8f7750aSSagi Grimberg 	 * future remove_port won't invoke a double
1739d8f7750aSSagi Grimberg 	 * cm_id destroy. Use atomic xchg to make sure
1740d8f7750aSSagi Grimberg 	 * we don't compete with remove_port.
1741d8f7750aSSagi Grimberg 	 */
1742a032e4f6SSagi Grimberg 	if (xchg(&port->cm_id, NULL) != cm_id)
1743d8f7750aSSagi Grimberg 		return 0;
1744d8f7750aSSagi Grimberg 
1745d8f7750aSSagi Grimberg 	/*
1746d8f7750aSSagi Grimberg 	 * We need to return 1 so that the core will destroy
1747d8f7750aSSagi Grimberg 	 * its own ID.  What a great API design..
1748d8f7750aSSagi Grimberg 	 */
1749d8f7750aSSagi Grimberg 	return 1;
1750d8f7750aSSagi Grimberg }
1751d8f7750aSSagi Grimberg 
17528f000cacSChristoph Hellwig static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
17538f000cacSChristoph Hellwig 		struct rdma_cm_event *event)
17548f000cacSChristoph Hellwig {
17558f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue = NULL;
17568f000cacSChristoph Hellwig 	int ret = 0;
17578f000cacSChristoph Hellwig 
17588f000cacSChristoph Hellwig 	if (cm_id->qp)
17598f000cacSChristoph Hellwig 		queue = cm_id->qp->qp_context;
17608f000cacSChristoph Hellwig 
17618f000cacSChristoph Hellwig 	pr_debug("%s (%d): status %d id %p\n",
17628f000cacSChristoph Hellwig 		rdma_event_msg(event->event), event->event,
17638f000cacSChristoph Hellwig 		event->status, cm_id);
17648f000cacSChristoph Hellwig 
17658f000cacSChristoph Hellwig 	switch (event->event) {
17668f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_CONNECT_REQUEST:
17678f000cacSChristoph Hellwig 		ret = nvmet_rdma_queue_connect(cm_id, event);
17688f000cacSChristoph Hellwig 		break;
17698f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_ESTABLISHED:
17708f000cacSChristoph Hellwig 		nvmet_rdma_queue_established(queue);
17718f000cacSChristoph Hellwig 		break;
17728f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_ADDR_CHANGE:
1773a032e4f6SSagi Grimberg 		if (!queue) {
1774a032e4f6SSagi Grimberg 			struct nvmet_rdma_port *port = cm_id->context;
1775a032e4f6SSagi Grimberg 
17768832cf92SSagi Grimberg 			queue_delayed_work(nvmet_wq, &port->repair_work, 0);
1777a032e4f6SSagi Grimberg 			break;
1778a032e4f6SSagi Grimberg 		}
1779df561f66SGustavo A. R. Silva 		fallthrough;
17808f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_DISCONNECTED:
17818f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
17828f000cacSChristoph Hellwig 		nvmet_rdma_queue_disconnect(queue);
1783d8f7750aSSagi Grimberg 		break;
1784d8f7750aSSagi Grimberg 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
1785d8f7750aSSagi Grimberg 		ret = nvmet_rdma_device_removal(cm_id, queue);
17868f000cacSChristoph Hellwig 		break;
17878f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_REJECTED:
1788512fb1b3SSteve Wise 		pr_debug("Connection rejected: %s\n",
1789512fb1b3SSteve Wise 			 rdma_reject_msg(cm_id, event->status));
1790df561f66SGustavo A. R. Silva 		fallthrough;
17918f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_UNREACHABLE:
17928f000cacSChristoph Hellwig 	case RDMA_CM_EVENT_CONNECT_ERROR:
17938f000cacSChristoph Hellwig 		nvmet_rdma_queue_connect_fail(cm_id, queue);
17948f000cacSChristoph Hellwig 		break;
17958f000cacSChristoph Hellwig 	default:
17968f000cacSChristoph Hellwig 		pr_err("received unrecognized RDMA CM event %d\n",
17978f000cacSChristoph Hellwig 			event->event);
17988f000cacSChristoph Hellwig 		break;
17998f000cacSChristoph Hellwig 	}
18008f000cacSChristoph Hellwig 
18018f000cacSChristoph Hellwig 	return ret;
18028f000cacSChristoph Hellwig }
18038f000cacSChristoph Hellwig 
18048f000cacSChristoph Hellwig static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
18058f000cacSChristoph Hellwig {
18068f000cacSChristoph Hellwig 	struct nvmet_rdma_queue *queue;
18078f000cacSChristoph Hellwig 
18088f000cacSChristoph Hellwig restart:
18098f000cacSChristoph Hellwig 	mutex_lock(&nvmet_rdma_queue_mutex);
18108f000cacSChristoph Hellwig 	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
18118f000cacSChristoph Hellwig 		if (queue->nvme_sq.ctrl == ctrl) {
18128f000cacSChristoph Hellwig 			list_del_init(&queue->queue_list);
18138f000cacSChristoph Hellwig 			mutex_unlock(&nvmet_rdma_queue_mutex);
18148f000cacSChristoph Hellwig 
18158f000cacSChristoph Hellwig 			__nvmet_rdma_queue_disconnect(queue);
18168f000cacSChristoph Hellwig 			goto restart;
18178f000cacSChristoph Hellwig 		}
18188f000cacSChristoph Hellwig 	}
18198f000cacSChristoph Hellwig 	mutex_unlock(&nvmet_rdma_queue_mutex);
18208f000cacSChristoph Hellwig }
18218f000cacSChristoph Hellwig 
1822fcf73a80SIsrael Rukshin static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
1823fcf73a80SIsrael Rukshin {
1824fcf73a80SIsrael Rukshin 	struct nvmet_rdma_queue *queue, *tmp;
1825fcf73a80SIsrael Rukshin 	struct nvmet_port *nport = port->nport;
1826fcf73a80SIsrael Rukshin 
1827fcf73a80SIsrael Rukshin 	mutex_lock(&nvmet_rdma_queue_mutex);
1828fcf73a80SIsrael Rukshin 	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
1829fcf73a80SIsrael Rukshin 				 queue_list) {
1830fcf73a80SIsrael Rukshin 		if (queue->port != nport)
1831fcf73a80SIsrael Rukshin 			continue;
1832fcf73a80SIsrael Rukshin 
1833fcf73a80SIsrael Rukshin 		list_del_init(&queue->queue_list);
1834fcf73a80SIsrael Rukshin 		__nvmet_rdma_queue_disconnect(queue);
1835fcf73a80SIsrael Rukshin 	}
1836fcf73a80SIsrael Rukshin 	mutex_unlock(&nvmet_rdma_queue_mutex);
1837fcf73a80SIsrael Rukshin }
1838fcf73a80SIsrael Rukshin 
1839a032e4f6SSagi Grimberg static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
18408f000cacSChristoph Hellwig {
1841a032e4f6SSagi Grimberg 	struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
1842a032e4f6SSagi Grimberg 
1843a032e4f6SSagi Grimberg 	if (cm_id)
1844a032e4f6SSagi Grimberg 		rdma_destroy_id(cm_id);
1845fcf73a80SIsrael Rukshin 
1846fcf73a80SIsrael Rukshin 	/*
1847fcf73a80SIsrael Rukshin 	 * Destroy the remaining queues, which do not belong to any
1848fcf73a80SIsrael Rukshin 	 * controller yet. Doing it here, after the RDMA-CM ID was
1849fcf73a80SIsrael Rukshin 	 * destroyed, guarantees that no new queue will be created.
1850fcf73a80SIsrael Rukshin 	 */
1851fcf73a80SIsrael Rukshin 	nvmet_rdma_destroy_port_queues(port);
1852a032e4f6SSagi Grimberg }
1853a032e4f6SSagi Grimberg 
1854a032e4f6SSagi Grimberg static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
1855a032e4f6SSagi Grimberg {
1856a032e4f6SSagi Grimberg 	struct sockaddr *addr = (struct sockaddr *)&port->addr;
18578f000cacSChristoph Hellwig 	struct rdma_cm_id *cm_id;
18588f000cacSChristoph Hellwig 	int ret;
18598f000cacSChristoph Hellwig 
18608f000cacSChristoph Hellwig 	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
18618f000cacSChristoph Hellwig 			RDMA_PS_TCP, IB_QPT_RC);
18628f000cacSChristoph Hellwig 	if (IS_ERR(cm_id)) {
18638f000cacSChristoph Hellwig 		pr_err("CM ID creation failed\n");
18648f000cacSChristoph Hellwig 		return PTR_ERR(cm_id);
18658f000cacSChristoph Hellwig 	}
18668f000cacSChristoph Hellwig 
1867670c2a3aSSagi Grimberg 	/*
1868670c2a3aSSagi Grimberg 	 * Allow both IPv4 and IPv6 sockets to bind a single port
1869670c2a3aSSagi Grimberg 	 * at the same time.
1870670c2a3aSSagi Grimberg 	 */
1871670c2a3aSSagi Grimberg 	ret = rdma_set_afonly(cm_id, 1);
18728f000cacSChristoph Hellwig 	if (ret) {
1873670c2a3aSSagi Grimberg 		pr_err("rdma_set_afonly failed (%d)\n", ret);
1874670c2a3aSSagi Grimberg 		goto out_destroy_id;
1875670c2a3aSSagi Grimberg 	}
1876670c2a3aSSagi Grimberg 
1877a032e4f6SSagi Grimberg 	ret = rdma_bind_addr(cm_id, addr);
1878670c2a3aSSagi Grimberg 	if (ret) {
1879a032e4f6SSagi Grimberg 		pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
18808f000cacSChristoph Hellwig 		goto out_destroy_id;
18818f000cacSChristoph Hellwig 	}
18828f000cacSChristoph Hellwig 
18838f000cacSChristoph Hellwig 	ret = rdma_listen(cm_id, 128);
18848f000cacSChristoph Hellwig 	if (ret) {
1885a032e4f6SSagi Grimberg 		pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
18868f000cacSChristoph Hellwig 		goto out_destroy_id;
18878f000cacSChristoph Hellwig 	}
18888f000cacSChristoph Hellwig 
1889a032e4f6SSagi Grimberg 	port->cm_id = cm_id;
18908f000cacSChristoph Hellwig 	return 0;
18918f000cacSChristoph Hellwig 
18928f000cacSChristoph Hellwig out_destroy_id:
18938f000cacSChristoph Hellwig 	rdma_destroy_id(cm_id);
18948f000cacSChristoph Hellwig 	return ret;
18958f000cacSChristoph Hellwig }
18968f000cacSChristoph Hellwig 
1897a032e4f6SSagi Grimberg static void nvmet_rdma_repair_port_work(struct work_struct *w)
18988f000cacSChristoph Hellwig {
1899a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
1900a032e4f6SSagi Grimberg 			struct nvmet_rdma_port, repair_work);
1901a032e4f6SSagi Grimberg 	int ret;
19028f000cacSChristoph Hellwig 
1903a032e4f6SSagi Grimberg 	nvmet_rdma_disable_port(port);
1904a032e4f6SSagi Grimberg 	ret = nvmet_rdma_enable_port(port);
1905a032e4f6SSagi Grimberg 	if (ret)
19068832cf92SSagi Grimberg 		queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
1907a032e4f6SSagi Grimberg }
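/*
 * The repair work runs on RDMA_CM_EVENT_ADDR_CHANGE (queued with no delay
 * from the CM handler): it tears the listener down and re-creates it,
 * rescheduling itself every 5 seconds (5 * HZ) until rebinding succeeds.
 */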
1908a032e4f6SSagi Grimberg 
1909a032e4f6SSagi Grimberg static int nvmet_rdma_add_port(struct nvmet_port *nport)
1910a032e4f6SSagi Grimberg {
1911a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port;
1912a032e4f6SSagi Grimberg 	__kernel_sa_family_t af;
1913a032e4f6SSagi Grimberg 	int ret;
1914a032e4f6SSagi Grimberg 
1915a032e4f6SSagi Grimberg 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1916a032e4f6SSagi Grimberg 	if (!port)
1917a032e4f6SSagi Grimberg 		return -ENOMEM;
1918a032e4f6SSagi Grimberg 
1919a032e4f6SSagi Grimberg 	nport->priv = port;
1920a032e4f6SSagi Grimberg 	port->nport = nport;
1921a032e4f6SSagi Grimberg 	INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
1922a032e4f6SSagi Grimberg 
1923a032e4f6SSagi Grimberg 	switch (nport->disc_addr.adrfam) {
1924a032e4f6SSagi Grimberg 	case NVMF_ADDR_FAMILY_IP4:
1925a032e4f6SSagi Grimberg 		af = AF_INET;
1926a032e4f6SSagi Grimberg 		break;
1927a032e4f6SSagi Grimberg 	case NVMF_ADDR_FAMILY_IP6:
1928a032e4f6SSagi Grimberg 		af = AF_INET6;
1929a032e4f6SSagi Grimberg 		break;
1930a032e4f6SSagi Grimberg 	default:
1931a032e4f6SSagi Grimberg 		pr_err("address family %d not supported\n",
1932a032e4f6SSagi Grimberg 			nport->disc_addr.adrfam);
1933a032e4f6SSagi Grimberg 		ret = -EINVAL;
1934a032e4f6SSagi Grimberg 		goto out_free_port;
1935a032e4f6SSagi Grimberg 	}
1936a032e4f6SSagi Grimberg 
1937a032e4f6SSagi Grimberg 	if (nport->inline_data_size < 0) {
1938a032e4f6SSagi Grimberg 		nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
1939a032e4f6SSagi Grimberg 	} else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
1940a032e4f6SSagi Grimberg 		pr_warn("inline_data_size %u is too large, reducing to %u\n",
1941a032e4f6SSagi Grimberg 			nport->inline_data_size,
1942a032e4f6SSagi Grimberg 			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
1943a032e4f6SSagi Grimberg 		nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
1944a032e4f6SSagi Grimberg 	}
1945a032e4f6SSagi Grimberg 
1946a032e4f6SSagi Grimberg 	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1947a032e4f6SSagi Grimberg 			nport->disc_addr.trsvcid, &port->addr);
1948a032e4f6SSagi Grimberg 	if (ret) {
1949a032e4f6SSagi Grimberg 		pr_err("malformed ip/port passed: %s:%s\n",
1950a032e4f6SSagi Grimberg 			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1951a032e4f6SSagi Grimberg 		goto out_free_port;
1952a032e4f6SSagi Grimberg 	}
1953a032e4f6SSagi Grimberg 
1954a032e4f6SSagi Grimberg 	ret = nvmet_rdma_enable_port(port);
1955a032e4f6SSagi Grimberg 	if (ret)
1956a032e4f6SSagi Grimberg 		goto out_free_port;
1957a032e4f6SSagi Grimberg 
1958a032e4f6SSagi Grimberg 	pr_info("enabling port %d (%pISpcs)\n",
1959a032e4f6SSagi Grimberg 		le16_to_cpu(nport->disc_addr.portid),
1960a032e4f6SSagi Grimberg 		(struct sockaddr *)&port->addr);
1961a032e4f6SSagi Grimberg 
1962a032e4f6SSagi Grimberg 	return 0;
1963a032e4f6SSagi Grimberg 
1964a032e4f6SSagi Grimberg out_free_port:
1965a032e4f6SSagi Grimberg 	kfree(port);
1966a032e4f6SSagi Grimberg 	return ret;
1967a032e4f6SSagi Grimberg }
1968a032e4f6SSagi Grimberg 
1969a032e4f6SSagi Grimberg static void nvmet_rdma_remove_port(struct nvmet_port *nport)
1970a032e4f6SSagi Grimberg {
1971a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port = nport->priv;
1972a032e4f6SSagi Grimberg 
1973a032e4f6SSagi Grimberg 	cancel_delayed_work_sync(&port->repair_work);
1974a032e4f6SSagi Grimberg 	nvmet_rdma_disable_port(port);
1975a032e4f6SSagi Grimberg 	kfree(port);
19768f000cacSChristoph Hellwig }
19778f000cacSChristoph Hellwig 
19784c652685SSagi Grimberg static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
1979a032e4f6SSagi Grimberg 		struct nvmet_port *nport, char *traddr)
19804c652685SSagi Grimberg {
1981a032e4f6SSagi Grimberg 	struct nvmet_rdma_port *port = nport->priv;
1982a032e4f6SSagi Grimberg 	struct rdma_cm_id *cm_id = port->cm_id;
19834c652685SSagi Grimberg 
19844c652685SSagi Grimberg 	if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
19854c652685SSagi Grimberg 		struct nvmet_rdma_rsp *rsp =
19864c652685SSagi Grimberg 			container_of(req, struct nvmet_rdma_rsp, req);
19874c652685SSagi Grimberg 		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
19884c652685SSagi Grimberg 		struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
19894c652685SSagi Grimberg 
19904c652685SSagi Grimberg 		sprintf(traddr, "%pISc", addr);
19914c652685SSagi Grimberg 	} else {
1992a032e4f6SSagi Grimberg 		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
19934c652685SSagi Grimberg 	}
19944c652685SSagi Grimberg }
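/*
 * When the listener is bound to a wildcard address, the discovery traddr
 * is taken from the source address of the specific connection that
 * carried the request; otherwise the configured traddr is reported.
 */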
19954c652685SSagi Grimberg 
1996ec6d20e1SMax Gurtovoy static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
1997ec6d20e1SMax Gurtovoy {
1998b09160c3SIsrael Rukshin 	if (ctrl->pi_support)
1999b09160c3SIsrael Rukshin 		return NVMET_RDMA_MAX_METADATA_MDTS;
2000ec6d20e1SMax Gurtovoy 	return NVMET_RDMA_MAX_MDTS;
2001ec6d20e1SMax Gurtovoy }
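/*
 * MDTS is a power-of-two multiplier on the minimum memory page size.
 * Assuming a 4KB MPSMIN, 1 << 8 pages = 1MB for regular I/O and
 * 1 << 5 pages = 128KB when T10-PI metadata must also be transferred.
 */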
2002ec6d20e1SMax Gurtovoy 
2003c7d792f9SMax Gurtovoy static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
2004c7d792f9SMax Gurtovoy {
2005c7d792f9SMax Gurtovoy 	return NVME_RDMA_MAX_QUEUE_SIZE;
2006c7d792f9SMax Gurtovoy }
2007c7d792f9SMax Gurtovoy 
2008e929f06dSChristoph Hellwig static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
20098f000cacSChristoph Hellwig 	.owner			= THIS_MODULE,
20108f000cacSChristoph Hellwig 	.type			= NVMF_TRTYPE_RDMA,
20118f000cacSChristoph Hellwig 	.msdbd			= 1,
20126fa350f7SMax Gurtovoy 	.flags			= NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
20138f000cacSChristoph Hellwig 	.add_port		= nvmet_rdma_add_port,
20148f000cacSChristoph Hellwig 	.remove_port		= nvmet_rdma_remove_port,
20158f000cacSChristoph Hellwig 	.queue_response		= nvmet_rdma_queue_response,
20168f000cacSChristoph Hellwig 	.delete_ctrl		= nvmet_rdma_delete_ctrl,
20174c652685SSagi Grimberg 	.disc_traddr		= nvmet_rdma_disc_port_addr,
2018ec6d20e1SMax Gurtovoy 	.get_mdts		= nvmet_rdma_get_mdts,
2019c7d792f9SMax Gurtovoy 	.get_max_queue_size	= nvmet_rdma_get_max_queue_size,
20208f000cacSChristoph Hellwig };
20218f000cacSChristoph Hellwig 
2022f1d4ef7dSSagi Grimberg static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
2023f1d4ef7dSSagi Grimberg {
202443b92fd2SIsrael Rukshin 	struct nvmet_rdma_queue *queue, *tmp;
2025a3dd7d00SMax Gurtovoy 	struct nvmet_rdma_device *ndev;
2026a3dd7d00SMax Gurtovoy 	bool found = false;
2027f1d4ef7dSSagi Grimberg 
2028a3dd7d00SMax Gurtovoy 	mutex_lock(&device_list_mutex);
2029a3dd7d00SMax Gurtovoy 	list_for_each_entry(ndev, &device_list, entry) {
2030a3dd7d00SMax Gurtovoy 		if (ndev->device == ib_device) {
2031a3dd7d00SMax Gurtovoy 			found = true;
2032a3dd7d00SMax Gurtovoy 			break;
2033a3dd7d00SMax Gurtovoy 		}
2034a3dd7d00SMax Gurtovoy 	}
2035a3dd7d00SMax Gurtovoy 	mutex_unlock(&device_list_mutex);
2036a3dd7d00SMax Gurtovoy 
2037a3dd7d00SMax Gurtovoy 	if (!found)
2038a3dd7d00SMax Gurtovoy 		return;
2039a3dd7d00SMax Gurtovoy 
2040a3dd7d00SMax Gurtovoy 	/*
2041a3dd7d00SMax Gurtovoy 	 * IB Device that is used by nvmet controllers is being removed,
2042a3dd7d00SMax Gurtovoy 	 * delete all queues using this device.
2043a3dd7d00SMax Gurtovoy 	 */
2044f1d4ef7dSSagi Grimberg 	mutex_lock(&nvmet_rdma_queue_mutex);
204543b92fd2SIsrael Rukshin 	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
204643b92fd2SIsrael Rukshin 				 queue_list) {
2047f1d4ef7dSSagi Grimberg 		if (queue->dev->device != ib_device)
2048f1d4ef7dSSagi Grimberg 			continue;
2049f1d4ef7dSSagi Grimberg 
2050f1d4ef7dSSagi Grimberg 		pr_info("Removing queue %d\n", queue->idx);
205143b92fd2SIsrael Rukshin 		list_del_init(&queue->queue_list);
2052f1d4ef7dSSagi Grimberg 		__nvmet_rdma_queue_disconnect(queue);
2053f1d4ef7dSSagi Grimberg 	}
2054f1d4ef7dSSagi Grimberg 	mutex_unlock(&nvmet_rdma_queue_mutex);
2055f1d4ef7dSSagi Grimberg 
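	/*
	 * Queue teardown completes asynchronously on nvmet_wq; flush it so
	 * all release work has run before device removal returns.
	 */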
20568832cf92SSagi Grimberg 	flush_workqueue(nvmet_wq);
2057f1d4ef7dSSagi Grimberg }
2058f1d4ef7dSSagi Grimberg 
2059f1d4ef7dSSagi Grimberg static struct ib_client nvmet_rdma_ib_client = {
2060f1d4ef7dSSagi Grimberg 	.name   = "nvmet_rdma",
2061f1d4ef7dSSagi Grimberg 	.remove = nvmet_rdma_remove_one
2062f1d4ef7dSSagi Grimberg };
2063f1d4ef7dSSagi Grimberg 
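/*
 * Register the IB client before the transport so that device-removal
 * callbacks are in place before any RDMA port can be configured; the
 * exit path unwinds in the reverse order.
 */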
20648f000cacSChristoph Hellwig static int __init nvmet_rdma_init(void)
20658f000cacSChristoph Hellwig {
2066f1d4ef7dSSagi Grimberg 	int ret;
2067f1d4ef7dSSagi Grimberg 
2068f1d4ef7dSSagi Grimberg 	ret = ib_register_client(&nvmet_rdma_ib_client);
2069f1d4ef7dSSagi Grimberg 	if (ret)
2070f1d4ef7dSSagi Grimberg 		return ret;
2071f1d4ef7dSSagi Grimberg 
2072f1d4ef7dSSagi Grimberg 	ret = nvmet_register_transport(&nvmet_rdma_ops);
2073f1d4ef7dSSagi Grimberg 	if (ret)
2074f1d4ef7dSSagi Grimberg 		goto err_ib_client;
2075f1d4ef7dSSagi Grimberg 
2076f1d4ef7dSSagi Grimberg 	return 0;
2077f1d4ef7dSSagi Grimberg 
2078f1d4ef7dSSagi Grimberg err_ib_client:
2079f1d4ef7dSSagi Grimberg 	ib_unregister_client(&nvmet_rdma_ib_client);
2080f1d4ef7dSSagi Grimberg 	return ret;
20818f000cacSChristoph Hellwig }
20828f000cacSChristoph Hellwig 
20838f000cacSChristoph Hellwig static void __exit nvmet_rdma_exit(void)
20848f000cacSChristoph Hellwig {
20858f000cacSChristoph Hellwig 	nvmet_unregister_transport(&nvmet_rdma_ops);
2086f1d4ef7dSSagi Grimberg 	ib_unregister_client(&nvmet_rdma_ib_client);
2087cb4876e8SSagi Grimberg 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
20888f000cacSChristoph Hellwig 	ida_destroy(&nvmet_rdma_queue_ida);
20898f000cacSChristoph Hellwig }
20908f000cacSChristoph Hellwig 
20918f000cacSChristoph Hellwig module_init(nvmet_rdma_init);
20928f000cacSChristoph Hellwig module_exit(nvmet_rdma_exit);
20938f000cacSChristoph Hellwig 
20948f000cacSChristoph Hellwig MODULE_LICENSE("GPL v2");
20958f000cacSChristoph Hellwig MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */