/*
 * NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/nvme-rdma.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_RDMA_CONNECT_TIMEOUT_MS	3000		/* 3 seconds */

#define NVME_RDMA_MAX_SEGMENT_SIZE	0xffffff	/* 24-bit SGL field */

#define NVME_RDMA_MAX_SEGMENTS		256

#define NVME_RDMA_MAX_INLINE_SEGMENTS	1

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_RDMA_NR_AEN_COMMANDS      1
#define NVME_RDMA_AQ_BLKMQ_DEPTH       \
	(NVMF_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
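/*
 * Worked example (NVMF_AQ_DEPTH is 32 at the time of writing): blk-mq is
 * told the admin queue is 32 - 1 = 31 entries deep and hands out tags
 * 0..30, while the single AEN command we keep for ourselves goes out with
 * command_id 31 == NVME_RDMA_AQ_BLKMQ_DEPTH, so the two ranges can never
 * collide (see nvme_rdma_submit_async_event() and __nvme_rdma_recv_done()).
 */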

struct nvme_rdma_device {
	struct ib_device       *dev;
	struct ib_pd	       *pd;
	struct kref		ref;
	struct list_head	entry;
};

struct nvme_rdma_qe {
	struct ib_cqe		cqe;
	void			*data;
	u64			dma;
};

struct nvme_rdma_queue;
struct nvme_rdma_request {
	struct nvme_request	req;
	struct ib_mr		*mr;
	struct nvme_rdma_qe	sqe;
	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
	u32			num_sge;
	int			nents;
	bool			inline_data;
	struct ib_reg_wr	reg_wr;
	struct ib_cqe		reg_cqe;
	struct nvme_rdma_queue  *queue;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

enum nvme_rdma_queue_flags {
	NVME_RDMA_Q_CONNECTED = (1 << 0),
	NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
	NVME_RDMA_Q_DELETING = (1 << 2),
	NVME_RDMA_Q_LIVE = (1 << 3),
};

struct nvme_rdma_queue {
	struct nvme_rdma_qe	*rsp_ring;
	u8			sig_count;
	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_rdma_ctrl	*ctrl;
	struct nvme_rdma_device	*device;
	struct ib_cq		*ib_cq;
	struct ib_qp		*qp;

	unsigned long		flags;
	struct rdma_cm_id	*cm_id;
	int			cm_error;
	struct completion	cm_done;
};

struct nvme_rdma_ctrl {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	struct nvme_rdma_queue	*queues;
	u32			queue_count;

	/* other member variables */
	struct blk_mq_tag_set	tag_set;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
	struct work_struct	err_work;

	struct nvme_rdma_qe	async_event_sqe;

	struct delayed_work	reconnect_work;

	struct list_head	list;

	struct blk_mq_tag_set	admin_tag_set;
	struct nvme_rdma_device	*device;

	u64			cap;
	u32			max_fr_pages;

	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);

static struct workqueue_struct *nvme_rdma_wq;

/*
 * Disabling this option makes small I/O go faster, but is fundamentally
 * unsafe.  With it turned off we have to register a global rkey that
 * allows read and write access to all physical memory.
 */
static bool register_always = true;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
	 "Use memory registration even for contiguous memory regions");
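/*
 * Usage sketch (assuming the module is built as nvme-rdma, as in the
 * in-tree Makefile): loading with "modprobe nvme-rdma register_always=N"
 * trades the per-I/O memory registration for the unsafe global rkey
 * described above; the default of Y is the safe choice.
 */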

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);

/* XXX: really should move to a generic header sooner or later.. */
static inline void put_unaligned_le24(u32 val, u8 *p)
{
	*p++ = val;
	*p++ = val >> 8;
	*p++ = val >> 16;
}
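/*
 * Byte-order sanity check for the helper above:
 * put_unaligned_le24(0x123456, p) stores p[0] = 0x56, p[1] = 0x34 and
 * p[2] = 0x12, i.e. a little-endian 24-bit value as used for the 3-byte
 * length field of NVMe keyed SGL descriptors.
 */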

static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
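/*
 * Worked example: an I/O queue capsule is ioccsz * 16 bytes long (see
 * nvme_rdma_init_queue()), so a controller advertising ioccsz == 8 leaves
 * 128 - sizeof(struct nvme_command) = 128 - 64 = 64 bytes for in-capsule
 * data; the admin queue capsule is exactly one command, so its inline
 * data size is 0.
 */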

static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
	kfree(qe->data);
}

static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	qe->data = kzalloc(capsule_size, GFP_KERNEL);
	if (!qe->data)
		return -ENOMEM;

	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
	if (ib_dma_mapping_error(ibdev, qe->dma)) {
		kfree(qe->data);
		return -ENOMEM;
	}

	return 0;
}

static void nvme_rdma_free_ring(struct ib_device *ibdev,
		struct nvme_rdma_qe *ring, size_t ib_queue_size,
		size_t capsule_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ib_queue_size; i++)
		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
	kfree(ring);
}

static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
		size_t ib_queue_size, size_t capsule_size,
		enum dma_data_direction dir)
{
	struct nvme_rdma_qe *ring;
	int i;

	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
	if (!ring)
		return NULL;

	for (i = 0; i < ib_queue_size; i++) {
		if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
			goto out_free_ring;
	}

	return ring;

out_free_ring:
	nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
	return NULL;
}

static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
	wait_for_completion_interruptible_timeout(&queue->cm_done,
			msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
	return queue->cm_error;
}

static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
{
	struct nvme_rdma_device *dev = queue->device;
	struct ib_qp_init_attr init_attr;
	int ret;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.event_handler = nvme_rdma_qp_event;
	/* +1 for drain */
	init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
	/* +1 for drain */
	init_attr.cap.max_recv_wr = queue->queue_size + 1;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.send_cq = queue->ib_cq;
	init_attr.recv_cq = queue->ib_cq;

	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);

	queue->qp = queue->cm_id->qp;
	return ret;
}
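/*
 * A sizing note for the QP above: the caller passes factor == 3 (see
 * send_wr_factor in nvme_rdma_create_queue_ib()) because one command may
 * consume up to three send WRs - REG_MR, SEND and LOCAL_INV - so a queue
 * of depth N requests 3 * N + 1 send WRs, the extra one being reserved
 * for the drain.
 */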

static int nvme_rdma_reinit_request(void *data, struct request *rq)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_device *dev = ctrl->device;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	int ret = 0;

	if (!req->mr->need_inval)
		goto out;

	ib_dereg_mr(req->mr);

	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
			ctrl->max_fr_pages);
	if (IS_ERR(req->mr)) {
		ret = PTR_ERR(req->mr);
		req->mr = NULL;
		goto out;
	}

	req->mr->need_inval = false;

out:
	return ret;
}

static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
		struct request *rq, unsigned int queue_idx)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
	struct nvme_rdma_device *dev = queue->device;

	if (req->mr)
		ib_dereg_mr(req->mr);

	nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
}

static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	return __nvme_rdma_exit_request(set->driver_data, rq, hctx_idx + 1);
}

static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	return __nvme_rdma_exit_request(set->driver_data, rq, 0);
}

static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
		struct request *rq, unsigned int queue_idx)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int ret;

	ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	if (ret)
		return ret;

	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
			ctrl->max_fr_pages);
	if (IS_ERR(req->mr)) {
		ret = PTR_ERR(req->mr);
		goto out_free_qe;
	}

	req->queue = queue;

	return 0;

out_free_qe:
	nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	return -ENOMEM;
}

static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	return __nvme_rdma_init_request(set->driver_data, rq, hctx_idx + 1);
}

static int nvme_rdma_init_admin_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	return __nvme_rdma_init_request(set->driver_data, rq, 0);
}

static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static void nvme_rdma_free_dev(struct kref *ref)
{
	struct nvme_rdma_device *ndev =
		container_of(ref, struct nvme_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	ib_dealloc_pd(ndev->pd);
	kfree(ndev);
}

static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
{
	kref_put(&dev->ref, nvme_rdma_free_dev);
}

static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
{
	return kref_get_unless_zero(&dev->ref);
}

static struct nvme_rdma_device *
nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvme_rdma_device *ndev;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->dev->node_guid == cm_id->device->node_guid &&
		    nvme_rdma_dev_get(ndev))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->dev = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->dev,
		register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (!(ndev->dev->attrs.device_cap_flags &
	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		dev_err(&ndev->dev->dev,
			"Memory registrations not supported.\n");
		goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_device *dev;
	struct ib_device *ibdev;

	if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
		return;

	dev = queue->device;
	ibdev = dev->dev;
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->ib_cq);

	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
			sizeof(struct nvme_completion), DMA_FROM_DEVICE);

	nvme_rdma_dev_put(dev);
}

static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
		struct nvme_rdma_device *dev)
{
	struct ib_device *ibdev = dev->dev;
	const int send_wr_factor = 3;			/* MR, SEND, INV */
	const int cq_factor = send_wr_factor + 1;	/* + RECV */
	int comp_vector, idx = nvme_rdma_queue_idx(queue);
	int ret;

	queue->device = dev;

	/*
	 * The admin queue is barely used once the controller is live, so don't
	 * bother to spread it out.
	 */
	if (idx == 0)
		comp_vector = 0;
	else
		comp_vector = idx % ibdev->num_comp_vectors;

	/* +1 for drain */
	queue->ib_cq = ib_alloc_cq(dev->dev, queue,
				cq_factor * queue->queue_size + 1, comp_vector,
				IB_POLL_SOFTIRQ);
	if (IS_ERR(queue->ib_cq)) {
		ret = PTR_ERR(queue->ib_cq);
		goto out;
	}

	ret = nvme_rdma_create_qp(queue, send_wr_factor);
	if (ret)
		goto out_destroy_ib_cq;

	queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
			sizeof(struct nvme_completion), DMA_FROM_DEVICE);
	if (!queue->rsp_ring) {
		ret = -ENOMEM;
		goto out_destroy_qp;
	}
	set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);

	return 0;

out_destroy_qp:
	ib_destroy_qp(queue->qp);
out_destroy_ib_cq:
	ib_free_cq(queue->ib_cq);
out:
	return ret;
}

static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
		int idx, size_t queue_size)
{
	struct nvme_rdma_queue *queue;
	struct sockaddr *src_addr = NULL;
	int ret;

	queue = &ctrl->queues[idx];
	queue->ctrl = ctrl;
	init_completion(&queue->cm_done);

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	queue->queue_size = queue_size;

	queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(queue->cm_id)) {
		dev_info(ctrl->ctrl.device,
			"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
		return PTR_ERR(queue->cm_id);
	}

	if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
		src_addr = (struct sockaddr *)&ctrl->src_addr;

	queue->cm_error = -ETIMEDOUT;
	ret = rdma_resolve_addr(queue->cm_id, src_addr,
			(struct sockaddr *)&ctrl->addr,
			NVME_RDMA_CONNECT_TIMEOUT_MS);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"rdma_resolve_addr failed (%d).\n", ret);
		goto out_destroy_cm_id;
	}

	ret = nvme_rdma_wait_for_cm(queue);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"rdma_resolve_addr wait failed (%d).\n", ret);
		goto out_destroy_cm_id;
	}

	clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
	set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);

	return 0;

out_destroy_cm_id:
	nvme_rdma_destroy_queue_ib(queue);
	rdma_destroy_id(queue->cm_id);
	return ret;
}

static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
	rdma_disconnect(queue->cm_id);
	ib_drain_qp(queue->qp);
}

static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
{
	nvme_rdma_destroy_queue_ib(queue);
	rdma_destroy_id(queue->cm_id);
}

static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
{
	if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
		return;
	nvme_rdma_stop_queue(queue);
	nvme_rdma_free_queue(queue);
}

static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
}

static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret) {
			dev_info(ctrl->ctrl.device,
				"failed to connect i/o queue: %d\n", ret);
			goto out_free_queues;
		}
		set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;

out_free_queues:
	nvme_rdma_free_io_queues(ctrl);
	return ret;
}

static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int i, ret;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret)
		return ret;

	ctrl->queue_count = nr_io_queues + 1;
	if (ctrl->queue_count < 2)
		return 0;

	dev_info(ctrl->ctrl.device,
		"creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_rdma_init_queue(ctrl, i,
					   ctrl->ctrl.opts->queue_size);
		if (ret) {
			dev_info(ctrl->ctrl.device,
				"failed to initialize i/o queue: %d\n", ret);
			goto out_free_queues;
		}
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);

	return ret;
}

static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
	nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
			sizeof(struct nvme_command), DMA_TO_DEVICE);
	nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvme_rdma_dev_put(ctrl->device);
}

static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
			ctrl->ctrl.state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(&ctrl->ctrl)) {
		dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
			ctrl->ctrl.opts->reconnect_delay);
		queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
				ctrl->ctrl.opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->ctrl.device, "Removing controller...\n");
		queue_work(nvme_rdma_wq, &ctrl->delete_work);
	}
}

static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_rdma_ctrl, reconnect_work);
	bool changed;
	int ret;

	++ctrl->ctrl.opts->nr_reconnects;

	if (ctrl->queue_count > 1) {
		nvme_rdma_free_io_queues(ctrl);

		ret = blk_mq_reinit_tagset(&ctrl->tag_set);
		if (ret)
			goto requeue;
	}

	nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);

	ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
	if (ret)
		goto requeue;

	ret = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
	if (ret)
		goto requeue;

	blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto stop_admin_q;

	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (ret)
		goto stop_admin_q;

	nvme_start_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		ret = nvme_rdma_init_io_queues(ctrl);
		if (ret)
			goto stop_admin_q;

		ret = nvme_rdma_connect_io_queues(ctrl);
		if (ret)
			goto stop_admin_q;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);
	ctrl->ctrl.opts->nr_reconnects = 0;

	if (ctrl->queue_count > 1) {
		nvme_start_queues(&ctrl->ctrl);
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	dev_info(ctrl->ctrl.device, "Successfully reconnected\n");

	return;

stop_admin_q:
	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
requeue:
	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
			ctrl->ctrl.opts->nr_reconnects);
	nvme_rdma_reconnect_or_remove(ctrl);
}

static void nvme_rdma_error_recovery_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
			struct nvme_rdma_ctrl, err_work);
	int i;

	nvme_stop_keep_alive(&ctrl->ctrl);

	for (i = 0; i < ctrl->queue_count; i++) {
		clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
	}

	if (ctrl->queue_count > 1)
		nvme_stop_queues(&ctrl->ctrl);
	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);

	/* We must fast-fail or requeue all of our inflight requests */
	if (ctrl->queue_count > 1)
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);

	nvme_rdma_reconnect_or_remove(ctrl);
}

static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
		return;

	queue_work(nvme_rdma_wq, &ctrl->err_work);
}

static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
		const char *op)
{
	struct nvme_rdma_queue *queue = cq->cq_context;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		dev_info(ctrl->ctrl.device,
			     "%s for CQE 0x%p failed with status %s (%d)\n",
			     op, wc->wr_cqe,
			     ib_wc_status_msg(wc->status), wc->status);
	nvme_rdma_error_recovery(ctrl);
}

static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "MEMREG");
}

static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
}

static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = req->mr->rkey,
	};

	req->reg_cqe.done = nvme_rdma_inv_rkey_done;
	wr.wr_cqe = &req->reg_cqe;

	return ib_post_send(queue->qp, &wr, &bad_wr);
}

static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
		struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int res;

	if (!blk_rq_bytes(rq))
		return;

	if (req->mr->need_inval) {
		res = nvme_rdma_inv_rkey(queue, req);
		if (res < 0) {
			dev_err(ctrl->ctrl.device,
				"Queueing INV WR for rkey %#x failed (%d)\n",
				req->mr->rkey, res);
			nvme_rdma_error_recovery(queue->ctrl);
		}
	}

	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
			req->nents, rq_data_dir(rq) ==
				    WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	nvme_cleanup_cmd(rq);
	sg_free_table_chained(&req->sg_table, true);
}

static int nvme_rdma_set_sg_null(struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = 0;
	put_unaligned_le24(0, sg->length);
	put_unaligned_le32(0, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}
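/*
 * Layout note for the keyed SGL helpers here: struct nvme_keyed_sgl_desc
 * is 16 bytes - an 8-byte address, a 3-byte (24-bit) length (which is why
 * NVME_RDMA_MAX_SEGMENT_SIZE is 0xffffff), a 4-byte key carrying the rkey
 * and a 1-byte type whose upper nibble holds the descriptor type - which
 * is exactly the addr/length/key/type sequence filled in above and below.
 */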

static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
	req->sge[1].length = sg_dma_len(req->sg_table.sgl);
	req->sge[1].lkey = queue->device->pd->local_dma_lkey;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;

	req->inline_data = true;
	req->num_sge++;
	return 0;
}
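/*
 * The inline path above makes the send a two-element gather list: sge[0]
 * covers the command capsule itself (filled in by nvme_rdma_post_send())
 * and sge[1] covers the payload, so the HCA ships the data in-capsule
 * right behind the SQE without any memory registration or remote key.
 */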

static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
	put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
	put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}

static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
	int nr;

	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
	if (nr < count) {
		if (nr < 0)
			return nr;
		return -EINVAL;
	}

	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));

	req->reg_cqe.done = nvme_rdma_memreg_done;
	memset(&req->reg_wr, 0, sizeof(req->reg_wr));
	req->reg_wr.wr.opcode = IB_WR_REG_MR;
	req->reg_wr.wr.wr_cqe = &req->reg_cqe;
	req->reg_wr.wr.num_sge = 0;
	req->reg_wr.mr = req->mr;
	req->reg_wr.key = req->mr->rkey;
	req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			     IB_ACCESS_REMOTE_READ |
			     IB_ACCESS_REMOTE_WRITE;

	req->mr->need_inval = true;

	sg->addr = cpu_to_le64(req->mr->iova);
	put_unaligned_le24(req->mr->length, sg->length);
	put_unaligned_le32(req->mr->rkey, sg->key);
	sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
			NVME_SGL_FMT_INVALIDATE;

	return 0;
}

static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
		struct request *rq, struct nvme_command *c)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int count, ret;

	req->num_sge = 1;
	req->inline_data = false;
	req->mr->need_inval = false;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_bytes(rq))
		return nvme_rdma_set_sg_null(c);

	req->sg_table.sgl = req->first_sgl;
	ret = sg_alloc_table_chained(&req->sg_table,
			blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);

	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
		    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(count <= 0)) {
		sg_free_table_chained(&req->sg_table, true);
		return -EIO;
	}

	if (count == 1) {
		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
		    blk_rq_payload_bytes(rq) <=
				nvme_rdma_inline_data_size(queue))
			return nvme_rdma_map_sg_inline(queue, req, c);

		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			return nvme_rdma_map_sg_single(queue, req, c);
	}

	return nvme_rdma_map_sg_fr(queue, req, c, count);
}

static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "SEND");
}

static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
		struct ib_send_wr *first, bool flush)
{
	struct ib_send_wr wr, *bad_wr;
	int ret;

	sge->addr   = qe->dma;
	sge->length = sizeof(struct nvme_command);
	sge->lkey   = queue->device->pd->local_dma_lkey;

	qe->cqe.done = nvme_rdma_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &qe->cqe;
	wr.sg_list    = sge;
	wr.num_sge    = num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = 0;

	/*
	 * Unsignalled send completions are another giant disaster in the
	 * IB Verbs spec:  If we don't regularly post signalled sends
	 * the send queue will fill up and only a QP reset will rescue us.
	 * Would have been way too obvious to handle this in hardware or
	 * at least the RDMA stack..
	 *
	 * This messy and racy code snippet is copied and pasted from the
	 * iSER initiator, and the magic '32' comes from there as well.
	 *
	 * Always signal the flushes. The magic request used for the flush
	 * sequencer is not allocated in our driver's tagset and it's
	 * triggered to be freed by blk_cleanup_queue(). So we need to
	 * always mark it as signaled to ensure that the "wr_cqe", which is
	 * embedded in the request's payload, is not freed when
	 * __ib_process_cq() calls wr_cqe->done().
	 */
	if ((++queue->sig_count % 32) == 0 || flush)
		wr.send_flags |= IB_SEND_SIGNALED;

	if (first)
		first->next = &wr;
	else
		first = &wr;

	ret = ib_post_send(queue->qp, first, &bad_wr);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			     "%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}
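/*
 * An observation on the heuristic above (not something this code
 * enforces): signalling every 32nd send only reliably frees up send
 * queue slots while queue_size is comfortably larger than 32; for very
 * shallow queues the signalling interval would arguably have to be
 * capped by the queue depth instead.
 */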

static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;
	int ret;

	list.addr   = qe->dma;
	list.length = sizeof(struct nvme_completion);
	list.lkey   = queue->device->pd->local_dma_lkey;

	qe->cqe.done = nvme_rdma_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &qe->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(queue->qp, &wr, &bad_wr);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
{
	u32 queue_idx = nvme_rdma_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
	struct nvme_rdma_queue *queue = &ctrl->queues[0];
	struct ib_device *dev = queue->device->dev;
	struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
	struct nvme_command *cmd = sqe->data;
	struct ib_sge sge;
	int ret;

	if (WARN_ON_ONCE(aer_idx != 0))
		return;

	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);

	memset(cmd, 0, sizeof(*cmd));
	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_rdma_set_sg_null(cmd);

	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
			DMA_TO_DEVICE);

	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
	WARN_ON_ONCE(ret);
}

static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
		struct nvme_completion *cqe, struct ib_wc *wc, int tag)
{
	struct request *rq;
	struct nvme_rdma_request *req;
	int ret = 0;

	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"tag 0x%x on QP %#x not found\n",
			cqe->command_id, queue->qp->qp_num);
		nvme_rdma_error_recovery(queue->ctrl);
		return ret;
	}
	req = blk_mq_rq_to_pdu(rq);

	if (rq->tag == tag)
		ret = 1;

	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
	    wc->ex.invalidate_rkey == req->mr->rkey)
		req->mr->need_inval = false;

	nvme_end_request(rq, cqe->status, cqe->result);
	return ret;
}

static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
{
	struct nvme_rdma_qe *qe =
		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
	struct nvme_rdma_queue *queue = cq->cq_context;
	struct ib_device *ibdev = queue->device->dev;
	struct nvme_completion *cqe = qe->data;
	const size_t len = sizeof(struct nvme_completion);
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvme_rdma_wr_error(cq, wc, "RECV");
		return 0;
	}

	ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);

	nvme_rdma_post_recv(queue, qe);
	return ret;
}

static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	__nvme_rdma_recv_done(cq, wc, -1);
}

static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
{
	int ret, i;

	for (i = 0; i < queue->queue_size; i++) {
		ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
		if (ret)
			goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
		struct rdma_cm_event *ev)
{
	struct rdma_cm_id *cm_id = queue->cm_id;
	int status = ev->status;
	const char *rej_msg;
	const struct nvme_rdma_cm_rej *rej_data;
	u8 rej_data_len;

	rej_msg = rdma_reject_msg(cm_id, status);
	rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);

	if (rej_data && rej_data_len >= sizeof(u16)) {
		u16 sts = le16_to_cpu(rej_data->sts);

		dev_err(queue->ctrl->ctrl.device,
		      "Connect rejected: status %d (%s) nvme status %d (%s).\n",
		      status, rej_msg, sts, nvme_rdma_cm_msg(sts));
	} else {
		dev_err(queue->ctrl->ctrl.device,
			"Connect rejected: status %d (%s).\n", status, rej_msg);
	}

	return -ECONNRESET;
}

static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_device *dev;
	int ret;

	dev = nvme_rdma_find_get_device(queue->cm_id);
	if (!dev) {
		dev_err(queue->cm_id->device->dev.parent,
			"no client data found!\n");
		return -ECONNREFUSED;
	}

	ret = nvme_rdma_create_queue_ib(queue, dev);
	if (ret) {
		nvme_rdma_dev_put(dev);
		goto out;
	}

	ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"rdma_resolve_route failed (%d).\n",
			queue->cm_error);
		goto out_destroy_queue;
	}

	return 0;

out_destroy_queue:
	nvme_rdma_destroy_queue_ib(queue);
out:
	return ret;
}

static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_req priv = { };
	int ret;

	param.qp_num = queue->qp->qp_num;
	param.flow_control = 1;

	param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
	/* maximum retry count */
	param.retry_count = 7;
	param.rnr_retry_count = 7;
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);

	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
	/*
	 * set the admin queue depth to the minimum size
	 * specified by the Fabrics standard.
	 */
	if (priv.qid == 0) {
		priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
		priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
	} else {
		/*
		 * The current interpretation of the fabrics spec is that,
		 * at a minimum, hrqsize must be sqsize+1, i.e. the 1's-based
		 * representation of sqsize.
		 */
		priv.hrqsize = cpu_to_le16(queue->queue_size);
		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
	}

	ret = rdma_connect(queue->cm_id, &param);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"rdma_connect failed (%d).\n", ret);
		goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *ev)
{
	struct nvme_rdma_queue *queue = cm_id->context;
	int cm_error = 0;

	dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
		rdma_event_msg(ev->event), ev->event,
		ev->status, cm_id);

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_error = nvme_rdma_addr_resolved(queue);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_error = nvme_rdma_route_resolved(queue);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		queue->cm_error = nvme_rdma_conn_established(queue);
		/* complete cm_done regardless of success/failure */
		complete(&queue->cm_done);
		return 0;
	case RDMA_CM_EVENT_REJECTED:
		cm_error = nvme_rdma_conn_rejected(queue, ev);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		dev_dbg(queue->ctrl->ctrl.device,
			"CM error event %d\n", ev->event);
		cm_error = -ECONNRESET;
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		dev_dbg(queue->ctrl->ctrl.device,
			"disconnect received - connection closed\n");
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* device removal is handled via the ib_client API */
		break;
	default:
		dev_err(queue->ctrl->ctrl.device,
			"Unexpected RDMA CM event (%d)\n", ev->event);
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	}

	if (cm_error) {
		queue->cm_error = cm_error;
		complete(&queue->cm_done);
	}

	return 0;
}

static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_rdma_error_recovery(req->queue->ctrl);

	/* fail with DNR on cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

/*
 * We cannot accept any other command until the Connect command has completed.
 */
static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
		struct request *rq)
{
	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
		struct nvme_command *cmd = nvme_req(rq)->cmd;

		if (!blk_rq_is_passthrough(rq) ||
		    cmd->common.opcode != nvme_fabrics_command ||
		    cmd->fabrics.fctype != nvme_fabrics_type_connect)
			return false;
	}

	return true;
}
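/*
 * In other words: until the queue is marked LIVE the only command let
 * through is a passthrough Fabrics command (opcode 0x7f) whose fctype is
 * nvme_fabrics_type_connect; everything else is bounced back as busy by
 * nvme_rdma_queue_rq().
 */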

static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_qe *sqe = &req->sqe;
	struct nvme_command *c = sqe->data;
	bool flush = false;
	struct ib_device *dev;
	int ret;

	WARN_ON_ONCE(rq->tag < 0);

	if (!nvme_rdma_queue_is_ready(queue, rq))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dev = queue->device->dev;
	ib_dma_sync_single_for_cpu(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	ret = nvme_setup_cmd(ns, rq, c);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;

	blk_mq_start_request(rq);

	ret = nvme_rdma_map_data(queue, rq, c);
	if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			     "Failed to map data (%d)\n", ret);
		nvme_cleanup_cmd(rq);
		goto err;
	}

	ib_dma_sync_single_for_device(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	if (req_op(rq) == REQ_OP_FLUSH)
		flush = true;
	ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
	if (ret) {
		nvme_rdma_unmap_data(queue, rq);
		goto err;
	}

	return BLK_MQ_RQ_QUEUE_OK;
err:
	return (ret == -ENOMEM || ret == -EAGAIN) ?
		BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
}

static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct ib_cq *cq = queue->ib_cq;
	struct ib_wc wc;
	int found = 0;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		struct ib_cqe *cqe = wc.wr_cqe;

		if (cqe) {
			if (cqe->done == nvme_rdma_recv_done)
				found |= __nvme_rdma_recv_done(cq, &wc, tag);
			else
				cqe->done(cq, &wc);
		}
	}

	return found;
}

static void nvme_rdma_complete_rq(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	nvme_rdma_unmap_data(req->queue, rq);
	nvme_complete_rq(rq);
}

static const struct blk_mq_ops nvme_rdma_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.reinit_request	= nvme_rdma_reinit_request,
	.init_hctx	= nvme_rdma_init_hctx,
	.poll		= nvme_rdma_poll,
	.timeout	= nvme_rdma_timeout,
};

static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_admin_request,
	.exit_request	= nvme_rdma_exit_admin_request,
	.reinit_request	= nvme_rdma_reinit_request,
	.init_hctx	= nvme_rdma_init_admin_hctx,
	.timeout	= nvme_rdma_timeout,
};

static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
	int error;

	error = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
	if (error)
		return error;

	ctrl->device = ctrl->queues[0].device;

	/*
	 * We need a reference on the device as long as the tag_set is alive,
	 * as the MRs in the request structures need a valid ib_device.
	 */
	error = -EINVAL;
	if (!nvme_rdma_dev_get(ctrl->device))
		goto out_free_queue;

	ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
		ctrl->device->dev->attrs.max_fast_reg_page_list_len);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_put_dev;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
			&ctrl->async_event_sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	/* disconnect and drain the queue before freeing the tagset */
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_put_dev:
	nvme_rdma_dev_put(ctrl->device);
out_free_queue:
	nvme_rdma_free_queue(&ctrl->queues[0]);
	return error;
}

static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->reconnect_work);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_rdma_free_io_queues(ctrl);
	}

	if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_rdma_destroy_admin_queue(ctrl);
}

static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
	nvme_uninit_ctrl(&ctrl->ctrl);
	if (shutdown)
		nvme_rdma_shutdown_ctrl(ctrl);

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
		nvme_rdma_dev_put(ctrl->device);
	}

	nvme_put_ctrl(&ctrl->ctrl);
}

static void nvme_rdma_del_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
				struct nvme_rdma_ctrl, delete_work);

	__nvme_rdma_remove_ctrl(ctrl, true);
}

static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!queue_work(nvme_rdma_wq, &ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
	int ret = 0;

	/*
	 * Keep a reference until all work is flushed since
	 * __nvme_rdma_del_ctrl can free the ctrl mem
	 */
	if (!kref_get_unless_zero(&ctrl->ctrl.kref))
		return -EBUSY;
	ret = __nvme_rdma_del_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->delete_work);
	nvme_put_ctrl(&ctrl->ctrl);
	return ret;
}

static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
				struct nvme_rdma_ctrl, delete_work);

	__nvme_rdma_remove_ctrl(ctrl, false);
}

static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
					struct nvme_rdma_ctrl, reset_work);
	int ret;
	bool changed;

	nvme_rdma_shutdown_ctrl(ctrl);

	ret = nvme_rdma_configure_admin_queue(ctrl);
	if (ret) {
		/* ctrl is already shut down, just remove the ctrl */
		INIT_WORK(&ctrl->delete_work, nvme_rdma_remove_ctrl_work);
		goto del_dead_ctrl;
	}

	if (ctrl->queue_count > 1) {
		ret = blk_mq_reinit_tagset(&ctrl->tag_set);
		if (ret)
			goto del_dead_ctrl;

		ret = nvme_rdma_init_io_queues(ctrl);
		if (ret)
			goto del_dead_ctrl;

		ret = nvme_rdma_connect_io_queues(ctrl);
		if (ret)
			goto del_dead_ctrl;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	if (ctrl->queue_count > 1) {
		nvme_start_queues(&ctrl->ctrl);
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return;

del_dead_ctrl:
	/* Deleting this dead controller... */
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	WARN_ON(!queue_work(nvme_rdma_wq, &ctrl->delete_work));
}

static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!queue_work(nvme_rdma_wq, &ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
	.name			= "rdma",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_rdma_reset_ctrl,
	.free_ctrl		= nvme_rdma_free_ctrl,
	.submit_async_event	= nvme_rdma_submit_async_event,
	.delete_ctrl		= nvme_rdma_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
	.get_address		= nvmf_get_address,
};

static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int ret;

	ret = nvme_rdma_init_io_queues(ctrl);
	if (ret)
		return ret;

	/*
	 * We need a reference on the device as long as the tag_set is alive,
	 * as the MRs in the request structures need a valid ib_device.
	 */
	ret = -EINVAL;
	if (!nvme_rdma_dev_get(ctrl->device))
		goto out_free_io_queues;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_rdma_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_put_dev;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_rdma_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_put_dev:
	nvme_rdma_dev_put(ctrl->device);
out_free_io_queues:
	nvme_rdma_free_io_queues(ctrl);
	return ret;
}

static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	int ret;
	bool changed;
	char *port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	if (opts->mask & NVMF_OPT_TRSVCID)
		port = opts->trsvcid;
	else
		port = __stringify(NVME_RDMA_IP_PORT);

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, port, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n", opts->traddr, port);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_free_ctrl;

	INIT_DELAYED_WORK(&ctrl->reconnect_work,
			nvme_rdma_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
	INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
	spin_lock_init(&ctrl->lock);

	ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_rdma_configure_admin_queue(ctrl);
	if (ret)
		goto out_kfree_queues;

	/* sanity check icdoff */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
		goto out_remove_admin_queue;
	}

	/* sanity check keyed sgls */
	if (!(ctrl->ctrl.sgls & (1 << 20))) {
		dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported\n");
		goto out_remove_admin_queue;
	}

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	if (opts->nr_io_queues) {
		ret = nvme_rdma_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	kref_get(&ctrl->ctrl.kref);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_stop_keep_alive(&ctrl->ctrl);
	nvme_rdma_destroy_admin_queue(ctrl);
out_kfree_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_rdma_transport = {
	.name		= "rdma",
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_rdma_create_ctrl,
};
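/*
 * For reference, a typical way to reach nvme_rdma_create_ctrl() from
 * userspace (assuming nvme-cli is installed and 192.168.1.10 stands in
 * for a real target address) is:
 *
 *	nvme connect -t rdma -a 192.168.1.10 -s 4420 -n <subsysnqn>
 *
 * 4420 being the IANA-assigned NVMe/RDMA port this driver defaults to
 * via NVME_RDMA_IP_PORT when no trsvcid is given.
 */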

static void nvme_rdma_add_one(struct ib_device *ib_device)
{
}

static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvme_rdma_ctrl *ctrl;

	/* Delete all controllers using this device */
	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		if (ctrl->device->dev != ib_device)
			continue;
		dev_info(ctrl->ctrl.device,
			"Removing ctrl: NQN \"%s\", addr %pISp\n",
			ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
		__nvme_rdma_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	flush_workqueue(nvme_rdma_wq);
}

static struct ib_client nvme_rdma_ib_client = {
	.name   = "nvme_rdma",
	.add = nvme_rdma_add_one,
	.remove = nvme_rdma_remove_one,
};

static int __init nvme_rdma_init_module(void)
{
	int ret;

	nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
	if (!nvme_rdma_wq)
		return -ENOMEM;

	ret = ib_register_client(&nvme_rdma_ib_client);
	if (ret)
		goto err_destroy_wq;

	ret = nvmf_register_transport(&nvme_rdma_transport);
	if (ret)
		goto err_unreg_client;

	return 0;

err_unreg_client:
	ib_unregister_client(&nvme_rdma_ib_client);
err_destroy_wq:
	destroy_workqueue(nvme_rdma_wq);
	return ret;
}

static void __exit nvme_rdma_cleanup_module(void)
{
	nvmf_unregister_transport(&nvme_rdma_transport);
	ib_unregister_client(&nvme_rdma_ib_client);
	destroy_workqueue(nvme_rdma_wq);
}

module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);

MODULE_LICENSE("GPL v2");