rdma.c: side-by-side comparison of two revisions, 9b031c86506cef9acae45e61339fcf9deaabb793 and ca0f1a8055be2a04073af435dc68419334481638
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics RDMA target.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/atomic.h>
8#include <linux/ctype.h>

--- 6 unchanged lines hidden ---

15#include <linux/string.h>
16#include <linux/wait.h>
17#include <linux/inet.h>
18#include <asm/unaligned.h>
19
20#include <rdma/ib_verbs.h>
21#include <rdma/rdma_cm.h>
22#include <rdma/rw.h>
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics RDMA target.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/atomic.h>
8#include <linux/ctype.h>

--- 6 unchanged lines hidden ---

15#include <linux/string.h>
16#include <linux/wait.h>
17#include <linux/inet.h>
18#include <asm/unaligned.h>
19
20#include <rdma/ib_verbs.h>
21#include <rdma/rdma_cm.h>
22#include <rdma/rw.h>
23#include <rdma/ib_cm.h>
23
24#include <linux/nvme-rdma.h>
25#include "nvmet.h"
26
27/*
28 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
29 */
30#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE
31#define NVMET_RDMA_MAX_INLINE_SGE 4
32#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
33
34/* Assume mpsmin == device_page_size == 4KB */
35#define NVMET_RDMA_MAX_MDTS 8
24
25#include <linux/nvme-rdma.h>
26#include "nvmet.h"
27
28/*
29 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
30 */
31#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE
32#define NVMET_RDMA_MAX_INLINE_SGE 4
33#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
34
35/* Assume mpsmin == device_page_size == 4KB */
36#define NVMET_RDMA_MAX_MDTS 8
37#define NVMET_RDMA_MAX_METADATA_MDTS 5
36
38
39struct nvmet_rdma_srq;
40
37struct nvmet_rdma_cmd {
38 struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
39 struct ib_cqe cqe;
40 struct ib_recv_wr wr;
41 struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
42 struct nvme_command *nvme_cmd;
43 struct nvmet_rdma_queue *queue;
41struct nvmet_rdma_cmd {
42 struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
43 struct ib_cqe cqe;
44 struct ib_recv_wr wr;
45 struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
46 struct nvme_command *nvme_cmd;
47 struct nvmet_rdma_queue *queue;
48 struct nvmet_rdma_srq *nsrq;
44};
45
46enum {
47 NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
48 NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
49};
50
51struct nvmet_rdma_rsp {
52 struct ib_sge send_sge;
53 struct ib_cqe send_cqe;
54 struct ib_send_wr send_wr;
55
56 struct nvmet_rdma_cmd *cmd;
57 struct nvmet_rdma_queue *queue;
58
59 struct ib_cqe read_cqe;
49};
50
51enum {
52 NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
53 NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
54};
55
56struct nvmet_rdma_rsp {
57 struct ib_sge send_sge;
58 struct ib_cqe send_cqe;
59 struct ib_send_wr send_wr;
60
61 struct nvmet_rdma_cmd *cmd;
62 struct nvmet_rdma_queue *queue;
63
64 struct ib_cqe read_cqe;
65 struct ib_cqe write_cqe;
60 struct rdma_rw_ctx rw;
61
62 struct nvmet_req req;
63
64 bool allocated;
65 u8 n_rdma;
66 u32 flags;
67 u32 invalidate_rkey;

--- 10 unchanged lines hidden ---

78
79struct nvmet_rdma_queue {
80 struct rdma_cm_id *cm_id;
81 struct ib_qp *qp;
82 struct nvmet_port *port;
83 struct ib_cq *cq;
84 atomic_t sq_wr_avail;
85 struct nvmet_rdma_device *dev;
66 struct rdma_rw_ctx rw;
67
68 struct nvmet_req req;
69
70 bool allocated;
71 u8 n_rdma;
72 u32 flags;
73 u32 invalidate_rkey;

--- 10 unchanged lines hidden ---

84
85struct nvmet_rdma_queue {
86 struct rdma_cm_id *cm_id;
87 struct ib_qp *qp;
88 struct nvmet_port *port;
89 struct ib_cq *cq;
90 atomic_t sq_wr_avail;
91 struct nvmet_rdma_device *dev;
92 struct nvmet_rdma_srq *nsrq;
86 spinlock_t state_lock;
87 enum nvmet_rdma_queue_state state;
88 struct nvmet_cq nvme_cq;
89 struct nvmet_sq nvme_sq;
90
91 struct nvmet_rdma_rsp *rsps;
92 struct list_head free_rsps;
93 spinlock_t rsps_lock;
94 struct nvmet_rdma_cmd *cmds;
95
96 struct work_struct release_work;
97 struct list_head rsp_wait_list;
98 struct list_head rsp_wr_wait_list;
99 spinlock_t rsp_wr_wait_lock;
100
101 int idx;
102 int host_qid;
93 spinlock_t state_lock;
94 enum nvmet_rdma_queue_state state;
95 struct nvmet_cq nvme_cq;
96 struct nvmet_sq nvme_sq;
97
98 struct nvmet_rdma_rsp *rsps;
99 struct list_head free_rsps;
100 spinlock_t rsps_lock;
101 struct nvmet_rdma_cmd *cmds;
102
103 struct work_struct release_work;
104 struct list_head rsp_wait_list;
105 struct list_head rsp_wr_wait_list;
106 spinlock_t rsp_wr_wait_lock;
107
108 int idx;
109 int host_qid;
110 int comp_vector;
103 int recv_queue_size;
104 int send_queue_size;
105
106 struct list_head queue_list;
107};
108
109struct nvmet_rdma_port {
110 struct nvmet_port *nport;
111 struct sockaddr_storage addr;
112 struct rdma_cm_id *cm_id;
113 struct delayed_work repair_work;
114};
115
111 int recv_queue_size;
112 int send_queue_size;
113
114 struct list_head queue_list;
115};
116
117struct nvmet_rdma_port {
118 struct nvmet_port *nport;
119 struct sockaddr_storage addr;
120 struct rdma_cm_id *cm_id;
121 struct delayed_work repair_work;
122};
123
124struct nvmet_rdma_srq {
125 struct ib_srq *srq;
126 struct nvmet_rdma_cmd *cmds;
127 struct nvmet_rdma_device *ndev;
128};
129
116struct nvmet_rdma_device {
117 struct ib_device *device;
118 struct ib_pd *pd;
130struct nvmet_rdma_device {
131 struct ib_device *device;
132 struct ib_pd *pd;
119 struct ib_srq *srq;
120 struct nvmet_rdma_cmd *srq_cmds;
133 struct nvmet_rdma_srq **srqs;
134 int srq_count;
121 size_t srq_size;
122 struct kref ref;
123 struct list_head entry;
124 int inline_data_size;
125 int inline_page_count;
126};
127
128static bool nvmet_rdma_use_srq;
129module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
130MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
131
135 size_t srq_size;
136 struct kref ref;
137 struct list_head entry;
138 int inline_data_size;
139 int inline_page_count;
140};
141
142static bool nvmet_rdma_use_srq;
143module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
144MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
145
146static int srq_size_set(const char *val, const struct kernel_param *kp);
147static const struct kernel_param_ops srq_size_ops = {
148 .set = srq_size_set,
149 .get = param_get_int,
150};
151
152static int nvmet_rdma_srq_size = 1024;
153module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
154MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
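/*
 * Usage sketch (an editorial addition, assuming the transport is built as
 * the nvmet-rdma module as in the upstream Makefile):
 *   modprobe nvmet-rdma use_srq=Y srq_size=1024
 * srq_size_set() below rejects values smaller than 256.
 */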
155
132static DEFINE_IDA(nvmet_rdma_queue_ida);
133static LIST_HEAD(nvmet_rdma_queue_list);
134static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
135
136static LIST_HEAD(device_list);
137static DEFINE_MUTEX(device_list_mutex);
138
139static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
140static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
141static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
142static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
156static DEFINE_IDA(nvmet_rdma_queue_ida);
157static LIST_HEAD(nvmet_rdma_queue_list);
158static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
159
160static LIST_HEAD(device_list);
161static DEFINE_MUTEX(device_list_mutex);
162
163static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
164static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
165static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
166static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
167static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
143static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
144static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
145static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
146 struct nvmet_rdma_rsp *r);
147static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
148 struct nvmet_rdma_rsp *r);
149
150static const struct nvmet_fabrics_ops nvmet_rdma_ops;
151
168static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
169static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
170static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
171 struct nvmet_rdma_rsp *r);
172static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
173 struct nvmet_rdma_rsp *r);
174
175static const struct nvmet_fabrics_ops nvmet_rdma_ops;
176
177static int srq_size_set(const char *val, const struct kernel_param *kp)
178{
179 int n = 0, ret;
180
181 ret = kstrtoint(val, 10, &n);
182 if (ret != 0 || n < 256)
183 return -EINVAL;
184
185 return param_set_int(val, kp);
186}
187
152static int num_pages(int len)
153{
154 return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
155}
156
157static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
158{
159 return nvme_is_write(rsp->req.cmd) &&

--- 226 unchanged lines hidden ---

386
387 r->send_wr.wr_cqe = &r->send_cqe;
388 r->send_wr.sg_list = &r->send_sge;
389 r->send_wr.num_sge = 1;
390 r->send_wr.send_flags = IB_SEND_SIGNALED;
391
392 /* Data In / RDMA READ */
393 r->read_cqe.done = nvmet_rdma_read_data_done;
188static int num_pages(int len)
189{
190 return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
191}
192
193static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
194{
195 return nvme_is_write(rsp->req.cmd) &&

--- 226 unchanged lines hidden ---

422
423 r->send_wr.wr_cqe = &r->send_cqe;
424 r->send_wr.sg_list = &r->send_sge;
425 r->send_wr.num_sge = 1;
426 r->send_wr.send_flags = IB_SEND_SIGNALED;
427
428 /* Data In / RDMA READ */
429 r->read_cqe.done = nvmet_rdma_read_data_done;
430 /* Data Out / RDMA WRITE */
431 r->write_cqe.done = nvmet_rdma_write_data_done;
432
394 return 0;
395
396out_free_rsp:
397 kfree(r->req.cqe);
398out:
399 return -ENOMEM;
400}
401

--- 59 unchanged lines hidden ---

461 struct nvmet_rdma_cmd *cmd)
462{
463 int ret;
464
465 ib_dma_sync_single_for_device(ndev->device,
466 cmd->sge[0].addr, cmd->sge[0].length,
467 DMA_FROM_DEVICE);
468
433 return 0;
434
435out_free_rsp:
436 kfree(r->req.cqe);
437out:
438 return -ENOMEM;
439}
440

--- 59 unchanged lines hidden ---

500 struct nvmet_rdma_cmd *cmd)
501{
502 int ret;
503
504 ib_dma_sync_single_for_device(ndev->device,
505 cmd->sge[0].addr, cmd->sge[0].length,
506 DMA_FROM_DEVICE);
507
469 if (ndev->srq)
470 ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
508 if (cmd->nsrq)
509 ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
471 else
472 ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
473
474 if (unlikely(ret))
475 pr_err("post_recv cmd failed\n");
476
477 return ret;
478}

--- 16 unchanged lines hidden ---

495 if (!ret) {
496 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
497 break;
498 }
499 }
500 spin_unlock(&queue->rsp_wr_wait_lock);
501}
502
510 else
511 ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
512
513 if (unlikely(ret))
514 pr_err("post_recv cmd failed\n");
515
516 return ret;
517}

--- 16 unchanged lines hidden ---

534 if (!ret) {
535 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
536 break;
537 }
538 }
539 spin_unlock(&queue->rsp_wr_wait_lock);
540}
541
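/*
 * Descriptive note (added by the editor): check the signature MR after an
 * RDMA operation and translate any T10-PI failure (guard, reference tag or
 * application tag mismatch) into the corresponding NVMe status code.
 */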
542static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
543{
544 struct ib_mr_status mr_status;
545 int ret;
546 u16 status = 0;
503
547
548 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
549 if (ret) {
550 pr_err("ib_check_mr_status failed, ret %d\n", ret);
551 return NVME_SC_INVALID_PI;
552 }
553
554 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
555 switch (mr_status.sig_err.err_type) {
556 case IB_SIG_BAD_GUARD:
557 status = NVME_SC_GUARD_CHECK;
558 break;
559 case IB_SIG_BAD_REFTAG:
560 status = NVME_SC_REFTAG_CHECK;
561 break;
562 case IB_SIG_BAD_APPTAG:
563 status = NVME_SC_APPTAG_CHECK;
564 break;
565 }
566 pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
567 mr_status.sig_err.err_type,
568 mr_status.sig_err.expected,
569 mr_status.sig_err.actual);
570 }
571
572 return status;
573}
574
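/*
 * Descriptive note (added by the editor): fill one ib_sig_domain with the
 * T10-DIF parameters (PI interval, guard, reference tag, application tag)
 * taken from the NVMe read/write command.
 */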
575static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
576 struct nvme_command *cmd, struct ib_sig_domain *domain,
577 u16 control, u8 pi_type)
578{
579 domain->sig_type = IB_SIG_TYPE_T10_DIF;
580 domain->sig.dif.bg_type = IB_T10DIF_CRC;
581 domain->sig.dif.pi_interval = 1 << bi->interval_exp;
582 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
583 if (control & NVME_RW_PRINFO_PRCHK_REF)
584 domain->sig.dif.ref_remap = true;
585
586 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
587 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
588 domain->sig.dif.app_escape = true;
589 if (pi_type == NVME_NS_DPS_PI_TYPE3)
590 domain->sig.dif.ref_escape = true;
591}
592
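/*
 * Descriptive note (added by the editor): build the signature attributes for
 * a request. With PRACT set, the HCA inserts/strips the PI and only the
 * memory domain is programmed; otherwise both wire and memory domains are
 * configured, along with the requested guard/reftag/apptag checks.
 */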
593static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
594 struct ib_sig_attrs *sig_attrs)
595{
596 struct nvme_command *cmd = req->cmd;
597 u16 control = le16_to_cpu(cmd->rw.control);
598 u8 pi_type = req->ns->pi_type;
599 struct blk_integrity *bi;
600
601 bi = bdev_get_integrity(req->ns->bdev);
602
603 memset(sig_attrs, 0, sizeof(*sig_attrs));
604
605 if (control & NVME_RW_PRINFO_PRACT) {
606 /* for WRITE_INSERT/READ_STRIP no wire domain */
607 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
608 nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
609 pi_type);
610 /* Clear the PRACT bit since HCA will generate/verify the PI */
611 control &= ~NVME_RW_PRINFO_PRACT;
612 cmd->rw.control = cpu_to_le16(control);
613 /* PI is added by the HW */
614 req->transfer_len += req->metadata_len;
615 } else {
616 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
617 nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
618 pi_type);
619 nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
620 pi_type);
621 }
622
623 if (control & NVME_RW_PRINFO_PRCHK_REF)
624 sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
625 if (control & NVME_RW_PRINFO_PRCHK_GUARD)
626 sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
627 if (control & NVME_RW_PRINFO_PRCHK_APP)
628 sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
629}
630
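/*
 * Descriptive note (added by the editor): set up the RDMA R/W context for
 * this request; requests carrying metadata use the signature-aware
 * rdma_rw_ctx_signature_init() so the HCA can generate/verify the PI during
 * the transfer.
 */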
631static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
632 struct ib_sig_attrs *sig_attrs)
633{
634 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
635 struct nvmet_req *req = &rsp->req;
636 int ret;
637
638 if (req->metadata_len)
639 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
640 cm_id->port_num, req->sg, req->sg_cnt,
641 req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
642 addr, key, nvmet_data_dir(req));
643 else
644 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
645 req->sg, req->sg_cnt, 0, addr, key,
646 nvmet_data_dir(req));
647
648 return ret;
649}
650
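/*
 * Descriptive note (added by the editor): tear down the R/W context with the
 * variant matching the one used at init time (signature vs. plain).
 */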
651static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
652{
653 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
654 struct nvmet_req *req = &rsp->req;
655
656 if (req->metadata_len)
657 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
658 cm_id->port_num, req->sg, req->sg_cnt,
659 req->metadata_sg, req->metadata_sg_cnt,
660 nvmet_data_dir(req));
661 else
662 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
663 req->sg, req->sg_cnt, nvmet_data_dir(req));
664}
665
504static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
505{
506 struct nvmet_rdma_queue *queue = rsp->queue;
507
508 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
509
666static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
667{
668 struct nvmet_rdma_queue *queue = rsp->queue;
669
670 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
671
510 if (rsp->n_rdma) {
511 rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
512 queue->cm_id->port_num, rsp->req.sg,
513 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
514 }
672 if (rsp->n_rdma)
673 nvmet_rdma_rw_ctx_destroy(rsp);
515
516 if (rsp->req.sg != rsp->cmd->inline_sg)
674
675 if (rsp->req.sg != rsp->cmd->inline_sg)
517 nvmet_req_free_sgl(&rsp->req);
676 nvmet_req_free_sgls(&rsp->req);
518
519 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
520 nvmet_rdma_process_wr_wait_list(queue);
521
522 nvmet_rdma_put_rsp(rsp);
523}
524
525static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)

--- 35 unchanged lines hidden ---

561
562 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
563 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
564 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
565 } else {
566 rsp->send_wr.opcode = IB_WR_SEND;
567 }
568
677
678 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
679 nvmet_rdma_process_wr_wait_list(queue);
680
681 nvmet_rdma_put_rsp(rsp);
682}
683
684static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)

--- 35 unchanged lines hidden ---

720
721 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
722 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
723 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
724 } else {
725 rsp->send_wr.opcode = IB_WR_SEND;
726 }
727
569 if (nvmet_rdma_need_data_out(rsp))
570 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
571 cm_id->port_num, NULL, &rsp->send_wr);
572 else
728 if (nvmet_rdma_need_data_out(rsp)) {
729 if (rsp->req.metadata_len)
730 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
731 cm_id->port_num, &rsp->write_cqe, NULL);
732 else
733 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
734 cm_id->port_num, NULL, &rsp->send_wr);
735 } else {
573 first_wr = &rsp->send_wr;
736 first_wr = &rsp->send_wr;
737 }
574
575 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
576
577 ib_dma_sync_single_for_device(rsp->queue->dev->device,
578 rsp->send_sge.addr, rsp->send_sge.length,
579 DMA_TO_DEVICE);
580
581 if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
582 pr_err("sending cmd response failed\n");
583 nvmet_rdma_release_rsp(rsp);
584 }
585}
586
587static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
588{
589 struct nvmet_rdma_rsp *rsp =
590 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
738
739 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
740
741 ib_dma_sync_single_for_device(rsp->queue->dev->device,
742 rsp->send_sge.addr, rsp->send_sge.length,
743 DMA_TO_DEVICE);
744
745 if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
746 pr_err("sending cmd response failed\n");
747 nvmet_rdma_release_rsp(rsp);
748 }
749}
750
751static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
752{
753 struct nvmet_rdma_rsp *rsp =
754 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
591 struct nvmet_rdma_queue *queue = cq->cq_context;
755 struct nvmet_rdma_queue *queue = wc->qp->qp_context;
756 u16 status = 0;
592
593 WARN_ON(rsp->n_rdma <= 0);
594 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
757
758 WARN_ON(rsp->n_rdma <= 0);
759 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
595 rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
596 queue->cm_id->port_num, rsp->req.sg,
597 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
598 rsp->n_rdma = 0;
599
600 if (unlikely(wc->status != IB_WC_SUCCESS)) {
760 rsp->n_rdma = 0;
761
762 if (unlikely(wc->status != IB_WC_SUCCESS)) {
763 nvmet_rdma_rw_ctx_destroy(rsp);
601 nvmet_req_uninit(&rsp->req);
602 nvmet_rdma_release_rsp(rsp);
603 if (wc->status != IB_WC_WR_FLUSH_ERR) {
604 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
605 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
606 nvmet_rdma_error_comp(queue);
607 }
608 return;
609 }
610
764 nvmet_req_uninit(&rsp->req);
765 nvmet_rdma_release_rsp(rsp);
766 if (wc->status != IB_WC_WR_FLUSH_ERR) {
767 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
768 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
769 nvmet_rdma_error_comp(queue);
770 }
771 return;
772 }
773
611 rsp->req.execute(&rsp->req);
774 if (rsp->req.metadata_len)
775 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
776 nvmet_rdma_rw_ctx_destroy(rsp);
777
778 if (unlikely(status))
779 nvmet_req_complete(&rsp->req, status);
780 else
781 rsp->req.execute(&rsp->req);
612}
613
782}
783
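/*
 * Descriptive note (added by the editor): completion handler for the RDMA
 * WRITE that pushes controller-to-host data when PI is in use: verify the
 * signature MR status, then post the NVMe response.
 */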
784static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
785{
786 struct nvmet_rdma_rsp *rsp =
787 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
788 struct nvmet_rdma_queue *queue = cq->cq_context;
789 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
790 u16 status;
791
792 if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
793 return;
794
795 WARN_ON(rsp->n_rdma <= 0);
796 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
797 rsp->n_rdma = 0;
798
799 if (unlikely(wc->status != IB_WC_SUCCESS)) {
800 nvmet_rdma_rw_ctx_destroy(rsp);
801 nvmet_req_uninit(&rsp->req);
802 nvmet_rdma_release_rsp(rsp);
803 if (wc->status != IB_WC_WR_FLUSH_ERR) {
804 pr_info("RDMA WRITE for CQE 0x%p failed with status %s (%d).\n",
805 wc->wr_cqe, ib_wc_status_msg(wc->status),
806 wc->status);
807 nvmet_rdma_error_comp(queue);
808 }
809 return;
810 }
811
812 /*
813 * Upon RDMA completion check the signature status
814 * - if succeeded send good NVMe response
815 * - if failed send bad NVMe response with appropriate error
816 */
817 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
818 if (unlikely(status))
819 rsp->req.cqe->status = cpu_to_le16(status << 1);
820 nvmet_rdma_rw_ctx_destroy(rsp);
821
822 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
823 pr_err("sending cmd response failed\n");
824 nvmet_rdma_release_rsp(rsp);
825 }
826}
827
614static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
615 u64 off)
616{
617 int sg_count = num_pages(len);
618 struct scatterlist *sg;
619 int i;
620
621 sg = rsp->cmd->inline_sg;

--- 38 unchanged lines hidden ---

660 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
661 rsp->req.transfer_len += len;
662 return 0;
663}
664
665static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
666 struct nvme_keyed_sgl_desc *sgl, bool invalidate)
667{
828static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
829 u64 off)
830{
831 int sg_count = num_pages(len);
832 struct scatterlist *sg;
833 int i;
834
835 sg = rsp->cmd->inline_sg;

--- 38 unchanged lines hidden ---

874 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
875 rsp->req.transfer_len += len;
876 return 0;
877}
878
879static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
880 struct nvme_keyed_sgl_desc *sgl, bool invalidate)
881{
668 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
669 u64 addr = le64_to_cpu(sgl->addr);
670 u32 key = get_unaligned_le32(sgl->key);
882 u64 addr = le64_to_cpu(sgl->addr);
883 u32 key = get_unaligned_le32(sgl->key);
884 struct ib_sig_attrs sig_attrs;
671 int ret;
672
673 rsp->req.transfer_len = get_unaligned_le24(sgl->length);
674
675 /* no data command? */
676 if (!rsp->req.transfer_len)
677 return 0;
678
885 int ret;
886
887 rsp->req.transfer_len = get_unaligned_le24(sgl->length);
888
889 /* no data command? */
890 if (!rsp->req.transfer_len)
891 return 0;
892
679 ret = nvmet_req_alloc_sgl(&rsp->req);
893 if (rsp->req.metadata_len)
894 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
895
896 ret = nvmet_req_alloc_sgls(&rsp->req);
680 if (unlikely(ret < 0))
681 goto error_out;
682
897 if (unlikely(ret < 0))
898 goto error_out;
899
683 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
684 rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
685 nvmet_data_dir(&rsp->req));
900 ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
686 if (unlikely(ret < 0))
687 goto error_out;
688 rsp->n_rdma += ret;
689
690 if (invalidate) {
691 rsp->invalidate_rkey = key;
692 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
693 }

--- 94 unchanged lines hidden ---

788out_err:
789 nvmet_req_complete(&cmd->req, status);
790}
791
792static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
793{
794 struct nvmet_rdma_cmd *cmd =
795 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
901 if (unlikely(ret < 0))
902 goto error_out;
903 rsp->n_rdma += ret;
904
905 if (invalidate) {
906 rsp->invalidate_rkey = key;
907 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
908 }

--- 94 unchanged lines hidden ---

1003out_err:
1004 nvmet_req_complete(&cmd->req, status);
1005}
1006
1007static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1008{
1009 struct nvmet_rdma_cmd *cmd =
1010 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
796 struct nvmet_rdma_queue *queue = cq->cq_context;
1011 struct nvmet_rdma_queue *queue = wc->qp->qp_context;
797 struct nvmet_rdma_rsp *rsp;
798
799 if (unlikely(wc->status != IB_WC_SUCCESS)) {
800 if (wc->status != IB_WC_WR_FLUSH_ERR) {
801 pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
802 wc->wr_cqe, ib_wc_status_msg(wc->status),
803 wc->status);
804 nvmet_rdma_error_comp(queue);

--- 35 unchanged lines hidden ---

840 nvmet_rdma_put_rsp(rsp);
841 spin_unlock_irqrestore(&queue->state_lock, flags);
842 return;
843 }
844
845 nvmet_rdma_handle_command(queue, rsp);
846}
847
1012 struct nvmet_rdma_rsp *rsp;
1013
1014 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1015 if (wc->status != IB_WC_WR_FLUSH_ERR) {
1016 pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
1017 wc->wr_cqe, ib_wc_status_msg(wc->status),
1018 wc->status);
1019 nvmet_rdma_error_comp(queue);

--- 35 unchanged lines hidden ---

1055 nvmet_rdma_put_rsp(rsp);
1056 spin_unlock_irqrestore(&queue->state_lock, flags);
1057 return;
1058 }
1059
1060 nvmet_rdma_handle_command(queue, rsp);
1061}
1062
848static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
1063static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
849{
1064{
850 if (!ndev->srq)
1065 nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
1066 false);
1067 ib_destroy_srq(nsrq->srq);
1068
1069 kfree(nsrq);
1070}
1071
1072static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
1073{
1074 int i;
1075
1076 if (!ndev->srqs)
851 return;
852
1077 return;
1078
853 nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
854 ib_destroy_srq(ndev->srq);
1079 for (i = 0; i < ndev->srq_count; i++)
1080 nvmet_rdma_destroy_srq(ndev->srqs[i]);
1081
1082 kfree(ndev->srqs);
855}
856
1083}
1084
857static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
1085static struct nvmet_rdma_srq *
1086nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
858{
859 struct ib_srq_init_attr srq_attr = { NULL, };
1087{
1088 struct ib_srq_init_attr srq_attr = { NULL, };
1089 size_t srq_size = ndev->srq_size;
1090 struct nvmet_rdma_srq *nsrq;
860 struct ib_srq *srq;
1091 struct ib_srq *srq;
861 size_t srq_size;
862 int ret, i;
863
1092 int ret, i;
1093
864 srq_size = 4095; /* XXX: tune */
1094 nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
1095 if (!nsrq)
1096 return ERR_PTR(-ENOMEM);
865
866 srq_attr.attr.max_wr = srq_size;
867 srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
868 srq_attr.attr.srq_limit = 0;
869 srq_attr.srq_type = IB_SRQT_BASIC;
870 srq = ib_create_srq(ndev->pd, &srq_attr);
871 if (IS_ERR(srq)) {
1097
1098 srq_attr.attr.max_wr = srq_size;
1099 srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
1100 srq_attr.attr.srq_limit = 0;
1101 srq_attr.srq_type = IB_SRQT_BASIC;
1102 srq = ib_create_srq(ndev->pd, &srq_attr);
1103 if (IS_ERR(srq)) {
872 /*
873 * If SRQs aren't supported we just go ahead and use normal
874 * non-shared receive queues.
875 */
876 pr_info("SRQ requested but not supported.\n");
877 return 0;
1104 ret = PTR_ERR(srq);
1105 goto out_free;
878 }
879
1106 }
1107
880 ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
881 if (IS_ERR(ndev->srq_cmds)) {
882 ret = PTR_ERR(ndev->srq_cmds);
1108 nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
1109 if (IS_ERR(nsrq->cmds)) {
1110 ret = PTR_ERR(nsrq->cmds);
883 goto out_destroy_srq;
884 }
885
1111 goto out_destroy_srq;
1112 }
1113
886 ndev->srq = srq;
887 ndev->srq_size = srq_size;
1114 nsrq->srq = srq;
1115 nsrq->ndev = ndev;
888
889 for (i = 0; i < srq_size; i++) {
1116
1117 for (i = 0; i < srq_size; i++) {
890 ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
1118 nsrq->cmds[i].nsrq = nsrq;
1119 ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
891 if (ret)
892 goto out_free_cmds;
893 }
894
1120 if (ret)
1121 goto out_free_cmds;
1122 }
1123
895 return 0;
1124 return nsrq;
896
897out_free_cmds:
1125
1126out_free_cmds:
898 nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
1127 nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
899out_destroy_srq:
900 ib_destroy_srq(srq);
1128out_destroy_srq:
1129 ib_destroy_srq(srq);
1130out_free:
1131 kfree(nsrq);
1132 return ERR_PTR(ret);
1133}
1134
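/*
 * Descriptive note (added by the editor): allocate one SRQ per completion
 * vector, bounded by the device's max_srq and max_srq_wr limits; each queue
 * later picks the SRQ that matches its completion vector.
 */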
1135static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
1136{
1137 int i, ret;
1138
1139 if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
1140 /*
1141 * If SRQs aren't supported we just go ahead and use normal
1142 * non-shared receive queues.
1143 */
1144 pr_info("SRQ requested but not supported.\n");
1145 return 0;
1146 }
1147
1148 ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
1149 nvmet_rdma_srq_size);
1150 ndev->srq_count = min(ndev->device->num_comp_vectors,
1151 ndev->device->attrs.max_srq);
1152
1153 ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
1154 if (!ndev->srqs)
1155 return -ENOMEM;
1156
1157 for (i = 0; i < ndev->srq_count; i++) {
1158 ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
1159 if (IS_ERR(ndev->srqs[i])) {
1160 ret = PTR_ERR(ndev->srqs[i]);
1161 goto err_srq;
1162 }
1163 }
1164
1165 return 0;
1166
1167err_srq:
1168 while (--i >= 0)
1169 nvmet_rdma_destroy_srq(ndev->srqs[i]);
1170 kfree(ndev->srqs);
901 return ret;
902}
903
904static void nvmet_rdma_free_dev(struct kref *ref)
905{
906 struct nvmet_rdma_device *ndev =
907 container_of(ref, struct nvmet_rdma_device, ref);
908
909 mutex_lock(&device_list_mutex);
910 list_del(&ndev->entry);
911 mutex_unlock(&device_list_mutex);
912
1171 return ret;
1172}
1173
1174static void nvmet_rdma_free_dev(struct kref *ref)
1175{
1176 struct nvmet_rdma_device *ndev =
1177 container_of(ref, struct nvmet_rdma_device, ref);
1178
1179 mutex_lock(&device_list_mutex);
1180 list_del(&ndev->entry);
1181 mutex_unlock(&device_list_mutex);
1182
913 nvmet_rdma_destroy_srq(ndev);
1183 nvmet_rdma_destroy_srqs(ndev);
914 ib_dealloc_pd(ndev->pd);
915
916 kfree(ndev);
917}
918
919static struct nvmet_rdma_device *
920nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
921{

--- 30 unchanged lines hidden ---

952 ndev->device = cm_id->device;
953 kref_init(&ndev->ref);
954
955 ndev->pd = ib_alloc_pd(ndev->device, 0);
956 if (IS_ERR(ndev->pd))
957 goto out_free_dev;
958
959 if (nvmet_rdma_use_srq) {
1184 ib_dealloc_pd(ndev->pd);
1185
1186 kfree(ndev);
1187}
1188
1189static struct nvmet_rdma_device *
1190nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
1191{

--- 30 unchanged lines hidden ---

1222 ndev->device = cm_id->device;
1223 kref_init(&ndev->ref);
1224
1225 ndev->pd = ib_alloc_pd(ndev->device, 0);
1226 if (IS_ERR(ndev->pd))
1227 goto out_free_dev;
1228
1229 if (nvmet_rdma_use_srq) {
960 ret = nvmet_rdma_init_srq(ndev);
1230 ret = nvmet_rdma_init_srqs(ndev);
961 if (ret)
962 goto out_free_pd;
963 }
964
965 list_add(&ndev->entry, &device_list);
966out_unlock:
967 mutex_unlock(&device_list_mutex);
968 pr_debug("added %s.\n", ndev->device->name);

--- 7 unchanged lines hidden ---

976 mutex_unlock(&device_list_mutex);
977 return NULL;
978}
979
980static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
981{
982 struct ib_qp_init_attr qp_attr;
983 struct nvmet_rdma_device *ndev = queue->dev;
1231 if (ret)
1232 goto out_free_pd;
1233 }
1234
1235 list_add(&ndev->entry, &device_list);
1236out_unlock:
1237 mutex_unlock(&device_list_mutex);
1238 pr_debug("added %s.\n", ndev->device->name);

--- 7 unchanged lines hidden ---

1246 mutex_unlock(&device_list_mutex);
1247 return NULL;
1248}
1249
1250static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
1251{
1252 struct ib_qp_init_attr qp_attr;
1253 struct nvmet_rdma_device *ndev = queue->dev;
984 int comp_vector, nr_cqe, ret, i, factor;
1254 int nr_cqe, ret, i, factor;
985
986 /*
1255
1256 /*
987 * Spread the io queues across completion vectors,
988 * but still keep all admin queues on vector 0.
989 */
990 comp_vector = !queue->host_qid ? 0 :
991 queue->idx % ndev->device->num_comp_vectors;
992
993 /*
994 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
995 */
996 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
997
1257 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
1258 */
1259 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
1260
998 queue->cq = ib_alloc_cq(ndev->device, queue,
999 nr_cqe + 1, comp_vector,
1000 IB_POLL_WORKQUEUE);
1261 queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
1262 queue->comp_vector, IB_POLL_WORKQUEUE);
1001 if (IS_ERR(queue->cq)) {
1002 ret = PTR_ERR(queue->cq);
1003 pr_err("failed to create CQ cqe= %d ret= %d\n",
1004 nr_cqe + 1, ret);
1005 goto out;
1006 }
1007
1008 memset(&qp_attr, 0, sizeof(qp_attr));

--- 6 unchanged lines hidden ---

1015 /* +1 for drain */
1016 qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
1017 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
1018 1 << NVMET_RDMA_MAX_MDTS);
1019 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
1020 qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
1021 ndev->device->attrs.max_send_sge);
1022
1263 if (IS_ERR(queue->cq)) {
1264 ret = PTR_ERR(queue->cq);
1265 pr_err("failed to create CQ cqe= %d ret= %d\n",
1266 nr_cqe + 1, ret);
1267 goto out;
1268 }
1269
1270 memset(&qp_attr, 0, sizeof(qp_attr));

--- 6 unchanged lines hidden ---

1277 /* +1 for drain */
1278 qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
1279 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
1280 1 << NVMET_RDMA_MAX_MDTS);
1281 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
1282 qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
1283 ndev->device->attrs.max_send_sge);
1284
1023 if (ndev->srq) {
1024 qp_attr.srq = ndev->srq;
1285 if (queue->nsrq) {
1286 qp_attr.srq = queue->nsrq->srq;
1025 } else {
1026 /* +1 for drain */
1027 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
1028 qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
1029 }
1030
1287 } else {
1288 /* +1 for drain */
1289 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
1290 qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
1291 }
1292
1293 if (queue->port->pi_enable && queue->host_qid)
1294 qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
1295
1031 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
1032 if (ret) {
1033 pr_err("failed to create_qp ret= %d\n", ret);
1034 goto err_destroy_cq;
1035 }
1036 queue->qp = queue->cm_id->qp;
1037
1038 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
1039
1040 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1041 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
1042 qp_attr.cap.max_send_wr, queue->cm_id);
1043
1296 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
1297 if (ret) {
1298 pr_err("failed to create_qp ret= %d\n", ret);
1299 goto err_destroy_cq;
1300 }
1301 queue->qp = queue->cm_id->qp;
1302
1303 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
1304
1305 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1306 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
1307 qp_attr.cap.max_send_wr, queue->cm_id);
1308
1044 if (!ndev->srq) {
1309 if (!queue->nsrq) {
1045 for (i = 0; i < queue->recv_queue_size; i++) {
1046 queue->cmds[i].queue = queue;
1047 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
1048 if (ret)
1049 goto err_destroy_qp;
1050 }
1051 }
1052
1053out:
1054 return ret;
1055
1056err_destroy_qp:
1057 rdma_destroy_qp(queue->cm_id);
1058err_destroy_cq:
1310 for (i = 0; i < queue->recv_queue_size; i++) {
1311 queue->cmds[i].queue = queue;
1312 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
1313 if (ret)
1314 goto err_destroy_qp;
1315 }
1316 }
1317
1318out:
1319 return ret;
1320
1321err_destroy_qp:
1322 rdma_destroy_qp(queue->cm_id);
1323err_destroy_cq:
1059 ib_free_cq(queue->cq);
1324 ib_cq_pool_put(queue->cq, nr_cqe + 1);
1060 goto out;
1061}
1062
1063static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
1064{
1065 ib_drain_qp(queue->qp);
1066 if (queue->cm_id)
1067 rdma_destroy_id(queue->cm_id);
1068 ib_destroy_qp(queue->qp);
1325 goto out;
1326}
1327
1328static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
1329{
1330 ib_drain_qp(queue->qp);
1331 if (queue->cm_id)
1332 rdma_destroy_id(queue->cm_id);
1333 ib_destroy_qp(queue->qp);
1069 ib_free_cq(queue->cq);
1334 ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
1335 queue->send_queue_size + 1);
1070}
1071
1072static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
1073{
1074 pr_debug("freeing queue %d\n", queue->idx);
1075
1076 nvmet_sq_destroy(&queue->nvme_sq);
1077
1078 nvmet_rdma_destroy_queue_ib(queue);
1336}
1337
1338static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
1339{
1340 pr_debug("freeing queue %d\n", queue->idx);
1341
1342 nvmet_sq_destroy(&queue->nvme_sq);
1343
1344 nvmet_rdma_destroy_queue_ib(queue);
1079 if (!queue->dev->srq) {
1345 if (!queue->nsrq) {
1080 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1081 queue->recv_queue_size,
1082 !queue->host_qid);
1083 }
1084 nvmet_rdma_free_rsps(queue);
1085 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1086 kfree(queue);
1087}

--- 45 unchanged lines hidden ---

1133 struct nvme_rdma_cm_rej rej;
1134
1135 pr_debug("rejecting connect request: status %d (%s)\n",
1136 status, nvme_rdma_cm_msg(status));
1137
1138 rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1139 rej.sts = cpu_to_le16(status);
1140
1346 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1347 queue->recv_queue_size,
1348 !queue->host_qid);
1349 }
1350 nvmet_rdma_free_rsps(queue);
1351 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1352 kfree(queue);
1353}

--- 45 unchanged lines hidden ---

1399 struct nvme_rdma_cm_rej rej;
1400
1401 pr_debug("rejecting connect request: status %d (%s)\n",
1402 status, nvme_rdma_cm_msg(status));
1403
1404 rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1405 rej.sts = cpu_to_le16(status);
1406
1141 return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
1407 return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
1408 IB_CM_REJ_CONSUMER_DEFINED);
1142}
1143
1144static struct nvmet_rdma_queue *
1145nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1146 struct rdma_cm_id *cm_id,
1147 struct rdma_cm_event *event)
1148{
1409}
1410
1411static struct nvmet_rdma_queue *
1412nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1413 struct rdma_cm_id *cm_id,
1414 struct rdma_cm_event *event)
1415{
1416 struct nvmet_rdma_port *port = cm_id->context;
1149 struct nvmet_rdma_queue *queue;
1150 int ret;
1151
1152 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1153 if (!queue) {
1154 ret = NVME_RDMA_CM_NO_RSC;
1155 goto out_reject;
1156 }

--- 10 unchanged lines hidden ---

1167
1168 /*
1169 * Schedules the actual release because calling rdma_destroy_id from
1170 * inside a CM callback would trigger a deadlock. (great API design..)
1171 */
1172 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
1173 queue->dev = ndev;
1174 queue->cm_id = cm_id;
1417 struct nvmet_rdma_queue *queue;
1418 int ret;
1419
1420 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1421 if (!queue) {
1422 ret = NVME_RDMA_CM_NO_RSC;
1423 goto out_reject;
1424 }

--- 10 unchanged lines hidden ---

1435
1436 /*
1437 * Schedules the actual release because calling rdma_destroy_id from
1438 * inside a CM callback would trigger a deadlock. (great API design..)
1439 */
1440 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
1441 queue->dev = ndev;
1442 queue->cm_id = cm_id;
1443 queue->port = port->nport;
1175
1176 spin_lock_init(&queue->state_lock);
1177 queue->state = NVMET_RDMA_Q_CONNECTING;
1178 INIT_LIST_HEAD(&queue->rsp_wait_list);
1179 INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
1180 spin_lock_init(&queue->rsp_wr_wait_lock);
1181 INIT_LIST_HEAD(&queue->free_rsps);
1182 spin_lock_init(&queue->rsps_lock);
1183 INIT_LIST_HEAD(&queue->queue_list);
1184
1185 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
1186 if (queue->idx < 0) {
1187 ret = NVME_RDMA_CM_NO_RSC;
1188 goto out_destroy_sq;
1189 }
1190
1444
1445 spin_lock_init(&queue->state_lock);
1446 queue->state = NVMET_RDMA_Q_CONNECTING;
1447 INIT_LIST_HEAD(&queue->rsp_wait_list);
1448 INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
1449 spin_lock_init(&queue->rsp_wr_wait_lock);
1450 INIT_LIST_HEAD(&queue->free_rsps);
1451 spin_lock_init(&queue->rsps_lock);
1452 INIT_LIST_HEAD(&queue->queue_list);
1453
1454 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
1455 if (queue->idx < 0) {
1456 ret = NVME_RDMA_CM_NO_RSC;
1457 goto out_destroy_sq;
1458 }
1459
1460 /*
1461 * Spread the io queues across completion vectors,
1462 * but still keep all admin queues on vector 0.
1463 */
1464 queue->comp_vector = !queue->host_qid ? 0 :
1465 queue->idx % ndev->device->num_comp_vectors;
1466
1467
1191 ret = nvmet_rdma_alloc_rsps(queue);
1192 if (ret) {
1193 ret = NVME_RDMA_CM_NO_RSC;
1194 goto out_ida_remove;
1195 }
1196
1468 ret = nvmet_rdma_alloc_rsps(queue);
1469 if (ret) {
1470 ret = NVME_RDMA_CM_NO_RSC;
1471 goto out_ida_remove;
1472 }
1473
1197 if (!ndev->srq) {
1474 if (ndev->srqs) {
1475 queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
1476 } else {
1198 queue->cmds = nvmet_rdma_alloc_cmds(ndev,
1199 queue->recv_queue_size,
1200 !queue->host_qid);
1201 if (IS_ERR(queue->cmds)) {
1202 ret = NVME_RDMA_CM_NO_RSC;
1203 goto out_free_responses;
1204 }
1205 }

--- 4 unchanged lines hidden ---

1210 __func__, ret);
1211 ret = NVME_RDMA_CM_NO_RSC;
1212 goto out_free_cmds;
1213 }
1214
1215 return queue;
1216
1217out_free_cmds:
1477 queue->cmds = nvmet_rdma_alloc_cmds(ndev,
1478 queue->recv_queue_size,
1479 !queue->host_qid);
1480 if (IS_ERR(queue->cmds)) {
1481 ret = NVME_RDMA_CM_NO_RSC;
1482 goto out_free_responses;
1483 }
1484 }

--- 4 unchanged lines hidden ---

1489 __func__, ret);
1490 ret = NVME_RDMA_CM_NO_RSC;
1491 goto out_free_cmds;
1492 }
1493
1494 return queue;
1495
1496out_free_cmds:
1218 if (!ndev->srq) {
1497 if (!queue->nsrq) {
1219 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1220 queue->recv_queue_size,
1221 !queue->host_qid);
1222 }
1223out_free_responses:
1224 nvmet_rdma_free_rsps(queue);
1225out_ida_remove:
1226 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);

--- 9 unchanged lines hidden ---

1236static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
1237{
1238 struct nvmet_rdma_queue *queue = priv;
1239
1240 switch (event->event) {
1241 case IB_EVENT_COMM_EST:
1242 rdma_notify(queue->cm_id, event->event);
1243 break;
1498 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1499 queue->recv_queue_size,
1500 !queue->host_qid);
1501 }
1502out_free_responses:
1503 nvmet_rdma_free_rsps(queue);
1504out_ida_remove:
1505 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);

--- 9 unchanged lines hidden ---

1515static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
1516{
1517 struct nvmet_rdma_queue *queue = priv;
1518
1519 switch (event->event) {
1520 case IB_EVENT_COMM_EST:
1521 rdma_notify(queue->cm_id, event->event);
1522 break;
1523 case IB_EVENT_QP_LAST_WQE_REACHED:
1524 pr_debug("received last WQE reached event for queue=0x%p\n",
1525 queue);
1526 break;
1244 default:
1245 pr_err("received IB QP event: %s (%d)\n",
1246 ib_event_msg(event->event), event->event);
1247 break;
1248 }
1249}
1250
1251static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,

--- 18 unchanged lines hidden ---

1270 pr_err("rdma_accept failed (error code = %d)\n", ret);
1271
1272 return ret;
1273}
1274
1275static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1276 struct rdma_cm_event *event)
1277{
1527 default:
1528 pr_err("received IB QP event: %s (%d)\n",
1529 ib_event_msg(event->event), event->event);
1530 break;
1531 }
1532}
1533
1534static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,

--- 18 unchanged lines hidden ---

1553 pr_err("rdma_accept failed (error code = %d)\n", ret);
1554
1555 return ret;
1556}
1557
1558static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1559 struct rdma_cm_event *event)
1560{
1278 struct nvmet_rdma_port *port = cm_id->context;
1279 struct nvmet_rdma_device *ndev;
1280 struct nvmet_rdma_queue *queue;
1281 int ret = -EINVAL;
1282
1283 ndev = nvmet_rdma_find_get_device(cm_id);
1284 if (!ndev) {
1285 nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1286 return -ECONNREFUSED;
1287 }
1288
1289 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1290 if (!queue) {
1291 ret = -ENOMEM;
1292 goto put_device;
1293 }
1561 struct nvmet_rdma_device *ndev;
1562 struct nvmet_rdma_queue *queue;
1563 int ret = -EINVAL;
1564
1565 ndev = nvmet_rdma_find_get_device(cm_id);
1566 if (!ndev) {
1567 nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1568 return -ECONNREFUSED;
1569 }
1570
1571 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1572 if (!queue) {
1573 ret = -ENOMEM;
1574 goto put_device;
1575 }
1294 queue->port = port->nport;
1295
1296 if (queue->host_qid == 0) {
1297 /* Let inflight controller teardown complete */
1298 flush_scheduled_work();
1299 }
1300
1301 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1302 if (ret) {

--- 255 unchanged lines hidden ---

1558 }
1559
1560 ret = rdma_listen(cm_id, 128);
1561 if (ret) {
1562 pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
1563 goto out_destroy_id;
1564 }
1565
1576
1577 if (queue->host_qid == 0) {
1578 /* Let inflight controller teardown complete */
1579 flush_scheduled_work();
1580 }
1581
1582 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1583 if (ret) {

--- 255 unchanged lines hidden ---

1839 }
1840
1841 ret = rdma_listen(cm_id, 128);
1842 if (ret) {
1843 pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
1844 goto out_destroy_id;
1845 }
1846
1847 if (port->nport->pi_enable &&
1848 !(cm_id->device->attrs.device_cap_flags &
1849 IB_DEVICE_INTEGRITY_HANDOVER)) {
1850 pr_err("T10-PI is not supported for %pISpcs\n", addr);
1851 ret = -EINVAL;
1852 goto out_destroy_id;
1853 }
1854
1566 port->cm_id = cm_id;
1567 return 0;
1568
1569out_destroy_id:
1570 rdma_destroy_id(cm_id);
1571 return ret;
1572}
1573

--- 93 unchanged lines hidden ---

1667 sprintf(traddr, "%pISc", addr);
1668 } else {
1669 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1670 }
1671}
1672
1673static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
1674{
1855 port->cm_id = cm_id;
1856 return 0;
1857
1858out_destroy_id:
1859 rdma_destroy_id(cm_id);
1860 return ret;
1861}
1862

--- 93 unchanged lines hidden ---

1956 sprintf(traddr, "%pISc", addr);
1957 } else {
1958 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1959 }
1960}
1961
1962static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
1963{
1964 if (ctrl->pi_support)
1965 return NVMET_RDMA_MAX_METADATA_MDTS;
1675 return NVMET_RDMA_MAX_MDTS;
1676}
1677
1678static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
1679 .owner = THIS_MODULE,
1680 .type = NVMF_TRTYPE_RDMA,
1681 .msdbd = 1,
1966 return NVMET_RDMA_MAX_MDTS;
1967}
1968
1969static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
1970 .owner = THIS_MODULE,
1971 .type = NVMF_TRTYPE_RDMA,
1972 .msdbd = 1,
1682 .has_keyed_sgls = 1,
1973 .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
1683 .add_port = nvmet_rdma_add_port,
1684 .remove_port = nvmet_rdma_remove_port,
1685 .queue_response = nvmet_rdma_queue_response,
1686 .delete_ctrl = nvmet_rdma_delete_ctrl,
1687 .disc_traddr = nvmet_rdma_disc_port_addr,
1688 .get_mdts = nvmet_rdma_get_mdts,
1689};
1690

--- 74 unchanged lines hidden ---
1974 .add_port = nvmet_rdma_add_port,
1975 .remove_port = nvmet_rdma_remove_port,
1976 .queue_response = nvmet_rdma_queue_response,
1977 .delete_ctrl = nvmet_rdma_delete_ctrl,
1978 .disc_traddr = nvmet_rdma_disc_port_addr,
1979 .get_mdts = nvmet_rdma_get_mdts,
1980};
1981

--- 74 unchanged lines hidden ---