--- rdma.c (a032e4f6d60d0aca4f6570d2ad33105a2b9ba385)
+++ rdma.c (21f9024355e58772ec5d7fc3534aa5e29d72a8b6)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * NVMe over Fabrics RDMA target.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/atomic.h>
 #include <linux/ctype.h>

--- 64 unchanged lines hidden ---

 enum nvmet_rdma_queue_state {
 	NVMET_RDMA_Q_CONNECTING,
 	NVMET_RDMA_Q_LIVE,
 	NVMET_RDMA_Q_DISCONNECTING,
 };

 struct nvmet_rdma_queue {
 	struct rdma_cm_id	*cm_id;
+	struct ib_qp		*qp;
 	struct nvmet_port	*port;
 	struct ib_cq		*cq;
 	atomic_t		sq_wr_avail;
 	struct nvmet_rdma_device *dev;
 	spinlock_t		state_lock;
 	enum nvmet_rdma_queue_state state;
 	struct nvmet_cq		nvme_cq;
 	struct nvmet_sq		nvme_sq;
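Annotation (not part of the file): every hunk below applies the same substitution, switching qp users from queue->cm_id->qp to the new cached queue->qp. A minimal sketch of the idea, using a hypothetical helper that the patch itself does not add:

/*
 * Hypothetical helper, for illustration only; the patch open-codes
 * queue->qp at each call site instead of adding this.  The point of the
 * new member is that the qp stays reachable even after queue->cm_id has
 * been cleared or handed back to the RDMA CM core during teardown.
 */
static inline struct ib_qp *nvmet_rdma_queue_qp(struct nvmet_rdma_queue *queue)
{
	return queue->qp;	/* valid from rdma_create_qp() until ib_destroy_qp() */
}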

--- 380 unchanged lines hidden ---

 	ib_dma_sync_single_for_device(ndev->device,
 		cmd->sge[0].addr, cmd->sge[0].length,
 		DMA_FROM_DEVICE);

 	if (ndev->srq)
 		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
 	else
-		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);

 	if (unlikely(ret))
 		pr_err("post_recv cmd failed\n");

 	return ret;
 }

 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)

--- 22 unchanged lines hidden ---

 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 {
 	struct nvmet_rdma_queue *queue = rsp->queue;

 	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

 	if (rsp->n_rdma) {
-		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+		rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
 				queue->cm_id->port_num, rsp->req.sg,
 				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
 	}

 	if (rsp->req.sg != rsp->cmd->inline_sg)
 		nvmet_req_free_sgl(&rsp->req);

 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))

--- 67 unchanged lines hidden ---

 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
 	struct nvmet_rdma_queue *queue = cq->cq_context;

 	WARN_ON(rsp->n_rdma <= 0);
 	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
-	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+	rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
 			queue->cm_id->port_num, rsp->req.sg,
 			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
 	rsp->n_rdma = 0;

 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		nvmet_req_uninit(&rsp->req);
 		nvmet_rdma_release_rsp(rsp);
 		if (wc->status != IB_WC_WR_FLUSH_ERR) {

--- 138 unchanged lines hidden ---

 		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
 				1 + rsp->n_rdma, queue->idx,
 				queue->nvme_sq.ctrl->cntlid);
 		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
 		return false;
 	}

 	if (nvmet_rdma_need_data_in(rsp)) {
-		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
 				queue->cm_id->port_num, &rsp->read_cqe, NULL))
 			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
 	} else {
 		rsp->req.execute(&rsp->req);
 	}

 	return true;
 }

--- 269 unchanged lines hidden ---

 		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
 	}

 	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
 	if (ret) {
 		pr_err("failed to create_qp ret= %d\n", ret);
 		goto err_destroy_cq;
 	}
+	queue->qp = queue->cm_id->qp;

 	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

 	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
 		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
 		 qp_attr.cap.max_send_wr, queue->cm_id);

 	if (!ndev->srq) {
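Annotation (not part of the file): the ordering that matters in the hunk above, shown as a hypothetical wrapper. rdma_create_qp() hangs the new qp off the cm_id, and the queue immediately caches its own pointer so later teardown does not depend on the cm_id remaining valid.

/* Hypothetical wrapper, for illustration only. */
static int nvmet_rdma_setup_qp(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_device *ndev,
		struct ib_qp_init_attr *qp_attr)
{
	int ret;

	ret = rdma_create_qp(queue->cm_id, ndev->pd, qp_attr);
	if (ret)
		return ret;

	/* Cache the qp before anything else can fail or clear cm_id. */
	queue->qp = queue->cm_id->qp;
	return 0;
}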

--- 12 unchanged lines hidden ---

 	rdma_destroy_qp(queue->cm_id);
 err_destroy_cq:
 	ib_free_cq(queue->cq);
 	goto out;
 }

 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 {
-	struct ib_qp *qp = queue->cm_id->qp;
-
-	ib_drain_qp(qp);
-	rdma_destroy_id(queue->cm_id);
-	ib_destroy_qp(qp);
+	ib_drain_qp(queue->qp);
+	if (queue->cm_id)
+		rdma_destroy_id(queue->cm_id);
+	ib_destroy_qp(queue->qp);
 	ib_free_cq(queue->cq);
 }

 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
 {
 	pr_debug("freeing queue %d\n", queue->idx);

 	nvmet_sq_destroy(&queue->nvme_sq);
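Annotation (not part of the file): the destroy hunk above is easier to read reassembled. This is the whole function as it stands after the change, built only from lines visible in this diff; the NULL check matters because the connect error path further down clears queue->cm_id before freeing the queue.

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->qp);
	/* The cm_id may already have been handed back to the RDMA CM core. */
	if (queue->cm_id)
		rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(queue->qp);
	ib_free_cq(queue->cq);
}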

--- 218 unchanged lines hidden ---

 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
 		flush_scheduled_work();
 	}

 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		schedule_work(&queue->release_work);
-		/* Destroying rdma_cm id is not needed here */
-		return 0;
+		/*
+		 * Don't destroy the cm_id in free path, as we implicitly
+		 * destroy the cm_id here with non-zero ret code.
+		 */
+		queue->cm_id = NULL;
+		goto free_queue;
 	}

 	mutex_lock(&nvmet_rdma_queue_mutex);
 	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
 	mutex_unlock(&nvmet_rdma_queue_mutex);

 	return 0;

+free_queue:
+	nvmet_rdma_free_queue(queue);
 put_device:
 	kref_put(&ndev->ref, nvmet_rdma_free_dev);

 	return ret;
 }

 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
 {

--- 438 unchanged lines hidden ---
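Annotation (not part of the file): the tail of the connect path as it reads after the last two hunks, reassembled from the visible lines (the enclosing function header is elided in this diff; upstream it is nvmet_rdma_queue_connect()). Per the new comment, returning a non-zero value to the RDMA CM core implicitly destroys the cm_id, so the queue drops its own pointer first; nvmet_rdma_free_queue(), whose body is mostly elided here, eventually reaches nvmet_rdma_destroy_queue_ib(), which now tolerates cm_id == NULL instead of destroying the id a second time.

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret) {
		/*
		 * Don't destroy the cm_id in free path, as we implicitly
		 * destroy the cm_id here with non-zero ret code.
		 */
		queue->cm_id = NULL;
		goto free_queue;
	}

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

free_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;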