Lines Matching defs:cm_id
87 struct rdma_cm_id *cm_id;
121 struct rdma_cm_id *cm_id;
628 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
633 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
634 cm_id->port_num, req->sg, req->sg_cnt,
638 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
647 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
651 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
652 cm_id->port_num, req->sg, req->sg_cnt,
656 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
712 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
724 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
725 cm_id->port_num, &rsp->write_cqe, NULL);
727 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
728 cm_id->port_num, NULL, &rsp->send_wr);
739 if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
783 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
815 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
959 queue->cm_id->port_num, &rsp->read_cqe, NULL))
1196 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
1198 struct nvmet_rdma_port *port = cm_id->context;
1207 if (ndev->device->node_guid == cm_id->device->node_guid &&
1217 inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
1218 cm_id->device->attrs.max_recv_sge) - 1;
1221 nport->inline_data_size, cm_id->device->name,
1229 if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
1232 cm_id->device->name);
1236 ndev->device = cm_id->device;
1292 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
1309 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
1314 queue->qp = queue->cm_id->qp;
1318 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1320 qp_attr.cap.max_send_wr, queue->cm_id);
1335 rdma_destroy_qp(queue->cm_id);
1344 if (queue->cm_id)
1345 rdma_destroy_id(queue->cm_id);
1409 static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
1420 return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
1426 struct rdma_cm_id *cm_id,
1429 struct nvmet_rdma_port *port = cm_id->context;
1455 queue->cm_id = cm_id;
1524 nvmet_rdma_cm_reject(cm_id, ret);
1534 rdma_notify(queue->cm_id, event->event);
1547 static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
1564 ret = rdma_accept(cm_id, &param);
1571 static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1578 ndev = nvmet_rdma_find_get_device(cm_id);
1580 nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1584 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1595 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1598 * Don't destroy the cm_id in free path, as we implicitly
1599 * destroy the cm_id here with non-zero ret code.
1601 queue->cm_id = NULL;
1651 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
1676 rdma_disconnect(queue->cm_id);
1696 static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1712 * @cm_id: rdma_cm id, used for nvmet port
1717 * queue cm_id and/or a device bound listener cm_id (where in this
1722 * we nullify the priv to prevent double cm_id destruction and destroying
1723 * the cm_id implicitly by returning a non-zero rc to the callout.
1725 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1732 * This is a queue cm_id. we have registered
1739 port = cm_id->context;
1742 * This is a listener cm_id. Make sure that
1744 * cm_id destroy. use atomic xchg to make sure
1747 if (xchg(&port->cm_id, NULL) != cm_id)
1757 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1763 if (cm_id->qp)
1764 queue = cm_id->qp->qp_context;
1768 event->status, cm_id);
1772 ret = nvmet_rdma_queue_connect(cm_id, event);
1779 struct nvmet_rdma_port *port = cm_id->context;
1790 ret = nvmet_rdma_device_removal(cm_id, queue);
1794 rdma_reject_msg(cm_id, event->status));
1798 nvmet_rdma_queue_connect_fail(cm_id, queue);
1846 struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
1848 if (cm_id)
1849 rdma_destroy_id(cm_id);
1862 struct rdma_cm_id *cm_id;
1865 cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1867 if (IS_ERR(cm_id)) {
1869 return PTR_ERR(cm_id);
1876 ret = rdma_set_afonly(cm_id, 1);
1882 ret = rdma_bind_addr(cm_id, addr);
1888 ret = rdma_listen(cm_id, 128);
1894 port->cm_id = cm_id;
1898 rdma_destroy_id(cm_id);
1987 struct rdma_cm_id *cm_id = port->cm_id;
1989 if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
1992 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;