Lines matching "rdma" in net/9p/trans_rdma.c (lines not containing the string are omitted):

 * RDMA transport layer based on the trans_fd.c implementation.

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
 * struct p9_trans_rdma - RDMA transport instance
 * ...
 * @cm_id: The RDMA CM ID
In p9_rdma_show_options():

	struct p9_trans_rdma *rdma = clnt->trans;

	if (rdma->port != P9_PORT)
		seq_printf(m, ",port=%u", rdma->port);
	if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
		seq_printf(m, ",sq=%u", rdma->sq_depth);
	if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
		seq_printf(m, ",rq=%u", rdma->rq_depth);
	if (rdma->timeout != P9_RDMA_TIMEOUT)
		seq_printf(m, ",timeout=%lu", rdma->timeout);
	if (rdma->privport)
		seq_puts(m, ",privport");
 * parse_opts - parse mount options into rdma options structure
 * ...
 * @opts: rdma transport-specific structure to parse options into
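These options arrive in the 9p mount string alongside trans=rdma: port=, sq= (send queue depth), rq= (receive queue depth), timeout= (in milliseconds, as the value is later handed to rdma_resolve_addr()), and the privport flag; p9_rdma_show_options() above prints whichever of them differ from the defaults.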
In p9_cm_event_handler():

	struct p9_trans_rdma *rdma = c->trans;
	...
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
	...
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
	...
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
	...
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
	...
		rdma_disconnect(rdma->cm_id);
	...
	complete(&rdma->cm_done);
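The handler is a per-event state machine: each RDMA CM event advances rdma->state one step and then signals cm_done so the connect path can proceed. A condensed sketch of the states named in the matches (the enum ordering is an assumption inferred from the `rdma->state < P9_RDMA_CLOSING` comparison in rdma_request() further down, not copied from the file):

	/* Sketch of the transport life cycle implied by the matches. */
	enum p9_rdma_state_sketch {
		P9_RDMA_INIT,		/* CM ID created, nothing resolved  */
		P9_RDMA_ADDR_RESOLVED,	/* address resolution event arrived */
		P9_RDMA_ROUTE_RESOLVED,	/* route resolution event arrived   */
		P9_RDMA_CONNECTED,	/* connection established           */
		P9_RDMA_FLUSHING,	/* completion error in recv_done()  */
		P9_RDMA_CLOSING,	/* fatal send error, disconnecting  */
		P9_RDMA_CLOSED,		/* disconnect event delivered       */
	};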
In recv_done():

	struct p9_trans_rdma *rdma = client->trans;
	...
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);
	...
	up(&rdma->rq_sem);
	...
	rdma->state = P9_RDMA_FLUSHING;
In send_done():

	struct p9_trans_rdma *rdma = client->trans;
	...
	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc.size, DMA_TO_DEVICE);
	...
	up(&rdma->sq_sem);
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_free_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}
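Note the double guard on every field: the structure comes from kzalloc() (see alloc_rdma() below), so an untouched field is NULL, while a failed constructor such as ib_alloc_pd() leaves an ERR_PTR value behind; teardown must skip both cases.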
In post_recv():

	struct p9_trans_rdma *rdma = client->trans;
	...
	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc.sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;
	...
	sge.lkey = rdma->pd->local_dma_lkey;
	...
	ret = ib_post_recv(rdma->qp, &wr, NULL);
	if (ret)
		ib_dma_unmap_single(rdma->cm_id->device, c->busa,
				    client->msize, DMA_FROM_DEVICE);
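The filter hides the scatter/gather and work-request setup between the mapping and the post, because those lines contain no "rdma". A hedged reconstruction of that elided middle, following the kernel's struct ib_cqe completion convention (assumed, not matched):

	c->cqe.done = recv_done;	/* CQ dispatch invokes recv_done() */

	sge.addr   = c->busa;		/* DMA address mapped above        */
	sge.length = client->msize;
	/* sge.lkey is the matched line above */

	wr.next    = NULL;
	wr.wr_cqe  = &c->cqe;		/* completion carries the context  */
	wr.sg_list = &sge;
	wr.num_sge = 1;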
In rdma_request():

	struct p9_trans_rdma *rdma = client->trans;
	...
	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
		if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
			/* reuse an already-posted receive buffer */
	...
			atomic_inc(&rdma->excess_rc);	/* raced and lost */
	...
	if (down_interruptible(&rdma->rq_sem)) {
	...
	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc.sdata, c->req->tc.size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
	...
	sge.lkey = rdma->pd->local_dma_lkey;
	...
	if (down_interruptible(&rdma->sq_sem)) {
	...
	err = ib_post_send(rdma->qp, &wr, NULL);
	...
	ib_dma_unmap_single(rdma->cm_id->device, c->busa,
			    c->req->tc.size, DMA_TO_DEVICE);
	...
	atomic_inc(&rdma->excess_rc);
	...
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
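Taken together with the up() calls in recv_done() and send_done(), the two semaphores implement credit-based flow control, while excess_rc counts receive buffers posted for requests that were later flushed, so a subsequent request can reuse one instead of posting again. A minimal sketch of that accounting under those assumptions (the wrapper function and the need_post_recv flag are invented for illustration, not from the file):

	/* Illustrative sketch of the credit accounting in rdma_request(). */
	static int p9_rdma_credits_sketch(struct p9_trans_rdma *rdma)
	{
		bool need_post_recv = true;

		if (atomic_read(&rdma->excess_rc) > 0) {
			if (atomic_sub_return(1, &rdma->excess_rc) >= 0)
				need_post_recv = false;	/* reuse spare buffer */
			else
				atomic_inc(&rdma->excess_rc); /* raced; undo */
		}

		if (need_post_recv) {
			if (down_interruptible(&rdma->rq_sem))
				return -EINTR;	/* credit returned by recv_done() */
			/* post a reply buffer here; see post_recv() above */
		}

		if (down_interruptible(&rdma->sq_sem))
			return -EINTR;		/* credit returned by send_done() */
		/* map the request and ib_post_send() it here */
		return 0;
	}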
In rdma_close():

	struct p9_trans_rdma *rdma;
	...
	rdma = client->trans;
	if (!rdma)
		return;
	...
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
 * alloc_rdma - Allocate and initialize the rdma transport structure

In alloc_rdma():

	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->port = opts->port;
	rdma->privport = opts->privport;
	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	sema_init(&rdma->rq_sem, rdma->rq_depth);
	atomic_set(&rdma->excess_rc, 0);

	return rdma;
In rdma_cancelled():

	struct p9_trans_rdma *rdma = client->trans;
	atomic_inc(&rdma->excess_rc);
static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	...
	err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
	...
}
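With privport set, the client binds its side of the CM ID to a reserved (privileged) source port before resolving the server, mirroring the same option in the TCP transport; some 9p servers treat a low source port as a weak authenticity check.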
In rdma_create_trans():

	struct p9_trans_rdma *rdma;
	...
	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;
	...
	client->trans = rdma;
	...
		err = p9_rdma_bind_privport(rdma);
	...
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	...
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;
	...
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	...
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;
	...
	rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
				   opts.sq_depth + opts.rq_depth + 1,
				   IB_POLL_SOFTIRQ);
	if (IS_ERR(rdma->cq))
		goto error;
	...
	rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
	if (IS_ERR(rdma->pd))
		goto error;
	...
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	...
	rdma->qp = rdma->cm_id->qp;
	...
	err = rdma_connect(rdma->cm_id, &conn_param);
	...
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;
	...
error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
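Only the two CQ assignments of the queue-pair setup survive the filter. A hedged sketch of the surrounding ib_qp_init_attr initialization (the capacities follow the parsed options and the CQ lines are matched above; everything else is an assumption about a standard reliable-connected setup, not copied from the file):

	struct ib_qp_init_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_type          = IB_QPT_RC;	 /* as in rdma_create_id()   */
	qp_attr.cap.max_send_wr  = opts.sq_depth; /* bounded by sq_sem       */
	qp_attr.cap.max_recv_wr  = opts.rq_depth; /* bounded by rq_sem       */
	qp_attr.send_cq          = rdma->cq;	 /* one CQ serves both queues */
	qp_attr.recv_cq          = rdma->cq;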
static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	...
};
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
MODULE_ALIAS_9P("rdma");
...
MODULE_DESCRIPTION("RDMA Transport for 9P");