/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(req);

	kfree(rqst);
}

static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t size;

	req = rpcrdma_create_req(r_xprt);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->rl_backchannel = true;

	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

	size = r_xprt->rx_data.inline_rsize;
	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_sendbuf = rb;
	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
		     min_t(size_t, size, PAGE_SIZE));
	rpcrdma_set_xprtdata(rqst, req);
	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of rep's. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpcrdma_rep *rep;
	int rc = 0;

	while (count--) {
		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			pr_err("RPC:       %s: reply buffer alloc failed\n",
			       __func__);
			rc = PTR_ERR(rep);
			break;
		}

		rpcrdma_recv_buffer_put(rep);
	}

	return rc;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	for (i = 0; i < (reqs << 1); i++) {
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
		if (!rqst)
			goto out_free;

		dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
			goto out_free;

		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
		goto out_free;

	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
	if (rc)
		goto out_free;

	buffer->rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");

	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC:       %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

/**
 * rpcrdma_bc_marshal_reply - Send backwards direction reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success.
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit =
		cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	headerp->rm_type = rdma_msg;
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
				       &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;
	return 0;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;
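
	/* Walk the bc_pa_list and release each pre-allocated rqst.
	 * The bc_pa_lock is dropped around each rpcrdma_bc_free_rqst()
	 * call and re-taken before the next entry is examined.
	 */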
	spin_lock_bh(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock_bh(&xprt->bc_pa_lock);
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC:       %s: freeing rqst %p (req %p)\n",
		__func__, rqst, rpcr_to_rdmar(rqst));

	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	smp_mb__after_atomic();

	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC:       %s: %*ph\n", __func__, (int)size, p);
#endif

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	dprintk("RPC:       %s: using rqst %p\n", __func__, rqst);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;
	set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	dprintk("RPC:       %s: attaching rep %p to req %p\n",
		__func__, rep, req);
	req->rl_reply = rep;

	/* Defeat the retransmit detection logic in send_request */
	req->rl_connect_cookie = 0;

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}