/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#define RPCRDMA_BACKCHANNEL_DEBUG

static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(&r_xprt->rx_ia, req);

	kfree(rqst);
}

static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	struct xdr_buf *buf;
	size_t size;

	req = rpcrdma_create_req(r_xprt);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->rl_backchannel = true;

	size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

	size += RPCRDMA_INLINE_READ_THRESHOLD(rqst);
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	rb->rg_owner = req;
	req->rl_sendbuf = rb;
	/* so that rpcr_to_rdmar works when receiving a request */
	rqst->rq_buffer = (void *)req->rl_sendbuf->rg_base;

	buf = &rqst->rq_snd_buf;
	buf->head[0].iov_base = rqst->rq_buffer;
	buf->head[0].iov_len = 0;
	buf->tail[0].iov_base = NULL;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = size;

	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}
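/* Note: each backchannel rqst owns two persistently registered
 * buffers: rl_rdmabuf carries the RPC/RDMA transport header, and
 * rl_sendbuf carries the RPC reply message itself. Both are sized
 * and registered here at setup time so that marshaling a backward
 * direction reply never has to allocate or register memory.
 */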
/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of reps. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;
	unsigned long flags;
	int rc = 0;

	while (count--) {
		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			pr_err("RPC: %s: reply buffer alloc failed\n",
			       __func__);
			rc = PTR_ERR(rep);
			break;
		}

		spin_lock_irqsave(&buffers->rb_lock, flags);
		list_add(&rep->rr_list, &buffers->rb_recv_bufs);
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
	}

	return rc;
}
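/* Note: backchannel reps are not tracked separately. Once on
 * rb_recv_bufs they are recycled like any other rep, and
 * xprt_rdma_bc_setup() below posts a matching number of extra
 * receives via rpcrdma_ep_post_extra_recv().
 */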
/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	for (i = 0; i < (reqs << 1); i++) {
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
		if (!rqst) {
			pr_err("RPC: %s: Failed to create bc rpc_rqst\n",
			       __func__);
			goto out_free;
		}

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
			goto out_free;

		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
		goto out_free;

	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
	if (rc)
		goto out_free;

	buffer->rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");

	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}
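/* Note: "rdma-bc" is the backchannel transport class provided by the
 * svcrdma module (hence the request_module() call in
 * xprt_rdma_bc_setup() above). The backchannel shares the forward
 * channel's connection, so passing port 0 here is expected to create
 * only a placeholder endpoint rather than a new listener.
 */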
/**
 * rpcrdma_bc_marshal_reply - Marshal a backward direction RPC reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success.
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp;
	size_t rpclen;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit =
		cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	headerp->rm_type = rdma_msg;
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

	rpclen = rqst->rq_svec[0].iov_len;

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: rpclen %zd headerp 0x%p lkey 0x%x\n",
		__func__, rpclen, headerp, rdmab_lkey(req->rl_rdmabuf));
	pr_info("RPC: %s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, headerp);
	pr_info("RPC: %s:      RPC: %*ph\n",
		__func__, (int)rpclen, rqst->rq_svec[0].iov_base);
#endif

	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}
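/* The reply header built above is always RPCRDMA_HDRLEN_MIN bytes:
 * four fixed fields (XID, version, credits, rdma_msg) followed by
 * three empty chunk lists. A backward direction reply is therefore
 * always sent as a two-element Send: the transport header plus an
 * inline RPC message, with no chunks.
 */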
/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;

	spin_lock_bh(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock_bh(&xprt->bc_pa_lock);
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;

	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	smp_mb__after_atomic();

	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Called in the RPC reply handler, which runs in a tasklet.
 * Be quick about it.
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_msg *headerp;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpu(headerp->rm_xid), rep->rr_len);
	pr_info("RPC: %s: %*ph\n", __func__, rep->rr_len, headerp);
#endif

	/* Sanity check:
	 * Need at least enough bytes for RPC/RDMA header, as code
	 * here references the header fields by array offset. Also,
	 * backward calls are always inline, so ensure there
	 * are some bytes beyond the RPC/RDMA header.
	 */
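	/* The 24 extra bytes match the fixed portion of an RPC call
	 * header: XID, call direction, RPC version, program, version,
	 * and procedure (six XDR words).
	 */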
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN + 24)
		goto out_short;
	p = (__be32 *)((unsigned char *)headerp + RPCRDMA_HDRLEN_MIN);
	size = rep->rr_len - RPCRDMA_HDRLEN_MIN;

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: using rqst %p\n", __func__, rqst);
#endif

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = headerp->rm_xid;
	set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it can be reposted after the server is done
	 * parsing it but just before sending the backward
	 * direction reply.
	 */
	req = rpcr_to_rdmar(rqst);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: attaching rep %p to req %p\n",
		__func__, rep, req);
#endif
	req->rl_reply = rep;

	/* Defeat the retransmit detection logic in send_request */
	req->rl_connect_cookie = 0;

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;
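	/* Error paths: "overflow" means no pre-allocated rpc_rqst was
	 * available, so the connection is dropped to reject the call;
	 * "short" means the message cannot contain a complete RPC
	 * call, so only the receive buffer is reposted.
	 */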
out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");

	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
		xprt_disconnect_done(xprt);
	else
		pr_warn("RPC: %s: reposting rep %p\n",
			__func__, rep);
}