// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 *
 * A "backward direction" (backchannel) RPC is a call that arrives
 * on a client transport (eg, an NFSv4.1 callback).  This file
 * implements the client-side resources and send/receive paths for
 * such calls.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 *
 * NOTE(review): @reqs is recorded only via the tracepoint; the credit
 * limit advertised to the server is fixed at half of
 * RPCRDMA_BACKWARD_WRS.  Backchannel rqsts themselves are created on
 * demand by rpcrdma_bc_rqst_get(), so nothing is pre-allocated here.
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 *
 * Backchannel messages must fit in a single inline send/receive
 * buffer, so the payload ceiling is the smaller of the inline read
 * and write sizes (further capped at PAGE_SIZE), minus the space
 * consumed by the RPC-over-RDMA transport header.
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

/* Marshal the transport header for a backchannel reply and attach
 * rqst->rq_snd_buf as the (inline-only) payload.
 *
 * Returns 0 on success, or -EIO if header space could not be
 * reserved or the send SGEs could not be prepared.
 */
static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

	/* 28 bytes = the seven 4-byte XDR words encoded below */
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;		/* rdma_xid */
	*p++ = rpcrdma_version;		/* rdma_vers */
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
					/* rdma_credit */
	*p++ = rdma_msg;		/* rdma_proc: inline message */
	*p++ = xdr_zero;		/* empty Read list */
	*p++ = xdr_zero;		/* empty Write list */
	*p = xdr_zero;			/* no Reply chunk */

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

	/* A congestion-control slot must be held while sending */
	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	/* Force a fresh connection; caller will see -ENOTCONN and retry */
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		/* Drop the lock around the destroy call, which may sleep;
		 * the entry has already been unlinked, so the list walk
		 * can safely resume afterward.
		 */
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 *
 * The Upper Layer is done with the callback message; return the
 * receive buffer to the transport and park the rqst back on the
 * transport's free list for reuse by the next incoming call.
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
}

/* Fetch a free backchannel rqst from xprt->bc_pa_list, or allocate
 * a new one on demand, up to a hard cap of RPCRDMA_BACKWARD_WRS
 * outstanding rqsts.
 *
 * Returns the rqst, or NULL if the cap has been reached or
 * allocation failed.
 */
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

	/* Send buffer must hold an inline message plus headroom */
	size = min_t(size_t, r_xprt->rx_data.inline_rsize, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
	if (!req)
		return NULL;

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

	/* The transport header has already been consumed from the
	 * stream; @p is the start of the RPC call message (whose
	 * first word is the XID), and @size is its length.
	 */
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

	/* Point rq_rcv_buf directly at the received message; no copy
	 * is made.  @rep must therefore stay alive until the ULP is
	 * done with the rqst (see hook-up below).
	 */
	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}