// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
                                 struct rpc_rqst *rqst)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

        spin_lock(&buf->rb_reqslock);
        list_del(&req->rl_all);
        spin_unlock(&buf->rb_reqslock);

        rpcrdma_destroy_req(req);

        kfree(rqst);
}

static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
                                 struct rpc_rqst *rqst)
{
        struct rpcrdma_regbuf *rb;
        struct rpcrdma_req *req;
        size_t size;

        req = rpcrdma_create_req(r_xprt);
        if (IS_ERR(req))
                return PTR_ERR(req);
        __set_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags);

        rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
                                  DMA_TO_DEVICE, GFP_KERNEL);
        if (IS_ERR(rb))
                goto out_fail;
        req->rl_rdmabuf = rb;
        xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));

        size = r_xprt->rx_data.inline_rsize;
        rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
        if (IS_ERR(rb))
                goto out_fail;
        req->rl_sendbuf = rb;
        xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
                     min_t(size_t, size, PAGE_SIZE));
        rpcrdma_set_xprtdata(rqst, req);
        return 0;

out_fail:
        rpcrdma_bc_free_rqst(r_xprt, rqst);
        return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of reps. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
                                 unsigned int count)
{
        struct rpcrdma_rep *rep;
        int rc = 0;

        while (count--) {
                rep = rpcrdma_create_rep(r_xprt);
                if (IS_ERR(rep)) {
                        pr_err("RPC: %s: reply buffer alloc failed\n",
                               __func__);
                        rc = PTR_ERR(rep);
                        break;
                }

                rpcrdma_recv_buffer_put(rep);
        }

        return rc;
}
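/* How this code is reached (illustrative sketch, not part of this
 * file): an NFSv4.1 client provisions its backchannel through the
 * generic sunrpc layer, roughly:
 *
 *      rc = xprt_setup_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
 *
 * xprt_setup_backchannel() calls the transport's ->bc_setup method,
 * which for RPC/RDMA is xprt_rdma_bc_setup() below. The exact call
 * site and sizing constant belong to the ULP and may differ.
 */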
/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
        struct rpc_rqst *rqst;
        unsigned int i;
        int rc;

        /* The backchannel reply path returns each rpc_rqst to the
         * bc_pa_list _after_ the reply is sent. If the server is
         * faster than the client, it can send another backward
         * direction request before the rpc_rqst is returned to the
         * list. The client rejects the request in this case.
         *
         * Twice as many rpc_rqsts are prepared to ensure there is
         * always an rpc_rqst available as soon as a reply is sent.
         */
        if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
                goto out_err;

        for (i = 0; i < (reqs << 1); i++) {
                rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
                if (!rqst)
                        goto out_free;

                dprintk("RPC: %s: new rqst %p\n", __func__, rqst);

                rqst->rq_xprt = &r_xprt->rx_xprt;
                INIT_LIST_HEAD(&rqst->rq_list);
                INIT_LIST_HEAD(&rqst->rq_bc_list);

                if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
                        goto out_free;

                spin_lock_bh(&xprt->bc_pa_lock);
                list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
                spin_unlock_bh(&xprt->bc_pa_lock);
        }

        rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
        if (rc)
                goto out_free;

        rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
        if (rc)
                goto out_free;

        buffer->rb_bc_srv_max_requests = reqs;
        request_module("svcrdma");

        return 0;

out_free:
        xprt_rdma_bc_destroy(xprt, reqs);

out_err:
        pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
        return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
        int ret;

        ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
        if (ret < 0)
                return ret;
        return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        size_t maxmsg;

        maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
        maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
        return maxmsg - RPCRDMA_HDRLEN_MIN;
}
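/* A worked example of the payload calculation above (the inline
 * thresholds shown are assumed values, not requirements): with
 * inline_rsize == inline_wsize == 4096 and PAGE_SIZE == 4096,
 *
 *      maxmsg = min(4096, 4096, 4096) = 4096
 *      return 4096 - RPCRDMA_HDRLEN_MIN (28) = 4068
 *
 * so the ULP can move backchannel messages of up to 4068 bytes,
 * since calls and replies must both fit inline.
 */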
/**
 * rpcrdma_bc_marshal_reply - Send backwards direction reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success; otherwise a negative errno
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        __be32 *p;

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
                        req->rl_rdmabuf->rg_base);

        p = xdr_reserve_space(&req->rl_stream, 28);
        if (unlikely(!p))
                return -EIO;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
        *p++ = rdma_msg;
        *p++ = xdr_zero;
        *p++ = xdr_zero;
        *p = xdr_zero;

        if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
                                      &rqst->rq_snd_buf, rpcrdma_noch))
                return -EIO;
        return 0;
}
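/* The 28 bytes reserved in rpcrdma_bc_marshal_reply() above hold a
 * minimal RPC-over-RDMA transport header: seven XDR words and no
 * chunk lists (see RFC 8166). Laid out for reference:
 *
 *      word 0:   rdma_xid    - copied from rqst->rq_xid
 *      word 1:   rdma_vers   - rpcrdma_version
 *      word 2:   rdma_credit - backchannel credit limit
 *      word 3:   rdma_proc   - rdma_msg, an inline payload follows
 *      words 4-6: empty Read, Write, and Reply chunk lists (xdr_zero)
 *
 * RPCRDMA_HDRLEN_MIN is exactly these seven words: 7 * 4 == 28 bytes.
 */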
/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpc_rqst *rqst, *tmp;

        spin_lock_bh(&xprt->bc_pa_lock);
        list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                list_del(&rqst->rq_bc_pa_list);
                spin_unlock_bh(&xprt->bc_pa_lock);

                rpcrdma_bc_free_rqst(r_xprt, rqst);

                spin_lock_bh(&xprt->bc_pa_lock);
        }
        spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;

        dprintk("RPC: %s: freeing rqst %p (req %p)\n",
                __func__, rqst, rpcr_to_rdmar(rqst));

        smp_mb__before_atomic();
        WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
        clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
        smp_mb__after_atomic();

        spin_lock_bh(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g. NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
                             struct rpcrdma_rep *rep)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct svc_serv *bc_serv;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        struct xdr_buf *buf;
        size_t size;
        __be32 *p;

        p = xdr_inline_decode(&rep->rr_stream, 0);
        size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
        pr_info("RPC: %s: callback XID %08x, length=%zu\n",
                __func__, be32_to_cpup(p), size);
        pr_info("RPC: %s: %*ph\n", __func__, (int)size, p);
#endif

        /* Grab a free bc rqst */
        spin_lock(&xprt->bc_pa_lock);
        if (list_empty(&xprt->bc_pa_list)) {
                spin_unlock(&xprt->bc_pa_lock);
                goto out_overflow;
        }
        rqst = list_first_entry(&xprt->bc_pa_list,
                                struct rpc_rqst, rq_bc_pa_list);
        list_del(&rqst->rq_bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
        dprintk("RPC: %s: using rqst %p\n", __func__, rqst);

        /* Prepare rqst */
        rqst->rq_reply_bytes_recvd = 0;
        rqst->rq_bytes_sent = 0;
        rqst->rq_xid = *p;

        rqst->rq_private_buf.len = size;
        set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

        buf = &rqst->rq_rcv_buf;
        memset(buf, 0, sizeof(*buf));
        buf->head[0].iov_base = p;
        buf->head[0].iov_len = size;
        buf->len = size;

        /* The receive buffer has to be hooked to the rpcrdma_req
         * so that it is not released while the req is pointing
         * to its buffer, and so that it can be reposted after
         * the Upper Layer is done decoding it.
         */
        req = rpcr_to_rdmar(rqst);
        dprintk("RPC: %s: attaching rep %p to req %p\n",
                __func__, rep, req);
        req->rl_reply = rep;

        /* Defeat the retransmit detection logic in send_request */
        req->rl_connect_cookie = 0;

        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);

        wake_up(&bc_serv->sv_cb_waitq);

        r_xprt->rx_stats.bcall_count++;
        return;

out_overflow:
        pr_warn("RPC/RDMA backchannel overflow\n");
        xprt_disconnect_done(xprt);
        /* This receive buffer gets reposted automatically
         * when the connection is re-established.
         */
}
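/* What happens next (illustrative sketch; the names and details
 * belong to the ULP and are shown only for context): the callback
 * service thread woken via sv_cb_waitq dequeues the rqst and
 * processes it, roughly:
 *
 *      spin_lock_bh(&serv->sv_cb_lock);
 *      if (!list_empty(&serv->sv_cb_list)) {
 *              req = list_first_entry(&serv->sv_cb_list,
 *                                     struct rpc_rqst, rq_bc_list);
 *              list_del(&req->rq_bc_list);
 *              spin_unlock_bh(&serv->sv_cb_lock);
 *              error = bc_svc_process(serv, req, rqstp);
 *      }
 *
 * bc_svc_process() eventually reaches this transport's reply path,
 * which means rpcrdma_bc_marshal_reply() above, and then
 * xprt_rdma_bc_free_rqst() once the reply has been sent.
 */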