// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

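/* Unlink the rpcrdma_req from the transport's list of allocated
 * reqs, release its resources, and free the backchannel rpc_rqst.
 */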
static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(req);

	kfree(rqst);
}

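/* Allocate an rpcrdma_req and an inline-sized send buffer for one
 * backchannel reply, and attach them to a pre-allocated rpc_rqst.
 */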
static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t size;

	req = rpcrdma_create_req(r_xprt);
	if (IS_ERR(req))
		return PTR_ERR(req);

	size = r_xprt->rx_data.inline_rsize;
	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_sendbuf = rb;
	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
		     min_t(size_t, size, PAGE_SIZE));
	rpcrdma_set_xprtdata(rqst, req);
	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of reps. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	int rc = 0;

	while (count--) {
		rc = rpcrdma_create_rep(r_xprt);
		if (rc)
			break;
	}
	return rc;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
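	/* Twice "reqs" rpc_rqsts are allocated below, and each one may
	 * have a Send WR outstanding, so cap "reqs" at half of the WRs
	 * reserved for the backward direction (RPCRDMA_BACKWARD_WRS).
	 */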
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	for (i = 0; i < (reqs << 1); i++) {
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
		if (!rqst)
			goto out_free;

		dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);
		__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
			goto out_free;

		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
		goto out_free;

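	/* Post Receive WRs for the expected number of incoming calls,
	 * so that requests arriving as soon as the connection is up
	 * can be accepted.
	 */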
	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
	if (rc)
		goto out_free;

	buffer->rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC:       %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

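/* Construct the RPC/RDMA transport header for a backchannel reply
 * and DMA map the reply's send SGEs. Backchannel replies are always
 * sent inline, so no chunk lists are needed (rpcrdma_noch).
 */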
static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

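	/* The transport header is RPCRDMA_HDRLEN_MIN (28) bytes: the
	 * XID, version, credits, and rdma_msg procedure, followed by
	 * three empty (zero) chunk lists.
	 */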
	p = xdr_reserve_space(&req->rl_stream, RPCRDMA_HDRLEN_MIN);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(rqst->rq_xprt))
		goto drop_connection;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_disconnect_done(rqst->rq_xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;

	spin_lock_bh(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock_bh(&xprt->bc_pa_lock);
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC:       %s: freeing rqst %p (req %p)\n",
		__func__, rqst, rpcr_to_rdmar(rqst));

	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

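	/* A zero-length inline decode yields a pointer to the start of
	 * the backward direction call; its first word is the RPC XID.
	 */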
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC:       %s: %*ph\n", __func__, (int)size, p);
#endif

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

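	/* Build an xdr_buf that describes the call message in place in
	 * the receive buffer; the message is not copied.
	 */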
	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}