/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG
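/* Define RPCRDMA_BACKCHANNEL_DEBUG above to enable the verbose
 * pr_info() dumps in the #ifdef blocks below.
 */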
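/* Unlink the rpcrdma_req from the transport's list of requests,
 * release its buffers, then free the rqst itself.
 */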
static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(&r_xprt->rx_ia, req);

	kfree(rqst);
}

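/* Allocate the rpcrdma_req and the DMA-mapped buffers that back
 * one backchannel rpc_rqst.
 */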
static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	struct xdr_buf *buf;
	size_t size;

	req = rpcrdma_create_req(r_xprt);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->rl_backchannel = true;

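	/* rl_rdmabuf holds the RPC/RDMA transport header for replies
	 * sent on this rqst (see rpcrdma_bc_marshal_reply below).
	 */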
	size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

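	/* Size the send buffer to the sum of both inline thresholds,
	 * which should comfortably fit any inline backchannel reply.
	 */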
	size += RPCRDMA_INLINE_READ_THRESHOLD(rqst);
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	rb->rg_owner = req;
	req->rl_sendbuf = rb;
	/* so that rpcr_to_rdmar works when receiving a request */
	rqst->rq_buffer = (void *)req->rl_sendbuf->rg_base;

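	/* Prime rq_snd_buf so the RPC layer marshals the backchannel
	 * reply directly into the send buffer.
	 */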
	buf = &rqst->rq_snd_buf;
	buf->head[0].iov_base = rqst->rq_buffer;
	buf->head[0].iov_len = 0;
	buf->tail[0].iov_base = NULL;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = size;

	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of reps. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpcrdma_rep *rep;
	int rc = 0;

	while (count--) {
		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			pr_err("RPC:       %s: reply buffer alloc failed\n",
			       __func__);
			rc = PTR_ERR(rep);
			break;
		}

		rpcrdma_recv_buffer_put(rep);
	}

	return rc;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
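	/* For example, reqs = 2 allocates four rpc_rqsts below; the
	 * check that follows rejects any reqs whose doubling would
	 * exceed RPCRDMA_BACKWARD_WRS.
	 */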
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	for (i = 0; i < (reqs << 1); i++) {
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
		if (!rqst) {
			pr_err("RPC:       %s: Failed to create bc rpc_rqst\n",
			       __func__);
			goto out_free;
		}
		dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
			goto out_free;

		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

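	/* Provision receive buffers, then post enough extra receives
	 * to catch the expected number of backward direction calls.
	 */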
	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
		goto out_free;

	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
	if (rc)
		goto out_free;

	buffer->rb_bc_srv_max_requests = reqs;
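	/* The svcrdma module registers the "rdma-bc" transport class
	 * that xprt_rdma_bc_up() relies on below.
	 */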
	request_module("svcrdma");

	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC:       %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

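	/* Backward direction messages are always sent inline, so the
	 * payload is bounded by the smaller inline threshold, less
	 * the transport header.
	 */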
	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

/**
 * rpcrdma_bc_marshal_reply - Send backward direction reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success.
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp;
	size_t rpclen;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit =
			cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	headerp->rm_type = rdma_msg;
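	/* The reply is sent entirely inline: mark the Read, Write,
	 * and Reply chunk lists as empty.
	 */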
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

	rpclen = rqst->rq_svec[0].iov_len;

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: rpclen %zd headerp 0x%p lkey 0x%x\n",
		__func__, rpclen, headerp, rdmab_lkey(req->rl_rdmabuf));
	pr_info("RPC:       %s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, headerp);
	pr_info("RPC:       %s:      RPC: %*ph\n",
		__func__, (int)rpclen, rqst->rq_svec[0].iov_base);
#endif

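	/* The Send WR carries two SGEs: the RPC/RDMA header, then the
	 * RPC reply message itself.
	 */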
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;

	spin_lock_bh(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock_bh(&xprt->bc_pa_lock);
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC:       %s: freeing rqst %p (req %p)\n",
		__func__, rqst, rpcr_to_rdmar(rqst));

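	/* Clear the in-use flag, fully ordered, before the rqst is
	 * returned to the free list.
	 */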
	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	smp_mb__after_atomic();

	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Called in the RPC reply handler, which runs in a tasklet.
 * Be quick about it.
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_msg *headerp;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpu(headerp->rm_xid), rep->rr_len);
	pr_info("RPC:       %s: %*ph\n", __func__, rep->rr_len, headerp);
#endif

	/* Sanity check:
	 * Need at least enough bytes for RPC/RDMA header, as code
	 * here references the header fields by array offset. Also,
	 * backward calls are always inline, so ensure there
	 * are some bytes beyond the RPC/RDMA header.
	 */
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN + 24)
		goto out_short;
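	/* p points to the start of the RPC call message; size is the
	 * number of received bytes that follow the transport header.
	 */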
	p = (__be32 *)((unsigned char *)headerp + RPCRDMA_HDRLEN_MIN);
	size = rep->rr_len - RPCRDMA_HDRLEN_MIN;

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	dprintk("RPC:       %s: using rqst %p\n", __func__, rqst);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = headerp->rm_xid;

	rqst->rq_private_buf.len = size;
	set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it can be reposted after the server is done
	 * parsing it but just before sending the backward
	 * direction reply.
	 */
	req = rpcr_to_rdmar(rqst);
	dprintk("RPC:       %s: attaching rep %p to req %p\n",
		__func__, rep, req);
	req->rl_reply = rep;

	/* Defeat the retransmit detection logic in send_request */
	req->rl_connect_cookie = 0;

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

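	/* Wake the ULP's callback service thread to process the call */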
	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");

	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
		xprt_disconnect_done(xprt);
	else
		pr_warn("RPC:       %s: reposting rep %p\n",
			__func__, rep);
}