// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

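	/* The server's advertised credit limit is fixed at half of
	 * the backward direction WRs. The @reqs hint is recorded by
	 * the trace point but does not change what is provisioned
	 * (presumably so that a Send and a Receive WR remain
	 * available for each accepted backchannel call).
	 */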
	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	size_t maxmsg;

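	/* Backchannel RPCs are never chunked: a message must fit in
	 * a single inline buffer, less the fixed transport header.
	 */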
	maxmsg = min_t(unsigned int, ep->rep_inline_send, ep->rep_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

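	/* Fixed portion of the RPC-over-RDMA transport header for an
	 * RDMA_MSG: XID, version, credits, and procedure, followed by
	 * three empty chunk lists -- seven XDR words, 28 bytes.
	 */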
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

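	/* Sending a backchannel reply consumes a congestion control
	 * credit, just as a forward direction call does.
	 */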
	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

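	/* If posting the Send WR fails, the QP is no longer usable.
	 * Drop the connection; the caller can resend once a fresh
	 * connection has been established.
	 */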
	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

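	/* rpcrdma_req_destroy() releases the req's buffers. Do that
	 * outside bc_pa_lock, then re-take the lock to pick up the
	 * next entry on the list.
	 */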
	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

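	/* Return the rep to the receive buffer pool so it can back a
	 * subsequent Receive, then park the rqst for reuse.
	 */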
	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
}

static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

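	/* A backchannel reply is always sent inline, so the Send
	 * buffer never needs to be larger than the inline receive
	 * threshold, capped at one page.
	 */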
	size = min_t(size_t, r_xprt->rx_ep.rep_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
	if (!req)
		return NULL;

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

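	/* A zero-length inline decode yields a pointer to the start
	 * of the backchannel call without consuming any of it.
	 */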
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC:       %s: %*ph\n", __func__, (int)size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

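	/* rr_stream is already positioned past the transport header,
	 * so the first word of the remaining data is the RPC XID.
	 */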
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

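	/* Point rq_rcv_buf at the call message in place: nothing is
	 * copied, and the ULP decodes directly from the Receive
	 * buffer.
	 */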
	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}