xref: /openbmc/linux/net/sunrpc/backchannel_rqst.c (revision 2c684d89)
/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
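/*
 * A request that completes while bc_alloc_count is still below
 * bc_free_slots should be returned to bc_pa_list rather than freed;
 * see xprt_free_bc_rqst() below.
 */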
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC:        free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

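/*
 * Initialize an xdr_buf backed by a single freshly allocated page:
 * the whole page goes into head[0], while the page list and tail are
 * left empty.  Returns -ENOMEM if the page allocation fails.
 */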
static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Preallocate one XDR receive buffer */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	buf->head[0].iov_base = page_address(page);
	buf->head[0].iov_len = PAGE_SIZE;
	buf->tail[0].iov_base = NULL;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = PAGE_SIZE;
	return 0;
}

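/*
 * Allocate one backchannel rpc_rqst together with its one-page send
 * and receive XDR buffers.  Returns NULL on failure; a partially
 * built request is torn down via xprt_free_allocation().
 */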
static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs.  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	if (!xprt->ops->bc_setup)
		return 0;
	return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
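
/*
 * Usage sketch (illustrative, not part of this file): a session-based
 * caller such as the NFSv4.1 client pairs setup and destroy around the
 * lifetime of a session.  The slot count of 1 below is hypothetical:
 *
 *	rc = xprt_setup_backchannel(xprt, 1);
 *	if (rc)
 *		return rc;
 *	...
 *	xprt_destroy_backchannel(xprt, 1);
 */

/*
 * Preallocate min_reqs request structures and splice them onto the
 * transport's bc_pa_list.  Transports may plug this in as their
 * ->bc_setup method.  Returns 0 on success or -ENOMEM on failure.
 */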
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC:       setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC:       adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC:       setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed; free the temporary list.
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC:       setup backchannel transport failed\n");
	return -ENOMEM;
}

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	if (xprt->ops->bc_destroy)
		xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

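/*
 * List-based counterpart to xprt_setup_bc(): walk bc_pa_list under
 * bc_pa_lock, decrementing the slot counters by max_reqs and freeing
 * up to that many preallocated entries.  Transports may plug this in
 * as their ->bc_destroy method.
 */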
void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC:        destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC:        req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC:        backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}

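/*
 * Pick the first preallocated request on bc_pa_list and prime it for
 * the incoming callback identified by xid.  Called with bc_pa_lock
 * held, which is why the fallback allocation must use GFP_ATOMIC.
 * Returns NULL when no backchannel slots are available.
 */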
static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC:       allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
		if (!req)
			goto not_found;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC:       backchannel req=%p\n", req);
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->ops->bc_free_rqst(req);
}

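/*
 * List-based counterpart to xprt_free_bc_request() above.  Backdating
 * rq_connect_cookie ensures that a requeued entry cannot match a stale
 * xid in xprt_lookup_bc_request() before it is reassigned.  If the
 * pool no longer needs the entry (the last session was destroyed while
 * it was in use), it is freed outright instead of being requeued.
 */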
void xprt_free_bc_rqst(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC:       free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * to have any more preallocated entries.
		 */
		dprintk("RPC:       Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to obtain
 * one of these preallocated structures; use xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context, so take the
 * plain spin_lock, since there is no need for the bottom-half variant.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}

/*
 * Add the callback request to the callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new requests.
 * Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC:       add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}
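
/*
 * Receive-path sketch (illustrative, not part of this file): a
 * transport that has parsed an incoming backchannel call ties the
 * lookup and completion steps above together roughly like this; the
 * names xid and copied, and the drop policy, are hypothetical:
 *
 *	req = xprt_lookup_bc_request(xprt, xid);
 *	if (req == NULL)
 *		goto drop;	(no slot available: discard the call)
 *	... copy the call data into req->rq_rcv_buf ...
 *	xprt_complete_bc_request(req, copied);
 */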