xref: /openbmc/linux/net/sunrpc/backchannel_rqst.c (revision 2eb5f31b)
/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
}
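
/*
 * Added note (summary of the helpers above, not original kernel
 * documentation): within this file, xprt_inc_alloc_count() raises both
 * bc_free_slots and bc_alloc_count by n, xprt_dec_alloc_count() lowers
 * both by n, and xprt_need_to_requeue() reports whether bc_alloc_count
 * has fallen below bc_free_slots.  xprt_free_bc_request() uses that test
 * to decide whether a completed request goes back onto bc_pa_list or is
 * freed outright.  bc_free_slots may also be adjusted outside this file.
 */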

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC:        free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Back the xdr_buf with a single preallocated page */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	buf->head[0].iov_base = page_address(page);
	buf->head[0].iov_len = PAGE_SIZE;
	buf->tail[0].iov_base = NULL;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = PAGE_SIZE;
	return 0;
}
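
/*
 * Added note (summary of the function above, not original kernel
 * documentation): the resulting xdr_buf is backed by a single page in
 * head[0], with no page array and no tail, so it can hold at most
 * PAGE_SIZE bytes of XDR data (buflen == PAGE_SIZE, len == 0).
 */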

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.  (An illustrative caller sketch follows this
 * function.)
 *
 * Some callback arguments can be large; for example, a pNFS server
 * may reference multiple deviceids.  The list can be unbounded, but
 * the client has the ability to tell the server the maximum size of
 * the callback requests.  Each deviceID is 16 bytes, so allocating one
 * page for the arguments leaves enough room to receive a number of
 * these deviceIDs.  The NFS client indicates to the pNFS server that
 * its callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC:       setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC:       adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC:       setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC:       setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
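
/*
 * Illustrative caller sketch (added for clarity; "n_cb_slots" is a
 * stand-in name, not something defined in this file).  A session-based
 * client such as NFSv4.1 typically preallocates its backchannel
 * resources once per session and releases them when the session goes
 * away:
 *
 *	int error;
 *
 *	error = xprt_setup_backchannel(xprt, n_cb_slots);
 *	if (error)			// -ENOMEM if preallocation failed
 *		return error;
 *	...
 *	xprt_destroy_backchannel(xprt, n_cb_slots);
 *
 * where n_cb_slots is whatever number of callback slots the caller has
 * negotiated for the session.
 */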

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC:        destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC:        req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC:        backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

/*
 * Grab the first preallocated rpc_rqst on bc_pa_list (allocating a fresh
 * one with GFP_ATOMIC if the list is empty but free slots remain) and
 * initialize it for the callback identified by @xid.  The request stays
 * on bc_pa_list until xprt_complete_bc_request() removes it.  Called from
 * xprt_lookup_bc_request() with xprt->bc_pa_lock held.
 */
static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC:       allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
		if (!req)
			goto not_found;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC:       backchannel req=%p\n", req);
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers associated
 * with this callback request to the transport's pool, or free them if
 * the backchannel has been torn down.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC:       free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * to have any more preallocated entries.
		 */
		dprintk("RPC:       Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to claim one
 * of these requests and xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context, so we can take the
 * plain spin_lock; there is no need for the bottom-half spin_lock_bh
 * variant here.
 *
 * Returns an available rpc_rqst, or NULL if none is available.  (See the
 * illustrative receive-path sketch at the end of this file.)
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}

/*
 * Add the callback request to the callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new requests.
 * Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC:       add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}
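
/*
 * Illustrative receive-path sketch (added for clarity; "xid" and "copied"
 * are stand-ins for values the transport extracts from the incoming
 * message).  A transport that supports the backchannel would typically do
 * something like the following when a callback-direction call arrives:
 *
 *	struct rpc_rqst *req;
 *
 *	req = xprt_lookup_bc_request(xprt, xid);
 *	if (req == NULL)
 *		return;		// no free backchannel slot; drop the call
 *
 *	// ... copy the call data into req->rq_rcv_buf ...
 *
 *	xprt_complete_bc_request(req, copied);
 *
 * after which the callback service dequeues the request from
 * bc_serv->sv_cb_list and the request is eventually returned via
 * xprt_free_bc_request().
 */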