/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that fires after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

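/*
 * Editor's note: a minimal sketch of the call flow described above.
 * The RPC scheduler (sched.c/clnt.c) actually drives these steps one
 * at a time through task->tk_action callbacks; nothing calls them
 * back-to-back like this, so the snippet is illustrative only and is
 * kept out of the build.
 */
#if 0
static void example_call_flow(struct rpc_task *task)
{
	xprt_reserve(task);			/* allocate a request slot */
	if (task->tk_status == 0 &&
	    xprt_prepare_transmit(task) == 0) {
		xprt_transmit(task);		/* send, arm retransmit timer */
		xprt_end_transmit(task);	/* drop the transport write lock */
	}
	xprt_release(task);			/* return the slot */
}
#endif
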
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate of the maximum number of
 * outstanding RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full window of requests is outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)

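/*
 * Editor's note: a worked example of the fixed-point units above,
 * assuming a transport with max_reqs = 16:
 *
 *	RPC_CWNDSCALE      = 256		(one window "slot")
 *	RPC_INITCWND       = 256		(start with one request in flight)
 *	RPC_MAXCWND(xprt)  = 16 << 8 = 4096	(at most 16 in flight)
 *
 * Each request that gets a congestion slot adds RPC_CWNDSCALE to
 * xprt->cong, so RPCXPRT_CONGESTED() remains a plain integer compare.
 */
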
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

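/*
 * Editor's note: a sketch of how a transport module might register
 * itself at load time.  The "example" names and the ident value are
 * hypothetical; real idents (e.g. XPRT_TRANSPORT_TCP) live in
 * include/linux/sunrpc/xprt.h.  Kept out of the build.
 */
#if 0
static struct rpc_xprt *example_setup(struct xprt_create *args);

static struct xprt_class example_transport = {
	.list		= LIST_HEAD_INIT(example_transport.list),
	.name		= "example",
	.owner		= THIS_MODULE,
	.ident		= 255,		/* hypothetical ident value */
	.setup		= example_setup,
};

static int __init example_module_init(void)
{
	return xprt_register_transport(&example_transport);
}

static void __exit example_module_exit(void)
{
	xprt_unregister_transport(&example_transport);
}
#endif
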
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

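/*
 * Editor's note: the request_module() format string above means the
 * module name is the transport name prefixed with "xprt".  A sketch,
 * assuming the in-tree RDMA transport naming:
 */
#if 0
static int example_load_rdma(void)
{
	/* Attempts request_module("xprtrdma") unless a transport
	 * named "rdma" is already registered. */
	return xprt_load_transport("rdma");
}
#endif
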
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

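/*
 * Editor's note: a sketch of how the lock brackets a send.  The
 * generic code actually does this via xprt_prepare_transmit() and
 * xprt_end_transmit() below; nothing calls the helpers in exactly
 * this shape.  Kept out of the build.
 */
#if 0
static void example_locked_send(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (!xprt_lock_write(xprt, task))
		return;			/* task is now queued on ->sending */
	/* ... write the request payload to the transport here ... */
	xprt_release_write(xprt, task);	/* wakes the next queued writer */
}
#endif
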
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC task whose request recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: RPC task whose recently completed request is used to adjust the window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

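/*
 * Editor's note: a worked example of the update above.  On success the
 * additive increase is
 *
 *	cwnd += (RPC_CWNDSCALE^2 + cwnd/2) / cwnd
 *
 * so cwnd = 1024 (four slots) gains (65536 + 512) / 1024 = 64, i.e. a
 * quarter of a slot: about one extra slot per window's worth of
 * replies, the classic 1/cwnd growth.  On -ETIMEDOUT the window is
 * halved, with a floor of one slot (RPC_CWNDSCALE).
 */
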
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

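/*
 * Editor's note: a worked example of the major timeout, assuming
 * to_initval = 10 * HZ and to_retries = 3:
 *
 *	exponential:			rq_majortimeo = 10s << 3 = 80s
 *	linear, to_increment = 5 * HZ:	rq_majortimeo = 10s + 3 * 5s = 25s
 *
 * The result is capped at to_maxval and then converted to an absolute
 * jiffies value.
 */
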
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC task whose request recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

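/*
 * Editor's note: a sketch of the receive path a datagram transport
 * would build from the two helpers above (compare xs_udp_data_ready()
 * in xprtsock.c).  The function name and arguments are hypothetical;
 * kept out of the build.
 */
#if 0
static void example_data_ready(struct rpc_xprt *xprt, __be32 xid, int copied)
{
	struct rpc_rqst *rovr;

	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, xid);	/* match reply to request */
	if (rovr != NULL)
		xprt_complete_rqst(rovr->rq_task, copied);
	spin_unlock(&xprt->transport_lock);
}
#endif
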
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
		err = req->rq_reply_bytes_recvd;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(task))
		err = -EAGAIN;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_connect_cookie = xprt->connect_cookie;
	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_alloc_slot(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst	*req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC:       waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	memset(req, 0, sizeof(*req));	/* mark unused */

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}

struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
{
	struct rpc_xprt *xprt;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;
	kref_init(&xprt->kref);

	xprt->max_reqs = max_req;
	xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
	if (xprt->slot == NULL)
		goto out_free;

	xprt->xprt_net = get_net(net);
	return xprt;

out_free:
	kfree(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	kfree(xprt->slot);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

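/*
 * Editor's note: a sketch of how a transport setup routine might pair
 * xprt_alloc() with an embedded, transport-private structure.  The
 * structure layout and slot count are hypothetical; compare
 * xs_setup_xprt() in xprtsock.c.  Kept out of the build.
 */
#if 0
struct example_xprt {
	struct rpc_xprt	xprt;		/* embedded generic transport */
	/* ... transport-private state follows ... */
};

static struct rpc_xprt *example_setup(struct xprt_create *args)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
			  RPC_DEF_SLOT_TABLE);
	if (xprt == NULL)
		return ERR_PTR(-ENOMEM);
	/* ... fill in ops, timeouts and addresses, then return; the
	 * matching error/teardown path calls xprt_free(xprt). */
	return xprt;
}
#endif
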
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	xprt_alloc_slot(task);
	spin_unlock(&xprt->reserve_lock);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req;

	if (!(req = task->tk_rqstp))
		return;

	xprt = req->rq_xprt;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		return xprt;
	}
	if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
		/* ->setup returned a pre-initialized xprt: */
		return xprt;

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_NFS_V4_1)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_NFS_V4_1 */

	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
	return xprt;
}

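/*
 * Editor's note: a sketch of creating a transport from the generic
 * side.  The field names follow struct xprt_create as used above; the
 * ident and address values are illustrative.  Kept out of the build.
 */
#if 0
static struct rpc_xprt *example_create(struct net *net,
				       struct sockaddr *addr, size_t addrlen)
{
	struct xprt_create args = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= net,
		.dstaddr	= addr,
		.addrlen	= addrlen,
	};

	return xprt_create_transport(&args);
}
#endif
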
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 */
static void xprt_destroy(struct kref *kref)
{
	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

	dprintk("RPC:       destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->resend);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	kref_put(&xprt->kref, xprt_destroy);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	kref_get(&xprt->kref);
	return xprt;
}