/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
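
/*
 * Roughly, as a sketch (the rpc_task machinery that actually sequences
 * these calls, and all error handling, are omitted):
 *
 *	xprt_reserve(task);		// get a slot or sleep on the backlog
 *	if (xprt_prepare_transmit(task))
 *		xprt_transmit(task);	// send; wait on xprt->pending if a
 *					// reply is expected
 *	// data_ready matches the XID and calls xprt_complete_rqst(),
 *	// or xprt_timer() fires with -ETIMEDOUT
 *	xprt_release(task);		// free the slot
 */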

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
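
/*
 * A minimal sketch of how a transport module might pair these two calls
 * at module load and unload time.  The "example" transport, its ident
 * and its setup callback are hypothetical, not defined anywhere in tree:
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= 99,			// hypothetical identifier
 *		.setup	= example_setup,	// hypothetical setup callback
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */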

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
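
/*
 * For example, xprt_load_transport("rdma") asks the module loader for a
 * module named "xprtrdma"; a loadable transport therefore wants a
 * MODULE_ALIAS built from the "xprt" prefix plus its transport name.
 */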

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
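
/*
 * A transport picks one of the two locking schemes through its
 * rpc_xprt_ops.  As a sketch (based on the in-tree socket transports,
 * where datagram transports take the congestion-controlled variant and
 * stream transports take the plain one):
 *
 *	static struct rpc_xprt_ops example_udp_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt_cong,
 *		.release_xprt	= xprt_release_xprt_cong,
 *		...
 *	};
 */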

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate of the maximum number of
 * outstanding RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
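
/*
 * Worked example, with RPC_CWNDSCALE = 256 (one request's worth of
 * congestion units): for cwnd = 512, i.e. two requests, a successful
 * reply adds (256 * 256 + 256) / 512 = 128 units, half a request,
 * which is the 1/cwnd additive increase.  On -ETIMEDOUT the window is
 * halved instead (512 becomes 256) and never drops below RPC_CWNDSCALE.
 */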

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
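
/*
 * Worked example (hypothetical numbers): if rpc_calc_rto() yields an
 * estimate of HZ/2 jiffies, the procedure's backoff count is 1 and the
 * request has already been retried once, the timeout becomes
 * (HZ/2) << (1 + 1) = 2 * HZ.  A zero or oversized result is clamped
 * to to_maxval.
 */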

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
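
/*
 * Worked example (hypothetical values): with to_initval = 5 * HZ,
 * to_increment = 5 * HZ, to_retries = 3 and to_exponential = 0, the
 * major timeout falls 5s + 3 * 5s = 20s after the request starts.
 * Each minor timeout inside that window grows rq_timeout by 5s; once
 * the major timeout has passed, the values reset to to_initval, the
 * RTT estimator restarts, and -ETIMEDOUT tells the caller to decide
 * whether to retransmit.
 */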

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
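
/*
 * Sketch of the intended calling pattern: a retransmit path hands back
 * the cookie that was current when the request was last sent, so only
 * the first of a batch of timed-out requests actually breaks the
 * connection:
 *
 *	xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie);
 */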

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
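
/*
 * Sketch of a transport receive path tying the two helpers above
 * together; "xid" and "copied" stand for whatever the transport
 * extracted from the incoming packet:
 *
 *	spin_lock(&xprt->transport_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req != NULL)
 *		xprt_complete_rqst(req->rq_task, copied);
 *	spin_unlock(&xprt->transport_lock);
 */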

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
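
/*
 * Sketch of a typical caller: a transport setup routine embeds struct
 * rpc_xprt at the start of its own private structure and sizes the
 * slot table here.  The structure and tunables named below are
 * hypothetical:
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  example_slot_table_entries,
 *			  example_max_slot_table_entries);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */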

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference from xprt_reserve is that it ignores
 * the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}
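
/*
 * Sketch of a caller-side argument block (field values hypothetical):
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&server_addr,
 *		.addrlen	= sizeof(server_addr),
 *		.servername	= "nfs-server",
 *	};
 *	xprt = xprt_create_transport(&args);
 */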

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}