// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
Torvalds */ 7021de0a95STrond Myklebust static void xprt_init(struct rpc_xprt *xprt, struct net *net); 7137ac86c3SChuck Lever static __be32 xprt_alloc_xid(struct rpc_xprt *xprt); 724e0038b6STrond Myklebust static void xprt_destroy(struct rpc_xprt *xprt); 731da177e4SLinus Torvalds 745ba03e82SJiri Slaby static DEFINE_SPINLOCK(xprt_list_lock); 7581c098afS\"Talpey, Thomas\ static LIST_HEAD(xprt_list); 7681c098afS\"Talpey, Thomas\ 779e910bffSTrond Myklebust static unsigned long xprt_request_timeout(const struct rpc_rqst *req) 789e910bffSTrond Myklebust { 799e910bffSTrond Myklebust unsigned long timeout = jiffies + req->rq_timeout; 809e910bffSTrond Myklebust 819e910bffSTrond Myklebust if (time_before(timeout, req->rq_majortimeo)) 829e910bffSTrond Myklebust return timeout; 839e910bffSTrond Myklebust return req->rq_majortimeo; 849e910bffSTrond Myklebust } 859e910bffSTrond Myklebust 8612a80469SChuck Lever /** 8781c098afS\"Talpey, Thomas\ * xprt_register_transport - register a transport implementation 8881c098afS\"Talpey, Thomas\ * @transport: transport to register 8981c098afS\"Talpey, Thomas\ * 9081c098afS\"Talpey, Thomas\ * If a transport implementation is loaded as a kernel module, it can 9181c098afS\"Talpey, Thomas\ * call this interface to make itself known to the RPC client. 
9281c098afS\"Talpey, Thomas\ * 9381c098afS\"Talpey, Thomas\ * Returns: 9481c098afS\"Talpey, Thomas\ * 0: transport successfully registered 9581c098afS\"Talpey, Thomas\ * -EEXIST: transport already registered 9681c098afS\"Talpey, Thomas\ * -EINVAL: transport module being unloaded 9781c098afS\"Talpey, Thomas\ */ 9881c098afS\"Talpey, Thomas\ int xprt_register_transport(struct xprt_class *transport) 9981c098afS\"Talpey, Thomas\ { 10081c098afS\"Talpey, Thomas\ struct xprt_class *t; 10181c098afS\"Talpey, Thomas\ int result; 10281c098afS\"Talpey, Thomas\ 10381c098afS\"Talpey, Thomas\ result = -EEXIST; 10481c098afS\"Talpey, Thomas\ spin_lock(&xprt_list_lock); 10581c098afS\"Talpey, Thomas\ list_for_each_entry(t, &xprt_list, list) { 10681c098afS\"Talpey, Thomas\ /* don't register the same transport class twice */ 1074fa016ebS\"Talpey, Thomas\ if (t->ident == transport->ident) 10881c098afS\"Talpey, Thomas\ goto out; 10981c098afS\"Talpey, Thomas\ } 11081c098afS\"Talpey, Thomas\ 11181c098afS\"Talpey, Thomas\ list_add_tail(&transport->list, &xprt_list); 11281c098afS\"Talpey, Thomas\ printk(KERN_INFO "RPC: Registered %s transport module.\n", 11381c098afS\"Talpey, Thomas\ transport->name); 11481c098afS\"Talpey, Thomas\ result = 0; 11581c098afS\"Talpey, Thomas\ 11681c098afS\"Talpey, Thomas\ out: 11781c098afS\"Talpey, Thomas\ spin_unlock(&xprt_list_lock); 11881c098afS\"Talpey, Thomas\ return result; 11981c098afS\"Talpey, Thomas\ } 12081c098afS\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_register_transport); 12181c098afS\"Talpey, Thomas\ 12281c098afS\"Talpey, Thomas\ /** 12381c098afS\"Talpey, Thomas\ * xprt_unregister_transport - unregister a transport implementation 12465b6e42cSRandy Dunlap * @transport: transport to unregister 12581c098afS\"Talpey, Thomas\ * 12681c098afS\"Talpey, Thomas\ * Returns: 12781c098afS\"Talpey, Thomas\ * 0: transport successfully unregistered 12881c098afS\"Talpey, Thomas\ * -ENOENT: transport never registered 12981c098afS\"Talpey, Thomas\ */ 
13081c098afS\"Talpey, Thomas\ int xprt_unregister_transport(struct xprt_class *transport) 13181c098afS\"Talpey, Thomas\ { 13281c098afS\"Talpey, Thomas\ struct xprt_class *t; 13381c098afS\"Talpey, Thomas\ int result; 13481c098afS\"Talpey, Thomas\ 13581c098afS\"Talpey, Thomas\ result = 0; 13681c098afS\"Talpey, Thomas\ spin_lock(&xprt_list_lock); 13781c098afS\"Talpey, Thomas\ list_for_each_entry(t, &xprt_list, list) { 13881c098afS\"Talpey, Thomas\ if (t == transport) { 13981c098afS\"Talpey, Thomas\ printk(KERN_INFO 14081c098afS\"Talpey, Thomas\ "RPC: Unregistered %s transport module.\n", 14181c098afS\"Talpey, Thomas\ transport->name); 14281c098afS\"Talpey, Thomas\ list_del_init(&transport->list); 14381c098afS\"Talpey, Thomas\ goto out; 14481c098afS\"Talpey, Thomas\ } 14581c098afS\"Talpey, Thomas\ } 14681c098afS\"Talpey, Thomas\ result = -ENOENT; 14781c098afS\"Talpey, Thomas\ 14881c098afS\"Talpey, Thomas\ out: 14981c098afS\"Talpey, Thomas\ spin_unlock(&xprt_list_lock); 15081c098afS\"Talpey, Thomas\ return result; 15181c098afS\"Talpey, Thomas\ } 15281c098afS\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_unregister_transport); 15381c098afS\"Talpey, Thomas\ 15481c098afS\"Talpey, Thomas\ /** 155441e3e24STom Talpey * xprt_load_transport - load a transport implementation 156441e3e24STom Talpey * @transport_name: transport to load 157441e3e24STom Talpey * 158441e3e24STom Talpey * Returns: 159441e3e24STom Talpey * 0: transport successfully loaded 160441e3e24STom Talpey * -ENOENT: transport module not available 161441e3e24STom Talpey */ 162441e3e24STom Talpey int xprt_load_transport(const char *transport_name) 163441e3e24STom Talpey { 164441e3e24STom Talpey struct xprt_class *t; 165441e3e24STom Talpey int result; 166441e3e24STom Talpey 167441e3e24STom Talpey result = 0; 168441e3e24STom Talpey spin_lock(&xprt_list_lock); 169441e3e24STom Talpey list_for_each_entry(t, &xprt_list, list) { 170441e3e24STom Talpey if (strcmp(t->name, transport_name) == 0) { 171441e3e24STom Talpey 
spin_unlock(&xprt_list_lock); 172441e3e24STom Talpey goto out; 173441e3e24STom Talpey } 174441e3e24STom Talpey } 175441e3e24STom Talpey spin_unlock(&xprt_list_lock); 176ef7ffe8fSAlex Riesen result = request_module("xprt%s", transport_name); 177441e3e24STom Talpey out: 178441e3e24STom Talpey return result; 179441e3e24STom Talpey } 180441e3e24STom Talpey EXPORT_SYMBOL_GPL(xprt_load_transport); 181441e3e24STom Talpey 182c544577dSTrond Myklebust static void xprt_clear_locked(struct rpc_xprt *xprt) 183c544577dSTrond Myklebust { 184c544577dSTrond Myklebust xprt->snd_task = NULL; 185c544577dSTrond Myklebust if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { 186c544577dSTrond Myklebust smp_mb__before_atomic(); 187c544577dSTrond Myklebust clear_bit(XPRT_LOCKED, &xprt->state); 188c544577dSTrond Myklebust smp_mb__after_atomic(); 189c544577dSTrond Myklebust } else 190c544577dSTrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 191c544577dSTrond Myklebust } 192c544577dSTrond Myklebust 193441e3e24STom Talpey /** 19412a80469SChuck Lever * xprt_reserve_xprt - serialize write access to transports 19512a80469SChuck Lever * @task: task that is requesting access to the transport 196177c27bfSRandy Dunlap * @xprt: pointer to the target transport 19712a80469SChuck Lever * 19812a80469SChuck Lever * This prevents mixing the payload of separate requests, and prevents 19912a80469SChuck Lever * transport connects from colliding with writes. No congestion control 20012a80469SChuck Lever * is provided. 
2011da177e4SLinus Torvalds */ 20243cedbf0STrond Myklebust int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) 2031da177e4SLinus Torvalds { 20412a80469SChuck Lever struct rpc_rqst *req = task->tk_rqstp; 20512a80469SChuck Lever 20612a80469SChuck Lever if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { 20712a80469SChuck Lever if (task == xprt->snd_task) 20812a80469SChuck Lever return 1; 20912a80469SChuck Lever goto out_sleep; 21012a80469SChuck Lever } 211c544577dSTrond Myklebust if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) 212c544577dSTrond Myklebust goto out_unlock; 21312a80469SChuck Lever xprt->snd_task = task; 2144d4a76f3Sj223yang@asset.uwaterloo.ca 21512a80469SChuck Lever return 1; 21612a80469SChuck Lever 217c544577dSTrond Myklebust out_unlock: 218c544577dSTrond Myklebust xprt_clear_locked(xprt); 21912a80469SChuck Lever out_sleep: 22046121cf7SChuck Lever dprintk("RPC: %5u failed to lock transport %p\n", 22112a80469SChuck Lever task->tk_pid, xprt); 22212a80469SChuck Lever task->tk_status = -EAGAIN; 2236b2e6856STrond Myklebust if (RPC_IS_SOFT(task)) 2246b2e6856STrond Myklebust rpc_sleep_on_timeout(&xprt->sending, task, NULL, 2259e910bffSTrond Myklebust xprt_request_timeout(req)); 2266b2e6856STrond Myklebust else 22779c99152STrond Myklebust rpc_sleep_on(&xprt->sending, task, NULL); 22812a80469SChuck Lever return 0; 22912a80469SChuck Lever } 23012444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_reserve_xprt); 23112a80469SChuck Lever 23275891f50STrond Myklebust static bool 23375891f50STrond Myklebust xprt_need_congestion_window_wait(struct rpc_xprt *xprt) 23475891f50STrond Myklebust { 23575891f50STrond Myklebust return test_bit(XPRT_CWND_WAIT, &xprt->state); 23675891f50STrond Myklebust } 23775891f50STrond Myklebust 23875891f50STrond Myklebust static void 23975891f50STrond Myklebust xprt_set_congestion_window_wait(struct rpc_xprt *xprt) 24075891f50STrond Myklebust { 24175891f50STrond Myklebust if (!list_empty(&xprt->xmit_queue)) { 24275891f50STrond 
Myklebust /* Peek at head of queue to see if it can make progress */ 24375891f50STrond Myklebust if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst, 24475891f50STrond Myklebust rq_xmit)->rq_cong) 24575891f50STrond Myklebust return; 24675891f50STrond Myklebust } 24775891f50STrond Myklebust set_bit(XPRT_CWND_WAIT, &xprt->state); 24875891f50STrond Myklebust } 24975891f50STrond Myklebust 25075891f50STrond Myklebust static void 25175891f50STrond Myklebust xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt) 25275891f50STrond Myklebust { 25375891f50STrond Myklebust if (!RPCXPRT_CONGESTED(xprt)) 25475891f50STrond Myklebust clear_bit(XPRT_CWND_WAIT, &xprt->state); 25575891f50STrond Myklebust } 25675891f50STrond Myklebust 25712a80469SChuck Lever /* 25812a80469SChuck Lever * xprt_reserve_xprt_cong - serialize write access to transports 25912a80469SChuck Lever * @task: task that is requesting access to the transport 26012a80469SChuck Lever * 26112a80469SChuck Lever * Same as xprt_reserve_xprt, but Van Jacobson congestion control is 26212a80469SChuck Lever * integrated into the decision of whether a request is allowed to be 26312a80469SChuck Lever * woken up and given access to the transport. 26475891f50STrond Myklebust * Note that the lock is only granted if we know there are free slots. 
26512a80469SChuck Lever */ 26643cedbf0STrond Myklebust int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) 26712a80469SChuck Lever { 2681da177e4SLinus Torvalds struct rpc_rqst *req = task->tk_rqstp; 2691da177e4SLinus Torvalds 2702226feb6SChuck Lever if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { 2711da177e4SLinus Torvalds if (task == xprt->snd_task) 2721da177e4SLinus Torvalds return 1; 2731da177e4SLinus Torvalds goto out_sleep; 2741da177e4SLinus Torvalds } 27543cedbf0STrond Myklebust if (req == NULL) { 27643cedbf0STrond Myklebust xprt->snd_task = task; 27743cedbf0STrond Myklebust return 1; 27843cedbf0STrond Myklebust } 279c544577dSTrond Myklebust if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) 280c544577dSTrond Myklebust goto out_unlock; 28175891f50STrond Myklebust if (!xprt_need_congestion_window_wait(xprt)) { 2821da177e4SLinus Torvalds xprt->snd_task = task; 2831da177e4SLinus Torvalds return 1; 2841da177e4SLinus Torvalds } 285c544577dSTrond Myklebust out_unlock: 286632e3bdcSTrond Myklebust xprt_clear_locked(xprt); 2871da177e4SLinus Torvalds out_sleep: 28846121cf7SChuck Lever dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); 2891da177e4SLinus Torvalds task->tk_status = -EAGAIN; 2906b2e6856STrond Myklebust if (RPC_IS_SOFT(task)) 2916b2e6856STrond Myklebust rpc_sleep_on_timeout(&xprt->sending, task, NULL, 2929e910bffSTrond Myklebust xprt_request_timeout(req)); 2936b2e6856STrond Myklebust else 29479c99152STrond Myklebust rpc_sleep_on(&xprt->sending, task, NULL); 2951da177e4SLinus Torvalds return 0; 2961da177e4SLinus Torvalds } 29712444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); 2981da177e4SLinus Torvalds 29912a80469SChuck Lever static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) 3001da177e4SLinus Torvalds { 3011da177e4SLinus Torvalds int retval; 3021da177e4SLinus Torvalds 303bd79bc57STrond Myklebust if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task) 
304bd79bc57STrond Myklebust return 1; 305b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 30643cedbf0STrond Myklebust retval = xprt->ops->reserve_xprt(xprt, task); 307b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 3081da177e4SLinus Torvalds return retval; 3091da177e4SLinus Torvalds } 3101da177e4SLinus Torvalds 311961a828dSTrond Myklebust static bool __xprt_lock_write_func(struct rpc_task *task, void *data) 3121da177e4SLinus Torvalds { 313961a828dSTrond Myklebust struct rpc_xprt *xprt = data; 31449e9a890SChuck Lever 31549e9a890SChuck Lever xprt->snd_task = task; 316961a828dSTrond Myklebust return true; 317961a828dSTrond Myklebust } 318961a828dSTrond Myklebust 319961a828dSTrond Myklebust static void __xprt_lock_write_next(struct rpc_xprt *xprt) 320961a828dSTrond Myklebust { 321961a828dSTrond Myklebust if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 32249e9a890SChuck Lever return; 323c544577dSTrond Myklebust if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) 324c544577dSTrond Myklebust goto out_unlock; 325f1dc237cSTrond Myklebust if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending, 326f1dc237cSTrond Myklebust __xprt_lock_write_func, xprt)) 327961a828dSTrond Myklebust return; 328c544577dSTrond Myklebust out_unlock: 329632e3bdcSTrond Myklebust xprt_clear_locked(xprt); 33049e9a890SChuck Lever } 33149e9a890SChuck Lever 332961a828dSTrond Myklebust static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) 333961a828dSTrond Myklebust { 334961a828dSTrond Myklebust if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 335961a828dSTrond Myklebust return; 336c544577dSTrond Myklebust if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) 337c544577dSTrond Myklebust goto out_unlock; 33875891f50STrond Myklebust if (xprt_need_congestion_window_wait(xprt)) 339961a828dSTrond Myklebust goto out_unlock; 340f1dc237cSTrond Myklebust if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending, 34175891f50STrond Myklebust __xprt_lock_write_func, xprt)) 
342961a828dSTrond Myklebust return; 3431da177e4SLinus Torvalds out_unlock: 344632e3bdcSTrond Myklebust xprt_clear_locked(xprt); 3451da177e4SLinus Torvalds } 3461da177e4SLinus Torvalds 34749e9a890SChuck Lever /** 34849e9a890SChuck Lever * xprt_release_xprt - allow other requests to use a transport 34949e9a890SChuck Lever * @xprt: transport with other tasks potentially waiting 35049e9a890SChuck Lever * @task: task that is releasing access to the transport 35149e9a890SChuck Lever * 35249e9a890SChuck Lever * Note that "task" can be NULL. No congestion control is provided. 3531da177e4SLinus Torvalds */ 35449e9a890SChuck Lever void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) 3551da177e4SLinus Torvalds { 3561da177e4SLinus Torvalds if (xprt->snd_task == task) { 357632e3bdcSTrond Myklebust xprt_clear_locked(xprt); 3581da177e4SLinus Torvalds __xprt_lock_write_next(xprt); 3591da177e4SLinus Torvalds } 3601da177e4SLinus Torvalds } 36112444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_release_xprt); 3621da177e4SLinus Torvalds 36349e9a890SChuck Lever /** 36449e9a890SChuck Lever * xprt_release_xprt_cong - allow other requests to use a transport 36549e9a890SChuck Lever * @xprt: transport with other tasks potentially waiting 36649e9a890SChuck Lever * @task: task that is releasing access to the transport 36749e9a890SChuck Lever * 36849e9a890SChuck Lever * Note that "task" can be NULL. Another task is awoken to use the 36949e9a890SChuck Lever * transport if the transport's congestion window allows it. 
37049e9a890SChuck Lever */ 37149e9a890SChuck Lever void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) 37249e9a890SChuck Lever { 37349e9a890SChuck Lever if (xprt->snd_task == task) { 374632e3bdcSTrond Myklebust xprt_clear_locked(xprt); 37549e9a890SChuck Lever __xprt_lock_write_next_cong(xprt); 37649e9a890SChuck Lever } 37749e9a890SChuck Lever } 37812444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_release_xprt_cong); 37949e9a890SChuck Lever 38049e9a890SChuck Lever static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) 3811da177e4SLinus Torvalds { 382bd79bc57STrond Myklebust if (xprt->snd_task != task) 383bd79bc57STrond Myklebust return; 384b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 38549e9a890SChuck Lever xprt->ops->release_xprt(xprt, task); 386b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 3871da177e4SLinus Torvalds } 3881da177e4SLinus Torvalds 3891da177e4SLinus Torvalds /* 3901da177e4SLinus Torvalds * Van Jacobson congestion avoidance. Check if the congestion window 3911da177e4SLinus Torvalds * overflowed. Put the task to sleep if this is the case. 
3921da177e4SLinus Torvalds */ 3931da177e4SLinus Torvalds static int 39475891f50STrond Myklebust __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) 3951da177e4SLinus Torvalds { 3961da177e4SLinus Torvalds if (req->rq_cong) 3971da177e4SLinus Torvalds return 1; 39846121cf7SChuck Lever dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n", 39975891f50STrond Myklebust req->rq_task->tk_pid, xprt->cong, xprt->cwnd); 40075891f50STrond Myklebust if (RPCXPRT_CONGESTED(xprt)) { 40175891f50STrond Myklebust xprt_set_congestion_window_wait(xprt); 4021da177e4SLinus Torvalds return 0; 40375891f50STrond Myklebust } 4041da177e4SLinus Torvalds req->rq_cong = 1; 4051da177e4SLinus Torvalds xprt->cong += RPC_CWNDSCALE; 4061da177e4SLinus Torvalds return 1; 4071da177e4SLinus Torvalds } 4081da177e4SLinus Torvalds 4091da177e4SLinus Torvalds /* 4101da177e4SLinus Torvalds * Adjust the congestion window, and wake up the next task 4111da177e4SLinus Torvalds * that has been sleeping due to congestion 4121da177e4SLinus Torvalds */ 4131da177e4SLinus Torvalds static void 4141da177e4SLinus Torvalds __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) 4151da177e4SLinus Torvalds { 4161da177e4SLinus Torvalds if (!req->rq_cong) 4171da177e4SLinus Torvalds return; 4181da177e4SLinus Torvalds req->rq_cong = 0; 4191da177e4SLinus Torvalds xprt->cong -= RPC_CWNDSCALE; 42075891f50STrond Myklebust xprt_test_and_clear_congestion_window_wait(xprt); 42149e9a890SChuck Lever __xprt_lock_write_next_cong(xprt); 4221da177e4SLinus Torvalds } 4231da177e4SLinus Torvalds 42446c0ee8bSChuck Lever /** 42575891f50STrond Myklebust * xprt_request_get_cong - Request congestion control credits 42675891f50STrond Myklebust * @xprt: pointer to transport 42775891f50STrond Myklebust * @req: pointer to RPC request 42875891f50STrond Myklebust * 42975891f50STrond Myklebust * Useful for transports that require congestion control. 
43075891f50STrond Myklebust */ 43175891f50STrond Myklebust bool 43275891f50STrond Myklebust xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) 43375891f50STrond Myklebust { 43475891f50STrond Myklebust bool ret = false; 43575891f50STrond Myklebust 43675891f50STrond Myklebust if (req->rq_cong) 43775891f50STrond Myklebust return true; 438b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 43975891f50STrond Myklebust ret = __xprt_get_cong(xprt, req) != 0; 440b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 44175891f50STrond Myklebust return ret; 44275891f50STrond Myklebust } 44375891f50STrond Myklebust EXPORT_SYMBOL_GPL(xprt_request_get_cong); 44475891f50STrond Myklebust 44575891f50STrond Myklebust /** 446a58dd398SChuck Lever * xprt_release_rqst_cong - housekeeping when request is complete 447a58dd398SChuck Lever * @task: RPC request that recently completed 448a58dd398SChuck Lever * 449a58dd398SChuck Lever * Useful for transports that require congestion control. 
450a58dd398SChuck Lever */ 451a58dd398SChuck Lever void xprt_release_rqst_cong(struct rpc_task *task) 452a58dd398SChuck Lever { 453a4f0835cSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 454a4f0835cSTrond Myklebust 455a4f0835cSTrond Myklebust __xprt_put_cong(req->rq_xprt, req); 456a58dd398SChuck Lever } 45712444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_release_rqst_cong); 458a58dd398SChuck Lever 4598593e010SChuck Lever static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt) 4608593e010SChuck Lever { 4618593e010SChuck Lever if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) 4628593e010SChuck Lever __xprt_lock_write_next_cong(xprt); 4638593e010SChuck Lever } 4648593e010SChuck Lever 46575891f50STrond Myklebust /* 46675891f50STrond Myklebust * Clear the congestion window wait flag and wake up the next 46775891f50STrond Myklebust * entry on xprt->sending 46875891f50STrond Myklebust */ 46975891f50STrond Myklebust static void 47075891f50STrond Myklebust xprt_clear_congestion_window_wait(struct rpc_xprt *xprt) 47175891f50STrond Myklebust { 47275891f50STrond Myklebust if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) { 473b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 47475891f50STrond Myklebust __xprt_lock_write_next_cong(xprt); 475b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 47675891f50STrond Myklebust } 47775891f50STrond Myklebust } 47875891f50STrond Myklebust 479a58dd398SChuck Lever /** 48046c0ee8bSChuck Lever * xprt_adjust_cwnd - adjust transport congestion window 4816a24dfb6STrond Myklebust * @xprt: pointer to xprt 48246c0ee8bSChuck Lever * @task: recently completed RPC request used to adjust window 48346c0ee8bSChuck Lever * @result: result code of completed RPC request 48446c0ee8bSChuck Lever * 4854f4cf5adSChuck Lever * The transport code maintains an estimate on the maximum number of out- 4864f4cf5adSChuck Lever * standing RPC requests, using a smoothed version of the congestion 4874f4cf5adSChuck 
Lever * avoidance implemented in 44BSD. This is basically the Van Jacobson 4884f4cf5adSChuck Lever * congestion algorithm: If a retransmit occurs, the congestion window is 4894f4cf5adSChuck Lever * halved; otherwise, it is incremented by 1/cwnd when 4904f4cf5adSChuck Lever * 4914f4cf5adSChuck Lever * - a reply is received and 4924f4cf5adSChuck Lever * - a full number of requests are outstanding and 4934f4cf5adSChuck Lever * - the congestion window hasn't been updated recently. 4941da177e4SLinus Torvalds */ 4956a24dfb6STrond Myklebust void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result) 4961da177e4SLinus Torvalds { 49746c0ee8bSChuck Lever struct rpc_rqst *req = task->tk_rqstp; 49846c0ee8bSChuck Lever unsigned long cwnd = xprt->cwnd; 4991da177e4SLinus Torvalds 5001da177e4SLinus Torvalds if (result >= 0 && cwnd <= xprt->cong) { 5011da177e4SLinus Torvalds /* The (cwnd >> 1) term makes sure 5021da177e4SLinus Torvalds * the result gets rounded properly. */ 5031da177e4SLinus Torvalds cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd; 5041da177e4SLinus Torvalds if (cwnd > RPC_MAXCWND(xprt)) 5051da177e4SLinus Torvalds cwnd = RPC_MAXCWND(xprt); 50649e9a890SChuck Lever __xprt_lock_write_next_cong(xprt); 5071da177e4SLinus Torvalds } else if (result == -ETIMEDOUT) { 5081da177e4SLinus Torvalds cwnd >>= 1; 5091da177e4SLinus Torvalds if (cwnd < RPC_CWNDSCALE) 5101da177e4SLinus Torvalds cwnd = RPC_CWNDSCALE; 5111da177e4SLinus Torvalds } 5121da177e4SLinus Torvalds dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", 5131da177e4SLinus Torvalds xprt->cong, xprt->cwnd, cwnd); 5141da177e4SLinus Torvalds xprt->cwnd = cwnd; 51546c0ee8bSChuck Lever __xprt_put_cong(xprt, req); 5161da177e4SLinus Torvalds } 51712444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_adjust_cwnd); 5181da177e4SLinus Torvalds 51944fbac22SChuck Lever /** 52044fbac22SChuck Lever * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue 52144fbac22SChuck Lever * @xprt: 
transport with waiting tasks 52244fbac22SChuck Lever * @status: result code to plant in each task before waking it 52344fbac22SChuck Lever * 52444fbac22SChuck Lever */ 52544fbac22SChuck Lever void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) 52644fbac22SChuck Lever { 52744fbac22SChuck Lever if (status < 0) 52844fbac22SChuck Lever rpc_wake_up_status(&xprt->pending, status); 52944fbac22SChuck Lever else 53044fbac22SChuck Lever rpc_wake_up(&xprt->pending); 53144fbac22SChuck Lever } 53212444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks); 53344fbac22SChuck Lever 534c7b2cae8SChuck Lever /** 535c7b2cae8SChuck Lever * xprt_wait_for_buffer_space - wait for transport output buffer to clear 536c544577dSTrond Myklebust * @xprt: transport 537a9a6b52eSTrond Myklebust * 538a9a6b52eSTrond Myklebust * Note that we only set the timer for the case of RPC_IS_SOFT(), since 539a9a6b52eSTrond Myklebust * we don't in general want to force a socket disconnection due to 540a9a6b52eSTrond Myklebust * an incomplete RPC call transmission. 
541c7b2cae8SChuck Lever */ 542c544577dSTrond Myklebust void xprt_wait_for_buffer_space(struct rpc_xprt *xprt) 543c7b2cae8SChuck Lever { 544c544577dSTrond Myklebust set_bit(XPRT_WRITE_SPACE, &xprt->state); 545c7b2cae8SChuck Lever } 54612444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space); 547c7b2cae8SChuck Lever 548c544577dSTrond Myklebust static bool 549c544577dSTrond Myklebust xprt_clear_write_space_locked(struct rpc_xprt *xprt) 550c544577dSTrond Myklebust { 551c544577dSTrond Myklebust if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) { 552c544577dSTrond Myklebust __xprt_lock_write_next(xprt); 553c544577dSTrond Myklebust dprintk("RPC: write space: waking waiting task on " 554c544577dSTrond Myklebust "xprt %p\n", xprt); 555c544577dSTrond Myklebust return true; 556c544577dSTrond Myklebust } 557c544577dSTrond Myklebust return false; 558c544577dSTrond Myklebust } 559c544577dSTrond Myklebust 560c7b2cae8SChuck Lever /** 561c7b2cae8SChuck Lever * xprt_write_space - wake the task waiting for transport output buffer space 562c7b2cae8SChuck Lever * @xprt: transport with waiting tasks 563c7b2cae8SChuck Lever * 564c7b2cae8SChuck Lever * Can be called in a soft IRQ context, so xprt_write_space never sleeps. 
565c7b2cae8SChuck Lever */ 566c544577dSTrond Myklebust bool xprt_write_space(struct rpc_xprt *xprt) 567c7b2cae8SChuck Lever { 568c544577dSTrond Myklebust bool ret; 569c544577dSTrond Myklebust 570c544577dSTrond Myklebust if (!test_bit(XPRT_WRITE_SPACE, &xprt->state)) 571c544577dSTrond Myklebust return false; 572b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 573c544577dSTrond Myklebust ret = xprt_clear_write_space_locked(xprt); 574b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 575c544577dSTrond Myklebust return ret; 576c7b2cae8SChuck Lever } 57712444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_write_space); 578c7b2cae8SChuck Lever 579da953063STrond Myklebust static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime) 580da953063STrond Myklebust { 581da953063STrond Myklebust s64 delta = ktime_to_ns(ktime_get() - abstime); 582da953063STrond Myklebust return likely(delta >= 0) ? 583da953063STrond Myklebust jiffies - nsecs_to_jiffies(delta) : 584da953063STrond Myklebust jiffies + nsecs_to_jiffies(-delta); 585da953063STrond Myklebust } 586da953063STrond Myklebust 587da953063STrond Myklebust static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req) 5881da177e4SLinus Torvalds { 589ba7392bbSTrond Myklebust const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout; 590da953063STrond Myklebust unsigned long majortimeo = req->rq_timeout; 5911da177e4SLinus Torvalds 5921da177e4SLinus Torvalds if (to->to_exponential) 593da953063STrond Myklebust majortimeo <<= to->to_retries; 5941da177e4SLinus Torvalds else 595da953063STrond Myklebust majortimeo += to->to_increment * to->to_retries; 596da953063STrond Myklebust if (majortimeo > to->to_maxval || majortimeo == 0) 597da953063STrond Myklebust majortimeo = to->to_maxval; 598da953063STrond Myklebust return majortimeo; 599da953063STrond Myklebust } 600da953063STrond Myklebust 601da953063STrond Myklebust static void xprt_reset_majortimeo(struct rpc_rqst *req) 602da953063STrond Myklebust { 
603da953063STrond Myklebust req->rq_majortimeo += xprt_calc_majortimeo(req); 604da953063STrond Myklebust } 605da953063STrond Myklebust 606da953063STrond Myklebust static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req) 607da953063STrond Myklebust { 608da953063STrond Myklebust unsigned long time_init; 609da953063STrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 610da953063STrond Myklebust 611da953063STrond Myklebust if (likely(xprt && xprt_connected(xprt))) 612da953063STrond Myklebust time_init = jiffies; 613da953063STrond Myklebust else 614da953063STrond Myklebust time_init = xprt_abs_ktime_to_jiffies(task->tk_start); 615da953063STrond Myklebust req->rq_timeout = task->tk_client->cl_timeout->to_initval; 616da953063STrond Myklebust req->rq_majortimeo = time_init + xprt_calc_majortimeo(req); 6171da177e4SLinus Torvalds } 6181da177e4SLinus Torvalds 6199903cd1cSChuck Lever /** 6209903cd1cSChuck Lever * xprt_adjust_timeout - adjust timeout values for next retransmit 6219903cd1cSChuck Lever * @req: RPC request containing parameters to use for the adjustment 6229903cd1cSChuck Lever * 6231da177e4SLinus Torvalds */ 6241da177e4SLinus Torvalds int xprt_adjust_timeout(struct rpc_rqst *req) 6251da177e4SLinus Torvalds { 6261da177e4SLinus Torvalds struct rpc_xprt *xprt = req->rq_xprt; 627ba7392bbSTrond Myklebust const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout; 6281da177e4SLinus Torvalds int status = 0; 6291da177e4SLinus Torvalds 6301da177e4SLinus Torvalds if (time_before(jiffies, req->rq_majortimeo)) { 6311da177e4SLinus Torvalds if (to->to_exponential) 6321da177e4SLinus Torvalds req->rq_timeout <<= 1; 6331da177e4SLinus Torvalds else 6341da177e4SLinus Torvalds req->rq_timeout += to->to_increment; 6351da177e4SLinus Torvalds if (to->to_maxval && req->rq_timeout >= to->to_maxval) 6361da177e4SLinus Torvalds req->rq_timeout = to->to_maxval; 6371da177e4SLinus Torvalds req->rq_retries++; 6381da177e4SLinus Torvalds } else { 6391da177e4SLinus 
Torvalds req->rq_timeout = to->to_initval; 6401da177e4SLinus Torvalds req->rq_retries = 0; 6411da177e4SLinus Torvalds xprt_reset_majortimeo(req); 6421da177e4SLinus Torvalds /* Reset the RTT counters == "slow start" */ 643b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 6441da177e4SLinus Torvalds rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); 645b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 6461da177e4SLinus Torvalds status = -ETIMEDOUT; 6471da177e4SLinus Torvalds } 6481da177e4SLinus Torvalds 6491da177e4SLinus Torvalds if (req->rq_timeout == 0) { 6501da177e4SLinus Torvalds printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n"); 6511da177e4SLinus Torvalds req->rq_timeout = 5 * HZ; 6521da177e4SLinus Torvalds } 6531da177e4SLinus Torvalds return status; 6541da177e4SLinus Torvalds } 6551da177e4SLinus Torvalds 65665f27f38SDavid Howells static void xprt_autoclose(struct work_struct *work) 6571da177e4SLinus Torvalds { 65865f27f38SDavid Howells struct rpc_xprt *xprt = 65965f27f38SDavid Howells container_of(work, struct rpc_xprt, task_cleanup); 660a1231fdaSTrond Myklebust unsigned int pflags = memalloc_nofs_save(); 6611da177e4SLinus Torvalds 66266af1e55STrond Myklebust clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 6634876cc77STrond Myklebust xprt->ops->close(xprt); 6641da177e4SLinus Torvalds xprt_release_write(xprt, NULL); 66579234c3dSTrond Myklebust wake_up_bit(&xprt->state, XPRT_LOCKED); 666a1231fdaSTrond Myklebust memalloc_nofs_restore(pflags); 6671da177e4SLinus Torvalds } 6681da177e4SLinus Torvalds 6699903cd1cSChuck Lever /** 67062da3b24STrond Myklebust * xprt_disconnect_done - mark a transport as disconnected 6719903cd1cSChuck Lever * @xprt: transport to flag for disconnect 6729903cd1cSChuck Lever * 6731da177e4SLinus Torvalds */ 67462da3b24STrond Myklebust void xprt_disconnect_done(struct rpc_xprt *xprt) 6751da177e4SLinus Torvalds { 6761da177e4SLinus Torvalds dprintk("RPC: disconnected transport %p\n", xprt); 677b5e92419STrond 
Myklebust spin_lock(&xprt->transport_lock); 6781da177e4SLinus Torvalds xprt_clear_connected(xprt); 679c544577dSTrond Myklebust xprt_clear_write_space_locked(xprt); 6808593e010SChuck Lever xprt_clear_congestion_window_wait_locked(xprt); 68127adc785STrond Myklebust xprt_wake_pending_tasks(xprt, -ENOTCONN); 682b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 6831da177e4SLinus Torvalds } 68462da3b24STrond Myklebust EXPORT_SYMBOL_GPL(xprt_disconnect_done); 6851da177e4SLinus Torvalds 68666af1e55STrond Myklebust /** 68766af1e55STrond Myklebust * xprt_force_disconnect - force a transport to disconnect 68866af1e55STrond Myklebust * @xprt: transport to disconnect 68966af1e55STrond Myklebust * 69066af1e55STrond Myklebust */ 69166af1e55STrond Myklebust void xprt_force_disconnect(struct rpc_xprt *xprt) 69266af1e55STrond Myklebust { 69366af1e55STrond Myklebust /* Don't race with the test_bit() in xprt_clear_locked() */ 694b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 69566af1e55STrond Myklebust set_bit(XPRT_CLOSE_WAIT, &xprt->state); 69666af1e55STrond Myklebust /* Try to schedule an autoclose RPC call */ 69766af1e55STrond Myklebust if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 69840a5f1b1STrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 6990445f92cSTrond Myklebust else if (xprt->snd_task) 7000445f92cSTrond Myklebust rpc_wake_up_queued_task_set_status(&xprt->pending, 7010445f92cSTrond Myklebust xprt->snd_task, -ENOTCONN); 702b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 70366af1e55STrond Myklebust } 704e2a4f4fbSChuck Lever EXPORT_SYMBOL_GPL(xprt_force_disconnect); 70566af1e55STrond Myklebust 7067f3a1d1eSTrond Myklebust static unsigned int 7077f3a1d1eSTrond Myklebust xprt_connect_cookie(struct rpc_xprt *xprt) 7087f3a1d1eSTrond Myklebust { 7097f3a1d1eSTrond Myklebust return READ_ONCE(xprt->connect_cookie); 7107f3a1d1eSTrond Myklebust } 7117f3a1d1eSTrond Myklebust 7127f3a1d1eSTrond Myklebust static bool 
7137f3a1d1eSTrond Myklebust xprt_request_retransmit_after_disconnect(struct rpc_task *task) 7147f3a1d1eSTrond Myklebust { 7157f3a1d1eSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 7167f3a1d1eSTrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 7177f3a1d1eSTrond Myklebust 7187f3a1d1eSTrond Myklebust return req->rq_connect_cookie != xprt_connect_cookie(xprt) || 7197f3a1d1eSTrond Myklebust !xprt_connected(xprt); 7207f3a1d1eSTrond Myklebust } 7217f3a1d1eSTrond Myklebust 7227c1d71cfSTrond Myklebust /** 7237c1d71cfSTrond Myklebust * xprt_conditional_disconnect - force a transport to disconnect 7247c1d71cfSTrond Myklebust * @xprt: transport to disconnect 7257c1d71cfSTrond Myklebust * @cookie: 'connection cookie' 7267c1d71cfSTrond Myklebust * 7277c1d71cfSTrond Myklebust * This attempts to break the connection if and only if 'cookie' matches 7287c1d71cfSTrond Myklebust * the current transport 'connection cookie'. It ensures that we don't 7297c1d71cfSTrond Myklebust * try to break the connection more than once when we need to retransmit 7307c1d71cfSTrond Myklebust * a batch of RPC requests. 
7317c1d71cfSTrond Myklebust * 7327c1d71cfSTrond Myklebust */ 7337c1d71cfSTrond Myklebust void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) 7347c1d71cfSTrond Myklebust { 7357c1d71cfSTrond Myklebust /* Don't race with the test_bit() in xprt_clear_locked() */ 736b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 7377c1d71cfSTrond Myklebust if (cookie != xprt->connect_cookie) 7387c1d71cfSTrond Myklebust goto out; 7392c2ee6d2SNeilBrown if (test_bit(XPRT_CLOSING, &xprt->state)) 7407c1d71cfSTrond Myklebust goto out; 7417c1d71cfSTrond Myklebust set_bit(XPRT_CLOSE_WAIT, &xprt->state); 7427c1d71cfSTrond Myklebust /* Try to schedule an autoclose RPC call */ 7437c1d71cfSTrond Myklebust if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 74440a5f1b1STrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 7452a491991STrond Myklebust xprt_wake_pending_tasks(xprt, -EAGAIN); 7467c1d71cfSTrond Myklebust out: 747b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 7487c1d71cfSTrond Myklebust } 7497c1d71cfSTrond Myklebust 750ad3331acSTrond Myklebust static bool 751ad3331acSTrond Myklebust xprt_has_timer(const struct rpc_xprt *xprt) 752ad3331acSTrond Myklebust { 753ad3331acSTrond Myklebust return xprt->idle_timeout != 0; 754ad3331acSTrond Myklebust } 755ad3331acSTrond Myklebust 756ad3331acSTrond Myklebust static void 757ad3331acSTrond Myklebust xprt_schedule_autodisconnect(struct rpc_xprt *xprt) 758ad3331acSTrond Myklebust __must_hold(&xprt->transport_lock) 759ad3331acSTrond Myklebust { 76080d3c45fSDave Wysochanski xprt->last_used = jiffies; 76195f7691dSTrond Myklebust if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt)) 762ad3331acSTrond Myklebust mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); 763ad3331acSTrond Myklebust } 764ad3331acSTrond Myklebust 7651da177e4SLinus Torvalds static void 766ff861c4dSKees Cook xprt_init_autodisconnect(struct timer_list *t) 7671da177e4SLinus Torvalds { 768ff861c4dSKees 
Cook struct rpc_xprt *xprt = from_timer(xprt, t, timer); 7691da177e4SLinus Torvalds 77095f7691dSTrond Myklebust if (!RB_EMPTY_ROOT(&xprt->recv_queue)) 771b5e92419STrond Myklebust return; 772ad3331acSTrond Myklebust /* Reset xprt->last_used to avoid connect/autodisconnect cycling */ 773ad3331acSTrond Myklebust xprt->last_used = jiffies; 7742226feb6SChuck Lever if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 7751da177e4SLinus Torvalds return; 776b5e92419STrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 7771da177e4SLinus Torvalds } 7781da177e4SLinus Torvalds 779718ba5b8STrond Myklebust bool xprt_lock_connect(struct rpc_xprt *xprt, 780718ba5b8STrond Myklebust struct rpc_task *task, 781718ba5b8STrond Myklebust void *cookie) 782718ba5b8STrond Myklebust { 783718ba5b8STrond Myklebust bool ret = false; 784718ba5b8STrond Myklebust 785b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 786718ba5b8STrond Myklebust if (!test_bit(XPRT_LOCKED, &xprt->state)) 787718ba5b8STrond Myklebust goto out; 788718ba5b8STrond Myklebust if (xprt->snd_task != task) 789718ba5b8STrond Myklebust goto out; 790718ba5b8STrond Myklebust xprt->snd_task = cookie; 791718ba5b8STrond Myklebust ret = true; 792718ba5b8STrond Myklebust out: 793b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 794718ba5b8STrond Myklebust return ret; 795718ba5b8STrond Myklebust } 796718ba5b8STrond Myklebust 797718ba5b8STrond Myklebust void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) 798718ba5b8STrond Myklebust { 799b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 800718ba5b8STrond Myklebust if (xprt->snd_task != cookie) 801718ba5b8STrond Myklebust goto out; 802718ba5b8STrond Myklebust if (!test_bit(XPRT_LOCKED, &xprt->state)) 803718ba5b8STrond Myklebust goto out; 804718ba5b8STrond Myklebust xprt->snd_task =NULL; 805718ba5b8STrond Myklebust xprt->ops->release_xprt(xprt, NULL); 806ad3331acSTrond Myklebust xprt_schedule_autodisconnect(xprt); 807718ba5b8STrond Myklebust 
out: 808b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 80979234c3dSTrond Myklebust wake_up_bit(&xprt->state, XPRT_LOCKED); 810718ba5b8STrond Myklebust } 811718ba5b8STrond Myklebust 8129903cd1cSChuck Lever /** 8139903cd1cSChuck Lever * xprt_connect - schedule a transport connect operation 8149903cd1cSChuck Lever * @task: RPC task that is requesting the connect 8151da177e4SLinus Torvalds * 8161da177e4SLinus Torvalds */ 8171da177e4SLinus Torvalds void xprt_connect(struct rpc_task *task) 8181da177e4SLinus Torvalds { 819ad2368d6STrond Myklebust struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 8201da177e4SLinus Torvalds 82146121cf7SChuck Lever dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid, 8221da177e4SLinus Torvalds xprt, (xprt_connected(xprt) ? "is" : "is not")); 8231da177e4SLinus Torvalds 824ec739ef0SChuck Lever if (!xprt_bound(xprt)) { 82501d37c42STrond Myklebust task->tk_status = -EAGAIN; 8261da177e4SLinus Torvalds return; 8271da177e4SLinus Torvalds } 8281da177e4SLinus Torvalds if (!xprt_lock_write(xprt, task)) 8291da177e4SLinus Torvalds return; 830feb8ca37STrond Myklebust 831feb8ca37STrond Myklebust if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) 832feb8ca37STrond Myklebust xprt->ops->close(xprt); 833feb8ca37STrond Myklebust 834718ba5b8STrond Myklebust if (!xprt_connected(xprt)) { 8352c2ee6d2SNeilBrown task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; 8366b2e6856STrond Myklebust rpc_sleep_on_timeout(&xprt->pending, task, NULL, 8379e910bffSTrond Myklebust xprt_request_timeout(task->tk_rqstp)); 8380b9e7943STrond Myklebust 8390b9e7943STrond Myklebust if (test_bit(XPRT_CLOSING, &xprt->state)) 8400b9e7943STrond Myklebust return; 8410b9e7943STrond Myklebust if (xprt_test_and_set_connecting(xprt)) 8420b9e7943STrond Myklebust return; 8430a9a4304STrond Myklebust /* Race breaker */ 8440a9a4304STrond Myklebust if (!xprt_connected(xprt)) { 845262ca07dSChuck Lever xprt->stat.connect_start = jiffies; 8461b092092STrond Myklebust 
xprt->ops->connect(xprt, task); 8470a9a4304STrond Myklebust } else { 8480a9a4304STrond Myklebust xprt_clear_connecting(xprt); 8490a9a4304STrond Myklebust task->tk_status = 0; 8500a9a4304STrond Myklebust rpc_wake_up_queued_task(&xprt->pending, task); 8510a9a4304STrond Myklebust } 8521da177e4SLinus Torvalds } 853718ba5b8STrond Myklebust xprt_release_write(xprt, task); 8541da177e4SLinus Torvalds } 8551da177e4SLinus Torvalds 856675dd90aSChuck Lever /** 857675dd90aSChuck Lever * xprt_reconnect_delay - compute the wait before scheduling a connect 858675dd90aSChuck Lever * @xprt: transport instance 859675dd90aSChuck Lever * 860675dd90aSChuck Lever */ 861675dd90aSChuck Lever unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt) 862675dd90aSChuck Lever { 863675dd90aSChuck Lever unsigned long start, now = jiffies; 864675dd90aSChuck Lever 865675dd90aSChuck Lever start = xprt->stat.connect_start + xprt->reestablish_timeout; 866675dd90aSChuck Lever if (time_after(start, now)) 867675dd90aSChuck Lever return start - now; 868675dd90aSChuck Lever return 0; 869675dd90aSChuck Lever } 870675dd90aSChuck Lever EXPORT_SYMBOL_GPL(xprt_reconnect_delay); 871675dd90aSChuck Lever 872675dd90aSChuck Lever /** 873675dd90aSChuck Lever * xprt_reconnect_backoff - compute the new re-establish timeout 874675dd90aSChuck Lever * @xprt: transport instance 875675dd90aSChuck Lever * @init_to: initial reestablish timeout 876675dd90aSChuck Lever * 877675dd90aSChuck Lever */ 878675dd90aSChuck Lever void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to) 879675dd90aSChuck Lever { 880675dd90aSChuck Lever xprt->reestablish_timeout <<= 1; 881675dd90aSChuck Lever if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) 882675dd90aSChuck Lever xprt->reestablish_timeout = xprt->max_reconnect_timeout; 883675dd90aSChuck Lever if (xprt->reestablish_timeout < init_to) 884675dd90aSChuck Lever xprt->reestablish_timeout = init_to; 885675dd90aSChuck Lever } 886675dd90aSChuck Lever 
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff); 887675dd90aSChuck Lever 88895f7691dSTrond Myklebust enum xprt_xid_rb_cmp { 88995f7691dSTrond Myklebust XID_RB_EQUAL, 89095f7691dSTrond Myklebust XID_RB_LEFT, 89195f7691dSTrond Myklebust XID_RB_RIGHT, 89295f7691dSTrond Myklebust }; 89395f7691dSTrond Myklebust static enum xprt_xid_rb_cmp 89495f7691dSTrond Myklebust xprt_xid_cmp(__be32 xid1, __be32 xid2) 89595f7691dSTrond Myklebust { 89695f7691dSTrond Myklebust if (xid1 == xid2) 89795f7691dSTrond Myklebust return XID_RB_EQUAL; 89895f7691dSTrond Myklebust if ((__force u32)xid1 < (__force u32)xid2) 89995f7691dSTrond Myklebust return XID_RB_LEFT; 90095f7691dSTrond Myklebust return XID_RB_RIGHT; 90195f7691dSTrond Myklebust } 90295f7691dSTrond Myklebust 90395f7691dSTrond Myklebust static struct rpc_rqst * 90495f7691dSTrond Myklebust xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid) 90595f7691dSTrond Myklebust { 90695f7691dSTrond Myklebust struct rb_node *n = xprt->recv_queue.rb_node; 90795f7691dSTrond Myklebust struct rpc_rqst *req; 90895f7691dSTrond Myklebust 90995f7691dSTrond Myklebust while (n != NULL) { 91095f7691dSTrond Myklebust req = rb_entry(n, struct rpc_rqst, rq_recv); 91195f7691dSTrond Myklebust switch (xprt_xid_cmp(xid, req->rq_xid)) { 91295f7691dSTrond Myklebust case XID_RB_LEFT: 91395f7691dSTrond Myklebust n = n->rb_left; 91495f7691dSTrond Myklebust break; 91595f7691dSTrond Myklebust case XID_RB_RIGHT: 91695f7691dSTrond Myklebust n = n->rb_right; 91795f7691dSTrond Myklebust break; 91895f7691dSTrond Myklebust case XID_RB_EQUAL: 91995f7691dSTrond Myklebust return req; 92095f7691dSTrond Myklebust } 92195f7691dSTrond Myklebust } 92295f7691dSTrond Myklebust return NULL; 92395f7691dSTrond Myklebust } 92495f7691dSTrond Myklebust 92595f7691dSTrond Myklebust static void 92695f7691dSTrond Myklebust xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new) 92795f7691dSTrond Myklebust { 92895f7691dSTrond Myklebust struct rb_node **p = &xprt->recv_queue.rb_node; 
92995f7691dSTrond Myklebust struct rb_node *n = NULL; 93095f7691dSTrond Myklebust struct rpc_rqst *req; 93195f7691dSTrond Myklebust 93295f7691dSTrond Myklebust while (*p != NULL) { 93395f7691dSTrond Myklebust n = *p; 93495f7691dSTrond Myklebust req = rb_entry(n, struct rpc_rqst, rq_recv); 93595f7691dSTrond Myklebust switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) { 93695f7691dSTrond Myklebust case XID_RB_LEFT: 93795f7691dSTrond Myklebust p = &n->rb_left; 93895f7691dSTrond Myklebust break; 93995f7691dSTrond Myklebust case XID_RB_RIGHT: 94095f7691dSTrond Myklebust p = &n->rb_right; 94195f7691dSTrond Myklebust break; 94295f7691dSTrond Myklebust case XID_RB_EQUAL: 94395f7691dSTrond Myklebust WARN_ON_ONCE(new != req); 94495f7691dSTrond Myklebust return; 94595f7691dSTrond Myklebust } 94695f7691dSTrond Myklebust } 94795f7691dSTrond Myklebust rb_link_node(&new->rq_recv, n, p); 94895f7691dSTrond Myklebust rb_insert_color(&new->rq_recv, &xprt->recv_queue); 94995f7691dSTrond Myklebust } 95095f7691dSTrond Myklebust 95195f7691dSTrond Myklebust static void 95295f7691dSTrond Myklebust xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req) 95395f7691dSTrond Myklebust { 95495f7691dSTrond Myklebust rb_erase(&req->rq_recv, &xprt->recv_queue); 95595f7691dSTrond Myklebust } 95695f7691dSTrond Myklebust 9579903cd1cSChuck Lever /** 9589903cd1cSChuck Lever * xprt_lookup_rqst - find an RPC request corresponding to an XID 9599903cd1cSChuck Lever * @xprt: transport on which the original request was transmitted 9609903cd1cSChuck Lever * @xid: RPC XID of incoming reply 9619903cd1cSChuck Lever * 96275c84151STrond Myklebust * Caller holds xprt->queue_lock. 
9631da177e4SLinus Torvalds */ 964d8ed029dSAlexey Dobriyan struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) 9651da177e4SLinus Torvalds { 9668f3a6de3SPavel Emelyanov struct rpc_rqst *entry; 9671da177e4SLinus Torvalds 96895f7691dSTrond Myklebust entry = xprt_request_rb_find(xprt, xid); 96995f7691dSTrond Myklebust if (entry != NULL) { 9703705ad64SJeff Layton trace_xprt_lookup_rqst(xprt, xid, 0); 9710b87a46bSChuck Lever entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime); 972262ca07dSChuck Lever return entry; 9733705ad64SJeff Layton } 97446121cf7SChuck Lever 97546121cf7SChuck Lever dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", 97646121cf7SChuck Lever ntohl(xid)); 9773705ad64SJeff Layton trace_xprt_lookup_rqst(xprt, xid, -ENOENT); 978262ca07dSChuck Lever xprt->stat.bad_xids++; 979262ca07dSChuck Lever return NULL; 9801da177e4SLinus Torvalds } 98112444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_lookup_rqst); 9821da177e4SLinus Torvalds 983cf9946cdSTrond Myklebust static bool 984cf9946cdSTrond Myklebust xprt_is_pinned_rqst(struct rpc_rqst *req) 985cf9946cdSTrond Myklebust { 986cf9946cdSTrond Myklebust return atomic_read(&req->rq_pin) != 0; 987cf9946cdSTrond Myklebust } 988cf9946cdSTrond Myklebust 989729749bbSTrond Myklebust /** 990729749bbSTrond Myklebust * xprt_pin_rqst - Pin a request on the transport receive list 991729749bbSTrond Myklebust * @req: Request to pin 992729749bbSTrond Myklebust * 993729749bbSTrond Myklebust * Caller must ensure this is atomic with the call to xprt_lookup_rqst() 9941f7d1c73SChuck Lever * so should be holding xprt->queue_lock. 
995729749bbSTrond Myklebust */ 996729749bbSTrond Myklebust void xprt_pin_rqst(struct rpc_rqst *req) 997729749bbSTrond Myklebust { 998cf9946cdSTrond Myklebust atomic_inc(&req->rq_pin); 999729749bbSTrond Myklebust } 10009590d083SChuck Lever EXPORT_SYMBOL_GPL(xprt_pin_rqst); 1001729749bbSTrond Myklebust 1002729749bbSTrond Myklebust /** 1003729749bbSTrond Myklebust * xprt_unpin_rqst - Unpin a request on the transport receive list 1004729749bbSTrond Myklebust * @req: Request to pin 1005729749bbSTrond Myklebust * 10061f7d1c73SChuck Lever * Caller should be holding xprt->queue_lock. 1007729749bbSTrond Myklebust */ 1008729749bbSTrond Myklebust void xprt_unpin_rqst(struct rpc_rqst *req) 1009729749bbSTrond Myklebust { 1010cf9946cdSTrond Myklebust if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) { 1011cf9946cdSTrond Myklebust atomic_dec(&req->rq_pin); 1012cf9946cdSTrond Myklebust return; 1013cf9946cdSTrond Myklebust } 1014cf9946cdSTrond Myklebust if (atomic_dec_and_test(&req->rq_pin)) 1015cf9946cdSTrond Myklebust wake_up_var(&req->rq_pin); 1016729749bbSTrond Myklebust } 10179590d083SChuck Lever EXPORT_SYMBOL_GPL(xprt_unpin_rqst); 1018729749bbSTrond Myklebust 1019729749bbSTrond Myklebust static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req) 1020729749bbSTrond Myklebust { 1021cf9946cdSTrond Myklebust wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req)); 1022729749bbSTrond Myklebust } 1023729749bbSTrond Myklebust 1024edc81dcdSTrond Myklebust static bool 1025edc81dcdSTrond Myklebust xprt_request_data_received(struct rpc_task *task) 1026edc81dcdSTrond Myklebust { 1027edc81dcdSTrond Myklebust return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) && 1028edc81dcdSTrond Myklebust READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0; 1029edc81dcdSTrond Myklebust } 1030edc81dcdSTrond Myklebust 1031edc81dcdSTrond Myklebust static bool 1032edc81dcdSTrond Myklebust xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req) 1033edc81dcdSTrond 
Myklebust { 1034edc81dcdSTrond Myklebust return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) && 1035edc81dcdSTrond Myklebust READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0; 1036edc81dcdSTrond Myklebust } 1037edc81dcdSTrond Myklebust 1038edc81dcdSTrond Myklebust /** 1039edc81dcdSTrond Myklebust * xprt_request_enqueue_receive - Add an request to the receive queue 1040edc81dcdSTrond Myklebust * @task: RPC task 1041edc81dcdSTrond Myklebust * 1042edc81dcdSTrond Myklebust */ 1043edc81dcdSTrond Myklebust void 1044edc81dcdSTrond Myklebust xprt_request_enqueue_receive(struct rpc_task *task) 1045edc81dcdSTrond Myklebust { 1046edc81dcdSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 1047edc81dcdSTrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 1048edc81dcdSTrond Myklebust 1049edc81dcdSTrond Myklebust if (!xprt_request_need_enqueue_receive(task, req)) 1050edc81dcdSTrond Myklebust return; 105175369089STrond Myklebust 105275369089STrond Myklebust xprt_request_prepare(task->tk_rqstp); 1053edc81dcdSTrond Myklebust spin_lock(&xprt->queue_lock); 1054edc81dcdSTrond Myklebust 1055edc81dcdSTrond Myklebust /* Update the softirq receive buffer */ 1056edc81dcdSTrond Myklebust memcpy(&req->rq_private_buf, &req->rq_rcv_buf, 1057edc81dcdSTrond Myklebust sizeof(req->rq_private_buf)); 1058edc81dcdSTrond Myklebust 1059edc81dcdSTrond Myklebust /* Add request to the receive list */ 106095f7691dSTrond Myklebust xprt_request_rb_insert(xprt, req); 1061edc81dcdSTrond Myklebust set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate); 1062edc81dcdSTrond Myklebust spin_unlock(&xprt->queue_lock); 1063edc81dcdSTrond Myklebust 1064edc81dcdSTrond Myklebust /* Turn off autodisconnect */ 1065edc81dcdSTrond Myklebust del_singleshot_timer_sync(&xprt->timer); 1066edc81dcdSTrond Myklebust } 1067edc81dcdSTrond Myklebust 1068edc81dcdSTrond Myklebust /** 1069edc81dcdSTrond Myklebust * xprt_request_dequeue_receive_locked - Remove a request from the receive queue 1070edc81dcdSTrond Myklebust * @task: 
RPC task 1071edc81dcdSTrond Myklebust * 1072edc81dcdSTrond Myklebust * Caller must hold xprt->queue_lock. 1073edc81dcdSTrond Myklebust */ 1074edc81dcdSTrond Myklebust static void 1075edc81dcdSTrond Myklebust xprt_request_dequeue_receive_locked(struct rpc_task *task) 1076edc81dcdSTrond Myklebust { 107795f7691dSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 107895f7691dSTrond Myklebust 1079edc81dcdSTrond Myklebust if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) 108095f7691dSTrond Myklebust xprt_request_rb_remove(req->rq_xprt, req); 1081edc81dcdSTrond Myklebust } 1082edc81dcdSTrond Myklebust 1083ecd465eeSChuck Lever /** 1084ecd465eeSChuck Lever * xprt_update_rtt - Update RPC RTT statistics 1085ecd465eeSChuck Lever * @task: RPC request that recently completed 1086ecd465eeSChuck Lever * 108775c84151STrond Myklebust * Caller holds xprt->queue_lock. 1088ecd465eeSChuck Lever */ 1089ecd465eeSChuck Lever void xprt_update_rtt(struct rpc_task *task) 10901da177e4SLinus Torvalds { 10911570c1e4SChuck Lever struct rpc_rqst *req = task->tk_rqstp; 10921570c1e4SChuck Lever struct rpc_rtt *rtt = task->tk_client->cl_rtt; 109395c96174SEric Dumazet unsigned int timer = task->tk_msg.rpc_proc->p_timer; 1094d60dbb20STrond Myklebust long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); 10951570c1e4SChuck Lever 10961da177e4SLinus Torvalds if (timer) { 10971da177e4SLinus Torvalds if (req->rq_ntrans == 1) 1098ff839970SChuck Lever rpc_update_rtt(rtt, timer, m); 10991570c1e4SChuck Lever rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); 11001da177e4SLinus Torvalds } 11011da177e4SLinus Torvalds } 1102ecd465eeSChuck Lever EXPORT_SYMBOL_GPL(xprt_update_rtt); 11031da177e4SLinus Torvalds 11041570c1e4SChuck Lever /** 11051570c1e4SChuck Lever * xprt_complete_rqst - called when reply processing is complete 11061570c1e4SChuck Lever * @task: RPC request that recently completed 11071570c1e4SChuck Lever * @copied: actual number of bytes received from the transport 11081570c1e4SChuck Lever 
* 110975c84151STrond Myklebust * Caller holds xprt->queue_lock. 11101570c1e4SChuck Lever */ 11111570c1e4SChuck Lever void xprt_complete_rqst(struct rpc_task *task, int copied) 11121570c1e4SChuck Lever { 11131570c1e4SChuck Lever struct rpc_rqst *req = task->tk_rqstp; 1114fda13939STrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 11151da177e4SLinus Torvalds 11161570c1e4SChuck Lever dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", 11171570c1e4SChuck Lever task->tk_pid, ntohl(req->rq_xid), copied); 11183705ad64SJeff Layton trace_xprt_complete_rqst(xprt, req->rq_xid, copied); 11191da177e4SLinus Torvalds 1120fda13939STrond Myklebust xprt->stat.recvs++; 1121ef759a2eSChuck Lever 11221e799b67STrond Myklebust req->rq_private_buf.len = copied; 1123dd2b63d0SRicardo Labiaga /* Ensure all writes are done before we update */ 1124dd2b63d0SRicardo Labiaga /* req->rq_reply_bytes_recvd */ 112543ac3f29STrond Myklebust smp_wmb(); 1126dd2b63d0SRicardo Labiaga req->rq_reply_bytes_recvd = copied; 1127edc81dcdSTrond Myklebust xprt_request_dequeue_receive_locked(task); 1128fda13939STrond Myklebust rpc_wake_up_queued_task(&xprt->pending, task); 11291da177e4SLinus Torvalds } 113012444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_complete_rqst); 11311da177e4SLinus Torvalds 113246c0ee8bSChuck Lever static void xprt_timer(struct rpc_task *task) 11331da177e4SLinus Torvalds { 11341da177e4SLinus Torvalds struct rpc_rqst *req = task->tk_rqstp; 11351da177e4SLinus Torvalds struct rpc_xprt *xprt = req->rq_xprt; 11361da177e4SLinus Torvalds 11375d00837bSTrond Myklebust if (task->tk_status != -ETIMEDOUT) 11385d00837bSTrond Myklebust return; 113946c0ee8bSChuck Lever 114082476d9fSChuck Lever trace_xprt_timer(xprt, req->rq_xid, task->tk_status); 1141dd2b63d0SRicardo Labiaga if (!req->rq_reply_bytes_recvd) { 114246c0ee8bSChuck Lever if (xprt->ops->timer) 11436a24dfb6STrond Myklebust xprt->ops->timer(xprt, task); 11445d00837bSTrond Myklebust } else 11455d00837bSTrond Myklebust task->tk_status = 
0; 11461da177e4SLinus Torvalds } 11471da177e4SLinus Torvalds 11489903cd1cSChuck Lever /** 11498ba6a92dSTrond Myklebust * xprt_wait_for_reply_request_def - wait for reply 11508ba6a92dSTrond Myklebust * @task: pointer to rpc_task 11518ba6a92dSTrond Myklebust * 11528ba6a92dSTrond Myklebust * Set a request's retransmit timeout based on the transport's 11538ba6a92dSTrond Myklebust * default timeout parameters. Used by transports that don't adjust 11548ba6a92dSTrond Myklebust * the retransmit timeout based on round-trip time estimation, 11558ba6a92dSTrond Myklebust * and put the task to sleep on the pending queue. 11568ba6a92dSTrond Myklebust */ 11578ba6a92dSTrond Myklebust void xprt_wait_for_reply_request_def(struct rpc_task *task) 11588ba6a92dSTrond Myklebust { 11598ba6a92dSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 11608ba6a92dSTrond Myklebust 11616b2e6856STrond Myklebust rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer, 11629e910bffSTrond Myklebust xprt_request_timeout(req)); 11638ba6a92dSTrond Myklebust } 11648ba6a92dSTrond Myklebust EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def); 11658ba6a92dSTrond Myklebust 11668ba6a92dSTrond Myklebust /** 11678ba6a92dSTrond Myklebust * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator 11688ba6a92dSTrond Myklebust * @task: pointer to rpc_task 11698ba6a92dSTrond Myklebust * 11708ba6a92dSTrond Myklebust * Set a request's retransmit timeout using the RTT estimator, 11718ba6a92dSTrond Myklebust * and put the task to sleep on the pending queue. 
11728ba6a92dSTrond Myklebust */ 11738ba6a92dSTrond Myklebust void xprt_wait_for_reply_request_rtt(struct rpc_task *task) 11748ba6a92dSTrond Myklebust { 11758ba6a92dSTrond Myklebust int timer = task->tk_msg.rpc_proc->p_timer; 11768ba6a92dSTrond Myklebust struct rpc_clnt *clnt = task->tk_client; 11778ba6a92dSTrond Myklebust struct rpc_rtt *rtt = clnt->cl_rtt; 11788ba6a92dSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 11798ba6a92dSTrond Myklebust unsigned long max_timeout = clnt->cl_timeout->to_maxval; 11806b2e6856STrond Myklebust unsigned long timeout; 11818ba6a92dSTrond Myklebust 11826b2e6856STrond Myklebust timeout = rpc_calc_rto(rtt, timer); 11836b2e6856STrond Myklebust timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; 11846b2e6856STrond Myklebust if (timeout > max_timeout || timeout == 0) 11856b2e6856STrond Myklebust timeout = max_timeout; 11866b2e6856STrond Myklebust rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer, 11876b2e6856STrond Myklebust jiffies + timeout); 11888ba6a92dSTrond Myklebust } 11898ba6a92dSTrond Myklebust EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt); 11908ba6a92dSTrond Myklebust 11918ba6a92dSTrond Myklebust /** 11927f3a1d1eSTrond Myklebust * xprt_request_wait_receive - wait for the reply to an RPC request 11937f3a1d1eSTrond Myklebust * @task: RPC task about to send a request 11947f3a1d1eSTrond Myklebust * 11957f3a1d1eSTrond Myklebust */ 11967f3a1d1eSTrond Myklebust void xprt_request_wait_receive(struct rpc_task *task) 11977f3a1d1eSTrond Myklebust { 11987f3a1d1eSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 11997f3a1d1eSTrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 12007f3a1d1eSTrond Myklebust 12017f3a1d1eSTrond Myklebust if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) 12027f3a1d1eSTrond Myklebust return; 12037f3a1d1eSTrond Myklebust /* 12047f3a1d1eSTrond Myklebust * Sleep on the pending queue if we're expecting a reply. 
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

/*
 * Return true if @task still needs to be added to the transmit queue,
 * i.e. RPC_TASK_NEED_XMIT is not yet set. @req is currently unused.
 */
static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			/*
			 * Swap-out tasks jump ahead of ordinary requests that
			 * have not started transmitting (rq_bytes_sent == 0),
			 * but never ahead of congestion-credited or other
			 * swapper requests.
			 */
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			/*
			 * NOTE(review): !rq_seqno presumably means no RPCSEC_GSS
			 * sequence number has been assigned yet — confirm.
			 * Requests sharing tk_owner are chained on rq_xmit2
			 * behind the queued request instead of being queued
			 * separately.
			 */
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		/* Default: append to the tail of the transmit queue */
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
		trace_xprt_enq_xmit(task, 4);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		/*
		 * req was on the main transmit queue. If it headed an
		 * rq_xmit2 chain of same-owner requests, promote the next
		 * chained request onto the main queue in its place.
		 */
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		/* req was only a member of another request's rq_xmit2 chain */
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		/*
		 * The queue_lock must be dropped while waiting for the
		 * pin holder; re-test the pin state after re-acquiring it.
		 */
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}
/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	/* prepare_request is optional; transports may leave it NULL */
	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;

	}
	return true;
}

/* Release the transport write lock taken by xprt_prepare_transmit() */
void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		/* Nothing sent yet: the reply may already have arrived */
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (task->tk_ops->rpc_call_prepare_transmit) {
			task->tk_ops->rpc_call_prepare_transmit(task,
					task->tk_calldata);
			status = task->tk_status;
			if (status < 0)
				goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		/* Transmission failed: undo the rq_ntrans bump above */
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	/* Transport-level statistics, protected by transport_lock */
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	/* Record the connection generation this request was sent on */
	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		/* Pin next so it survives while queue_lock is dropped */
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		/* An encode error in someone else's request is not ours */
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

/* Mark the transport congested and put @task to sleep on the backlog queue */
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

/* Wake one backlogged task; clear XPRT_CONGESTED once the backlog is empty */
static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

/*
 * If the transport is congested, queue @task on the backlog and return true.
 * The flag is re-tested under reserve_lock to close the race with
 * xprt_wake_up_backlog().
 */
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

/*
 * Allocate an extra slot beyond the preallocated set, up to xprt->max_reqs.
 * Returns ERR_PTR(-EAGAIN) when the limit is reached, ERR_PTR(-ENOMEM) on
 * allocation failure. Called and returns with xprt->reserve_lock held, but
 * drops it around the (possibly sleeping) allocation.
 */
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	/* Allocation failed: give back the slot count we reserved above */
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

/*
 * Free a dynamically allocated slot if we are above the preallocated
 * minimum. Returns false if the slot should instead be returned to the
 * free list. Caller holds xprt->reserve_lock.
 */
static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

/*
 * Default ->alloc_slot implementation: take a request slot from the free
 * list, or allocate one dynamically. On failure, sets task->tk_status to
 * -ENOMEM or -EAGAIN (queuing the task on the backlog in the -EAGAIN case).
 */
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
			xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

/*
 * Default ->free_slot implementation: return @req to the free list (or
 * free it entirely if dynamically allocated), then wake the backlog.
 */
void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);
/* Free every request slot remaining on the transport's free list */
static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

/*
 * Allocate and initialize a transport structure of @size bytes, with
 * @num_prealloc request slots preallocated and a dynamic-allocation
 * ceiling of max(@max_alloc, @num_prealloc). Returns NULL on failure.
 */
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	/* xprt_free() also releases any slots added to xprt->free above */
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

/* Release a transport allocated by xprt_alloc() (RCU-deferred kfree) */
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

/*
 * Initialize rq_connect_cookie to one less than the transport's current
 * cookie, so the request initially appears to need (re)transmission.
 */
static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

/* Hand out the next XID; reserve_lock serializes the counter increment */
static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

/* Seed the XID counter with a random starting value */
static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

/* Initialize a freshly allocated slot for use by @task */
static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/* Allocate a slot via the transport's ->alloc_slot and initialize it */
static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}
/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		/* No slot held: just drop the write lock if we own one */
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	/* Backchannel-preallocated requests are recycled separately */
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
/* Bind a backchannel request to @task and finalize its send buffer length */
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

/* Common initialization for a newly allocated transport structure */
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}
xprt_create_transport - create an RPC transport 18898d9266ffSTrond Myklebust * @args: rpc transport creation arguments 18908d9266ffSTrond Myklebust * 18918d9266ffSTrond Myklebust */ 18928d9266ffSTrond Myklebust struct rpc_xprt *xprt_create_transport(struct xprt_create *args) 18938d9266ffSTrond Myklebust { 18948d9266ffSTrond Myklebust struct rpc_xprt *xprt; 18958d9266ffSTrond Myklebust struct xprt_class *t; 18968d9266ffSTrond Myklebust 18978d9266ffSTrond Myklebust spin_lock(&xprt_list_lock); 18988d9266ffSTrond Myklebust list_for_each_entry(t, &xprt_list, list) { 18998d9266ffSTrond Myklebust if (t->ident == args->ident) { 19008d9266ffSTrond Myklebust spin_unlock(&xprt_list_lock); 19018d9266ffSTrond Myklebust goto found; 19028d9266ffSTrond Myklebust } 19038d9266ffSTrond Myklebust } 19048d9266ffSTrond Myklebust spin_unlock(&xprt_list_lock); 19053c45ddf8SChuck Lever dprintk("RPC: transport (%d) not supported\n", args->ident); 19068d9266ffSTrond Myklebust return ERR_PTR(-EIO); 19078d9266ffSTrond Myklebust 19088d9266ffSTrond Myklebust found: 19098d9266ffSTrond Myklebust xprt = t->setup(args); 19108d9266ffSTrond Myklebust if (IS_ERR(xprt)) { 19118d9266ffSTrond Myklebust dprintk("RPC: xprt_create_transport: failed, %ld\n", 19128d9266ffSTrond Myklebust -PTR_ERR(xprt)); 191321de0a95STrond Myklebust goto out; 19148d9266ffSTrond Myklebust } 191533d90ac0SJ. Bruce Fields if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT) 191633d90ac0SJ. 
Bruce Fields xprt->idle_timeout = 0; 191721de0a95STrond Myklebust INIT_WORK(&xprt->task_cleanup, xprt_autoclose); 191821de0a95STrond Myklebust if (xprt_has_timer(xprt)) 1919502980e8SAnna Schumaker timer_setup(&xprt->timer, xprt_init_autodisconnect, 0); 192021de0a95STrond Myklebust else 1921ff861c4dSKees Cook timer_setup(&xprt->timer, NULL, 0); 19224e0038b6STrond Myklebust 19234e0038b6STrond Myklebust if (strlen(args->servername) > RPC_MAXNETNAMELEN) { 19244e0038b6STrond Myklebust xprt_destroy(xprt); 19254e0038b6STrond Myklebust return ERR_PTR(-EINVAL); 19264e0038b6STrond Myklebust } 19274e0038b6STrond Myklebust xprt->servername = kstrdup(args->servername, GFP_KERNEL); 19284e0038b6STrond Myklebust if (xprt->servername == NULL) { 19294e0038b6STrond Myklebust xprt_destroy(xprt); 19304e0038b6STrond Myklebust return ERR_PTR(-ENOMEM); 19314e0038b6STrond Myklebust } 19324e0038b6STrond Myklebust 19333f940098SJeff Layton rpc_xprt_debugfs_register(xprt); 1934388f0c77SJeff Layton 1935c2866763SChuck Lever dprintk("RPC: created transport %p with %u slots\n", xprt, 1936c2866763SChuck Lever xprt->max_reqs); 193721de0a95STrond Myklebust out: 1938c2866763SChuck Lever return xprt; 1939c2866763SChuck Lever } 1940c2866763SChuck Lever 1941528fd354STrond Myklebust static void xprt_destroy_cb(struct work_struct *work) 1942528fd354STrond Myklebust { 1943528fd354STrond Myklebust struct rpc_xprt *xprt = 1944528fd354STrond Myklebust container_of(work, struct rpc_xprt, task_cleanup); 1945528fd354STrond Myklebust 1946528fd354STrond Myklebust rpc_xprt_debugfs_unregister(xprt); 1947528fd354STrond Myklebust rpc_destroy_wait_queue(&xprt->binding); 1948528fd354STrond Myklebust rpc_destroy_wait_queue(&xprt->pending); 1949528fd354STrond Myklebust rpc_destroy_wait_queue(&xprt->sending); 1950528fd354STrond Myklebust rpc_destroy_wait_queue(&xprt->backlog); 1951528fd354STrond Myklebust kfree(xprt->servername); 1952528fd354STrond Myklebust /* 1953528fd354STrond Myklebust * Tear down transport state and 
free the rpc_xprt 1954528fd354STrond Myklebust */ 1955528fd354STrond Myklebust xprt->ops->destroy(xprt); 1956528fd354STrond Myklebust } 1957528fd354STrond Myklebust 19589903cd1cSChuck Lever /** 19599903cd1cSChuck Lever * xprt_destroy - destroy an RPC transport, killing off all requests. 1960a8de240aSTrond Myklebust * @xprt: transport to destroy 19619903cd1cSChuck Lever * 19621da177e4SLinus Torvalds */ 1963a8de240aSTrond Myklebust static void xprt_destroy(struct rpc_xprt *xprt) 19641da177e4SLinus Torvalds { 19651da177e4SLinus Torvalds dprintk("RPC: destroying transport %p\n", xprt); 196679234c3dSTrond Myklebust 1967528fd354STrond Myklebust /* 1968528fd354STrond Myklebust * Exclude transport connect/disconnect handlers and autoclose 1969528fd354STrond Myklebust */ 197079234c3dSTrond Myklebust wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); 197179234c3dSTrond Myklebust 19720065db32STrond Myklebust del_timer_sync(&xprt->timer); 1973c8541ecdSChuck Lever 1974c8541ecdSChuck Lever /* 1975528fd354STrond Myklebust * Destroy sockets etc from the system workqueue so they can 1976528fd354STrond Myklebust * safely flush receive work running on rpciod. 1977c8541ecdSChuck Lever */ 1978528fd354STrond Myklebust INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb); 1979528fd354STrond Myklebust schedule_work(&xprt->task_cleanup); 19806b6ca86bSTrond Myklebust } 19811da177e4SLinus Torvalds 198230c5116bSTrond Myklebust static void xprt_destroy_kref(struct kref *kref) 198330c5116bSTrond Myklebust { 198430c5116bSTrond Myklebust xprt_destroy(container_of(kref, struct rpc_xprt, kref)); 198530c5116bSTrond Myklebust } 198630c5116bSTrond Myklebust 198730c5116bSTrond Myklebust /** 198830c5116bSTrond Myklebust * xprt_get - return a reference to an RPC transport. 
198930c5116bSTrond Myklebust * @xprt: pointer to the transport 199030c5116bSTrond Myklebust * 199130c5116bSTrond Myklebust */ 199230c5116bSTrond Myklebust struct rpc_xprt *xprt_get(struct rpc_xprt *xprt) 199330c5116bSTrond Myklebust { 199430c5116bSTrond Myklebust if (xprt != NULL && kref_get_unless_zero(&xprt->kref)) 199530c5116bSTrond Myklebust return xprt; 199630c5116bSTrond Myklebust return NULL; 199730c5116bSTrond Myklebust } 199830c5116bSTrond Myklebust EXPORT_SYMBOL_GPL(xprt_get); 199930c5116bSTrond Myklebust 20006b6ca86bSTrond Myklebust /** 20016b6ca86bSTrond Myklebust * xprt_put - release a reference to an RPC transport. 20026b6ca86bSTrond Myklebust * @xprt: pointer to the transport 20036b6ca86bSTrond Myklebust * 20046b6ca86bSTrond Myklebust */ 20056b6ca86bSTrond Myklebust void xprt_put(struct rpc_xprt *xprt) 20066b6ca86bSTrond Myklebust { 200730c5116bSTrond Myklebust if (xprt != NULL) 200830c5116bSTrond Myklebust kref_put(&xprt->kref, xprt_destroy_kref); 20096b6ca86bSTrond Myklebust } 20105d252f90SChuck Lever EXPORT_SYMBOL_GPL(xprt_put); 2011