1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * linux/net/sunrpc/xprt.c 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * This is a generic RPC call interface supporting congestion avoidance, 61da177e4SLinus Torvalds * and asynchronous calls. 71da177e4SLinus Torvalds * 81da177e4SLinus Torvalds * The interface works like this: 91da177e4SLinus Torvalds * 101da177e4SLinus Torvalds * - When a process places a call, it allocates a request slot if 111da177e4SLinus Torvalds * one is available. Otherwise, it sleeps on the backlog queue 121da177e4SLinus Torvalds * (xprt_reserve). 131da177e4SLinus Torvalds * - Next, the caller puts together the RPC message, stuffs it into 1455aa4f58SChuck Lever * the request struct, and calls xprt_transmit(). 1555aa4f58SChuck Lever * - xprt_transmit sends the message and installs the caller on the 1655ae1aabSRicardo Labiaga * transport's wait list. At the same time, if a reply is expected, 1755ae1aabSRicardo Labiaga * it installs a timer that is run after the packet's timeout has 1855ae1aabSRicardo Labiaga * expired. 191da177e4SLinus Torvalds * - When a packet arrives, the data_ready handler walks the list of 2055aa4f58SChuck Lever * pending requests for that transport. If a matching XID is found, the 211da177e4SLinus Torvalds * caller is woken up, and the timer removed. 221da177e4SLinus Torvalds * - When no reply arrives within the timeout interval, the timer is 231da177e4SLinus Torvalds * fired by the kernel and runs xprt_timer(). It either adjusts the 241da177e4SLinus Torvalds * timeout values (minor timeout) or wakes up the caller with a status 251da177e4SLinus Torvalds * of -ETIMEDOUT. 261da177e4SLinus Torvalds * - When the caller receives a notification from RPC that a reply arrived, 271da177e4SLinus Torvalds * it should release the RPC slot, and process the reply. 
281da177e4SLinus Torvalds * If the call timed out, it may choose to retry the operation by 291da177e4SLinus Torvalds * adjusting the initial timeout value, and simply calling rpc_call 301da177e4SLinus Torvalds * again. 311da177e4SLinus Torvalds * 321da177e4SLinus Torvalds * Support for async RPC is done through a set of RPC-specific scheduling 331da177e4SLinus Torvalds * primitives that `transparently' work for processes as well as async 341da177e4SLinus Torvalds * tasks that rely on callbacks. 351da177e4SLinus Torvalds * 361da177e4SLinus Torvalds * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de> 3755aa4f58SChuck Lever * 3855aa4f58SChuck Lever * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com> 391da177e4SLinus Torvalds */ 401da177e4SLinus Torvalds 41a246b010SChuck Lever #include <linux/module.h> 42a246b010SChuck Lever 431da177e4SLinus Torvalds #include <linux/types.h> 44a246b010SChuck Lever #include <linux/interrupt.h> 451da177e4SLinus Torvalds #include <linux/workqueue.h> 46bf3fcf89SChuck Lever #include <linux/net.h> 47ff839970SChuck Lever #include <linux/ktime.h> 481da177e4SLinus Torvalds 49a246b010SChuck Lever #include <linux/sunrpc/clnt.h> 5011c556b3SChuck Lever #include <linux/sunrpc/metrics.h> 51c9acb42eSTrond Myklebust #include <linux/sunrpc/bc_xprt.h> 52fda1bfefSTrond Myklebust #include <linux/rcupdate.h> 53a1231fdaSTrond Myklebust #include <linux/sched/mm.h> 541da177e4SLinus Torvalds 553705ad64SJeff Layton #include <trace/events/sunrpc.h> 563705ad64SJeff Layton 5755ae1aabSRicardo Labiaga #include "sunrpc.h" 58587bc725SOlga Kornievskaia #include "sysfs.h" 59a4ae3081SChuck Lever #include "fail.h" 6055ae1aabSRicardo Labiaga 611da177e4SLinus Torvalds /* 621da177e4SLinus Torvalds * Local variables 631da177e4SLinus Torvalds */ 641da177e4SLinus Torvalds 65f895b252SJeff Layton #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 661da177e4SLinus Torvalds # define RPCDBG_FACILITY RPCDBG_XPRT 671da177e4SLinus Torvalds #endif 681da177e4SLinus Torvalds 
/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);
static void	xprt_request_init(struct rpc_task *task);
static int	xprt_request_prepare(struct rpc_rqst *req);

/* Protects xprt_list, the registry of loaded transport implementations */
static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * Absolute time (in jiffies) at which @req should next time out: the
 * sooner of the per-retransmit timeout and the overall major timeout.
 */
static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/* Drop the module reference taken by the xprt_class_find_by_* helpers */
static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

/*
 * Look up a transport class by its identifier.  Caller must hold
 * xprt_list_lock.  On success a module reference is held; release it
 * with xprt_class_release().
 */
static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

/*
 * Look up a transport class by netid.  Each class advertises a
 * NUL-terminated table of netid strings.  Caller must hold
 * xprt_list_lock; on success a module reference is held.
 */
static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		/* Not loaded yet: try to pull in the "rpc<netid>" module
		 * and then look again. */
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_find_transport_ident - convert a netid into a transport identifier
 * @netid: transport to load
 *
 * Returns:
 * > 0:		transport identifier
 * -ENOENT:	transport module not available
 */
int xprt_find_transport_ident(const char *netid)
{
	const struct xprt_class *t;
	int ret;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	ret = t->ident;
	xprt_class_release(t);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);

/*
 * Drop the transport send lock.  If a close is pending, leave
 * XPRT_LOCKED set and queue the task_cleanup work instead, so the
 * cleanup path runs with the lock still held.
 */
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state))
		clear_bit_unlock(XPRT_LOCKED, &xprt->state);
	else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		/* The lock holder may re-enter without blocking */
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	/* Soft tasks sleep with a deadline so they can still time out */
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * Mark the transport as congestion-window limited, unless the request
 * at the head of the transmit queue already holds congestion credits
 * and can therefore still make progress.
 */
static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

/* Clear the congestion-window wait flag once the window has room again */
static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		/* No request attached yet: no congestion check needed */
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

/*
 * Take the send lock for @task via the transport's reserve_xprt op,
 * under the transport_lock.  Fast path: the task already holds it.
 */
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

/* Wake-queue callback: grant the send lock to the task being woken */
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

/*
 * Hand the send lock to the next task sleeping on xprt->sending,
 * if any; otherwise release the lock again.
 */
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/*
 * As __xprt_lock_write_next, but do not hand over the lock while the
 * transport is congestion-window limited.
 */
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

/*
 * Release the send lock held by @task via the transport's release_xprt
 * op, under the transport_lock.
 */
void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/* Caller is expected to hold xprt->transport_lock */
static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		/* Retransmit timed out: halve the window */
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/* Convert an absolute ktime into the corresponding jiffies value */
static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

/*
 * Total retransmit interval for @req once all retries are exhausted,
 * capped at the client's to_maxval.
 */
static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
670da953063STrond Myklebust req->rq_majortimeo += xprt_calc_majortimeo(req); 671da953063STrond Myklebust } 672da953063STrond Myklebust 6737de62bc0SOlga Kornievskaia static void xprt_reset_minortimeo(struct rpc_rqst *req) 6747de62bc0SOlga Kornievskaia { 6757de62bc0SOlga Kornievskaia req->rq_minortimeo += req->rq_timeout; 6767de62bc0SOlga Kornievskaia } 6777de62bc0SOlga Kornievskaia 678da953063STrond Myklebust static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req) 679da953063STrond Myklebust { 680da953063STrond Myklebust unsigned long time_init; 681da953063STrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 682da953063STrond Myklebust 683da953063STrond Myklebust if (likely(xprt && xprt_connected(xprt))) 684da953063STrond Myklebust time_init = jiffies; 685da953063STrond Myklebust else 686da953063STrond Myklebust time_init = xprt_abs_ktime_to_jiffies(task->tk_start); 687da953063STrond Myklebust req->rq_timeout = task->tk_client->cl_timeout->to_initval; 688da953063STrond Myklebust req->rq_majortimeo = time_init + xprt_calc_majortimeo(req); 6897de62bc0SOlga Kornievskaia req->rq_minortimeo = time_init + req->rq_timeout; 6901da177e4SLinus Torvalds } 6911da177e4SLinus Torvalds 6929903cd1cSChuck Lever /** 6939903cd1cSChuck Lever * xprt_adjust_timeout - adjust timeout values for next retransmit 6949903cd1cSChuck Lever * @req: RPC request containing parameters to use for the adjustment 6959903cd1cSChuck Lever * 6961da177e4SLinus Torvalds */ 6971da177e4SLinus Torvalds int xprt_adjust_timeout(struct rpc_rqst *req) 6981da177e4SLinus Torvalds { 6991da177e4SLinus Torvalds struct rpc_xprt *xprt = req->rq_xprt; 700ba7392bbSTrond Myklebust const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout; 7011da177e4SLinus Torvalds int status = 0; 7021da177e4SLinus Torvalds 70309252177SChris Dion if (time_before(jiffies, req->rq_majortimeo)) { 7047de62bc0SOlga Kornievskaia if (time_before(jiffies, req->rq_minortimeo)) 7057de62bc0SOlga Kornievskaia return 
status; 7061da177e4SLinus Torvalds if (to->to_exponential) 7071da177e4SLinus Torvalds req->rq_timeout <<= 1; 7081da177e4SLinus Torvalds else 7091da177e4SLinus Torvalds req->rq_timeout += to->to_increment; 7101da177e4SLinus Torvalds if (to->to_maxval && req->rq_timeout >= to->to_maxval) 7111da177e4SLinus Torvalds req->rq_timeout = to->to_maxval; 7121da177e4SLinus Torvalds req->rq_retries++; 7131da177e4SLinus Torvalds } else { 7141da177e4SLinus Torvalds req->rq_timeout = to->to_initval; 7151da177e4SLinus Torvalds req->rq_retries = 0; 7161da177e4SLinus Torvalds xprt_reset_majortimeo(req); 7171da177e4SLinus Torvalds /* Reset the RTT counters == "slow start" */ 718b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 7191da177e4SLinus Torvalds rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); 720b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 7211da177e4SLinus Torvalds status = -ETIMEDOUT; 7221da177e4SLinus Torvalds } 7237de62bc0SOlga Kornievskaia xprt_reset_minortimeo(req); 7241da177e4SLinus Torvalds 7251da177e4SLinus Torvalds if (req->rq_timeout == 0) { 7261da177e4SLinus Torvalds printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n"); 7271da177e4SLinus Torvalds req->rq_timeout = 5 * HZ; 7281da177e4SLinus Torvalds } 7291da177e4SLinus Torvalds return status; 7301da177e4SLinus Torvalds } 7311da177e4SLinus Torvalds 73265f27f38SDavid Howells static void xprt_autoclose(struct work_struct *work) 7331da177e4SLinus Torvalds { 73465f27f38SDavid Howells struct rpc_xprt *xprt = 73565f27f38SDavid Howells container_of(work, struct rpc_xprt, task_cleanup); 736a1231fdaSTrond Myklebust unsigned int pflags = memalloc_nofs_save(); 7371da177e4SLinus Torvalds 738911813d7SChuck Lever trace_xprt_disconnect_auto(xprt); 739d896ba83STrond Myklebust xprt->connect_cookie++; 740d896ba83STrond Myklebust smp_mb__before_atomic(); 74166af1e55STrond Myklebust clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 7424876cc77STrond Myklebust xprt->ops->close(xprt); 
7431da177e4SLinus Torvalds xprt_release_write(xprt, NULL); 74479234c3dSTrond Myklebust wake_up_bit(&xprt->state, XPRT_LOCKED); 745a1231fdaSTrond Myklebust memalloc_nofs_restore(pflags); 7461da177e4SLinus Torvalds } 7471da177e4SLinus Torvalds 7489903cd1cSChuck Lever /** 74962da3b24STrond Myklebust * xprt_disconnect_done - mark a transport as disconnected 7509903cd1cSChuck Lever * @xprt: transport to flag for disconnect 7519903cd1cSChuck Lever * 7521da177e4SLinus Torvalds */ 75362da3b24STrond Myklebust void xprt_disconnect_done(struct rpc_xprt *xprt) 7541da177e4SLinus Torvalds { 755911813d7SChuck Lever trace_xprt_disconnect_done(xprt); 756b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 7571da177e4SLinus Torvalds xprt_clear_connected(xprt); 758c544577dSTrond Myklebust xprt_clear_write_space_locked(xprt); 7598593e010SChuck Lever xprt_clear_congestion_window_wait_locked(xprt); 76027adc785STrond Myklebust xprt_wake_pending_tasks(xprt, -ENOTCONN); 761b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 7621da177e4SLinus Torvalds } 76362da3b24STrond Myklebust EXPORT_SYMBOL_GPL(xprt_disconnect_done); 7641da177e4SLinus Torvalds 76566af1e55STrond Myklebust /** 766e26d9972STrond Myklebust * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call 767e26d9972STrond Myklebust * @xprt: transport to disconnect 768e26d9972STrond Myklebust */ 769e26d9972STrond Myklebust static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt) 770e26d9972STrond Myklebust { 7713be232f1STrond Myklebust if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state)) 7723be232f1STrond Myklebust return; 773e26d9972STrond Myklebust if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 774e26d9972STrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 775e26d9972STrond Myklebust else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state)) 776e26d9972STrond Myklebust rpc_wake_up_queued_task_set_status(&xprt->pending, 777e26d9972STrond Myklebust 
xprt->snd_task, -ENOTCONN); 778e26d9972STrond Myklebust } 779e26d9972STrond Myklebust 780e26d9972STrond Myklebust /** 78166af1e55STrond Myklebust * xprt_force_disconnect - force a transport to disconnect 78266af1e55STrond Myklebust * @xprt: transport to disconnect 78366af1e55STrond Myklebust * 78466af1e55STrond Myklebust */ 78566af1e55STrond Myklebust void xprt_force_disconnect(struct rpc_xprt *xprt) 78666af1e55STrond Myklebust { 787911813d7SChuck Lever trace_xprt_disconnect_force(xprt); 788911813d7SChuck Lever 78966af1e55STrond Myklebust /* Don't race with the test_bit() in xprt_clear_locked() */ 790b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 791e26d9972STrond Myklebust xprt_schedule_autoclose_locked(xprt); 792b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 79366af1e55STrond Myklebust } 794e2a4f4fbSChuck Lever EXPORT_SYMBOL_GPL(xprt_force_disconnect); 79566af1e55STrond Myklebust 7967f3a1d1eSTrond Myklebust static unsigned int 7977f3a1d1eSTrond Myklebust xprt_connect_cookie(struct rpc_xprt *xprt) 7987f3a1d1eSTrond Myklebust { 7997f3a1d1eSTrond Myklebust return READ_ONCE(xprt->connect_cookie); 8007f3a1d1eSTrond Myklebust } 8017f3a1d1eSTrond Myklebust 8027f3a1d1eSTrond Myklebust static bool 8037f3a1d1eSTrond Myklebust xprt_request_retransmit_after_disconnect(struct rpc_task *task) 8047f3a1d1eSTrond Myklebust { 8057f3a1d1eSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 8067f3a1d1eSTrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 8077f3a1d1eSTrond Myklebust 8087f3a1d1eSTrond Myklebust return req->rq_connect_cookie != xprt_connect_cookie(xprt) || 8097f3a1d1eSTrond Myklebust !xprt_connected(xprt); 8107f3a1d1eSTrond Myklebust } 8117f3a1d1eSTrond Myklebust 8127c1d71cfSTrond Myklebust /** 8137c1d71cfSTrond Myklebust * xprt_conditional_disconnect - force a transport to disconnect 8147c1d71cfSTrond Myklebust * @xprt: transport to disconnect 8157c1d71cfSTrond Myklebust * @cookie: 'connection cookie' 8167c1d71cfSTrond Myklebust * 
8177c1d71cfSTrond Myklebust * This attempts to break the connection if and only if 'cookie' matches 8187c1d71cfSTrond Myklebust * the current transport 'connection cookie'. It ensures that we don't 8197c1d71cfSTrond Myklebust * try to break the connection more than once when we need to retransmit 8207c1d71cfSTrond Myklebust * a batch of RPC requests. 8217c1d71cfSTrond Myklebust * 8227c1d71cfSTrond Myklebust */ 8237c1d71cfSTrond Myklebust void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) 8247c1d71cfSTrond Myklebust { 8257c1d71cfSTrond Myklebust /* Don't race with the test_bit() in xprt_clear_locked() */ 826b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 8277c1d71cfSTrond Myklebust if (cookie != xprt->connect_cookie) 8287c1d71cfSTrond Myklebust goto out; 8292c2ee6d2SNeilBrown if (test_bit(XPRT_CLOSING, &xprt->state)) 8307c1d71cfSTrond Myklebust goto out; 831e26d9972STrond Myklebust xprt_schedule_autoclose_locked(xprt); 8327c1d71cfSTrond Myklebust out: 833b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 8347c1d71cfSTrond Myklebust } 8357c1d71cfSTrond Myklebust 836ad3331acSTrond Myklebust static bool 837ad3331acSTrond Myklebust xprt_has_timer(const struct rpc_xprt *xprt) 838ad3331acSTrond Myklebust { 839ad3331acSTrond Myklebust return xprt->idle_timeout != 0; 840ad3331acSTrond Myklebust } 841ad3331acSTrond Myklebust 842ad3331acSTrond Myklebust static void 843ad3331acSTrond Myklebust xprt_schedule_autodisconnect(struct rpc_xprt *xprt) 844ad3331acSTrond Myklebust __must_hold(&xprt->transport_lock) 845ad3331acSTrond Myklebust { 84680d3c45fSDave Wysochanski xprt->last_used = jiffies; 84795f7691dSTrond Myklebust if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt)) 848ad3331acSTrond Myklebust mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); 849ad3331acSTrond Myklebust } 850ad3331acSTrond Myklebust 8511da177e4SLinus Torvalds static void 852ff861c4dSKees Cook xprt_init_autodisconnect(struct timer_list *t) 
8531da177e4SLinus Torvalds { 854ff861c4dSKees Cook struct rpc_xprt *xprt = from_timer(xprt, t, timer); 8551da177e4SLinus Torvalds 85695f7691dSTrond Myklebust if (!RB_EMPTY_ROOT(&xprt->recv_queue)) 857b5e92419STrond Myklebust return; 858ad3331acSTrond Myklebust /* Reset xprt->last_used to avoid connect/autodisconnect cycling */ 859ad3331acSTrond Myklebust xprt->last_used = jiffies; 8602226feb6SChuck Lever if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 8611da177e4SLinus Torvalds return; 862b5e92419STrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 8631da177e4SLinus Torvalds } 8641da177e4SLinus Torvalds 865a4ae3081SChuck Lever #if IS_ENABLED(CONFIG_FAIL_SUNRPC) 866a4ae3081SChuck Lever static void xprt_inject_disconnect(struct rpc_xprt *xprt) 867a4ae3081SChuck Lever { 868a4ae3081SChuck Lever if (!fail_sunrpc.ignore_client_disconnect && 869a4ae3081SChuck Lever should_fail(&fail_sunrpc.attr, 1)) 870a4ae3081SChuck Lever xprt->ops->inject_disconnect(xprt); 871a4ae3081SChuck Lever } 872a4ae3081SChuck Lever #else 873a4ae3081SChuck Lever static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) 874a4ae3081SChuck Lever { 875a4ae3081SChuck Lever } 876a4ae3081SChuck Lever #endif 877a4ae3081SChuck Lever 878718ba5b8STrond Myklebust bool xprt_lock_connect(struct rpc_xprt *xprt, 879718ba5b8STrond Myklebust struct rpc_task *task, 880718ba5b8STrond Myklebust void *cookie) 881718ba5b8STrond Myklebust { 882718ba5b8STrond Myklebust bool ret = false; 883718ba5b8STrond Myklebust 884b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 885718ba5b8STrond Myklebust if (!test_bit(XPRT_LOCKED, &xprt->state)) 886718ba5b8STrond Myklebust goto out; 887718ba5b8STrond Myklebust if (xprt->snd_task != task) 888718ba5b8STrond Myklebust goto out; 889c2dc3e5fSTrond Myklebust set_bit(XPRT_SND_IS_COOKIE, &xprt->state); 890718ba5b8STrond Myklebust xprt->snd_task = cookie; 891718ba5b8STrond Myklebust ret = true; 892718ba5b8STrond Myklebust out: 893b5e92419STrond Myklebust 
spin_unlock(&xprt->transport_lock); 894718ba5b8STrond Myklebust return ret; 895718ba5b8STrond Myklebust } 896f99fa508STrond Myklebust EXPORT_SYMBOL_GPL(xprt_lock_connect); 897718ba5b8STrond Myklebust 898718ba5b8STrond Myklebust void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) 899718ba5b8STrond Myklebust { 900b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 901718ba5b8STrond Myklebust if (xprt->snd_task != cookie) 902718ba5b8STrond Myklebust goto out; 903718ba5b8STrond Myklebust if (!test_bit(XPRT_LOCKED, &xprt->state)) 904718ba5b8STrond Myklebust goto out; 905718ba5b8STrond Myklebust xprt->snd_task =NULL; 906c2dc3e5fSTrond Myklebust clear_bit(XPRT_SND_IS_COOKIE, &xprt->state); 907718ba5b8STrond Myklebust xprt->ops->release_xprt(xprt, NULL); 908ad3331acSTrond Myklebust xprt_schedule_autodisconnect(xprt); 909718ba5b8STrond Myklebust out: 910b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 91179234c3dSTrond Myklebust wake_up_bit(&xprt->state, XPRT_LOCKED); 912718ba5b8STrond Myklebust } 913f99fa508STrond Myklebust EXPORT_SYMBOL_GPL(xprt_unlock_connect); 914718ba5b8STrond Myklebust 9159903cd1cSChuck Lever /** 9169903cd1cSChuck Lever * xprt_connect - schedule a transport connect operation 9179903cd1cSChuck Lever * @task: RPC task that is requesting the connect 9181da177e4SLinus Torvalds * 9191da177e4SLinus Torvalds */ 9201da177e4SLinus Torvalds void xprt_connect(struct rpc_task *task) 9211da177e4SLinus Torvalds { 922ad2368d6STrond Myklebust struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 9231da177e4SLinus Torvalds 924db0a86c4SChuck Lever trace_xprt_connect(xprt); 9251da177e4SLinus Torvalds 926ec739ef0SChuck Lever if (!xprt_bound(xprt)) { 92701d37c42STrond Myklebust task->tk_status = -EAGAIN; 9281da177e4SLinus Torvalds return; 9291da177e4SLinus Torvalds } 9301da177e4SLinus Torvalds if (!xprt_lock_write(xprt, task)) 9311da177e4SLinus Torvalds return; 932feb8ca37STrond Myklebust 933f0043206STrond Myklebust if (!xprt_connected(xprt) && 
!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { 9342c2ee6d2SNeilBrown task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; 9356b2e6856STrond Myklebust rpc_sleep_on_timeout(&xprt->pending, task, NULL, 9369e910bffSTrond Myklebust xprt_request_timeout(task->tk_rqstp)); 9370b9e7943STrond Myklebust 9380b9e7943STrond Myklebust if (test_bit(XPRT_CLOSING, &xprt->state)) 9390b9e7943STrond Myklebust return; 9400b9e7943STrond Myklebust if (xprt_test_and_set_connecting(xprt)) 9410b9e7943STrond Myklebust return; 9420a9a4304STrond Myklebust /* Race breaker */ 9430a9a4304STrond Myklebust if (!xprt_connected(xprt)) { 944262ca07dSChuck Lever xprt->stat.connect_start = jiffies; 9451b092092STrond Myklebust xprt->ops->connect(xprt, task); 9460a9a4304STrond Myklebust } else { 9470a9a4304STrond Myklebust xprt_clear_connecting(xprt); 9480a9a4304STrond Myklebust task->tk_status = 0; 9490a9a4304STrond Myklebust rpc_wake_up_queued_task(&xprt->pending, task); 9500a9a4304STrond Myklebust } 9511da177e4SLinus Torvalds } 952718ba5b8STrond Myklebust xprt_release_write(xprt, task); 9531da177e4SLinus Torvalds } 9541da177e4SLinus Torvalds 955675dd90aSChuck Lever /** 956675dd90aSChuck Lever * xprt_reconnect_delay - compute the wait before scheduling a connect 957675dd90aSChuck Lever * @xprt: transport instance 958675dd90aSChuck Lever * 959675dd90aSChuck Lever */ 960675dd90aSChuck Lever unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt) 961675dd90aSChuck Lever { 962675dd90aSChuck Lever unsigned long start, now = jiffies; 963675dd90aSChuck Lever 964675dd90aSChuck Lever start = xprt->stat.connect_start + xprt->reestablish_timeout; 965675dd90aSChuck Lever if (time_after(start, now)) 966675dd90aSChuck Lever return start - now; 967675dd90aSChuck Lever return 0; 968675dd90aSChuck Lever } 969675dd90aSChuck Lever EXPORT_SYMBOL_GPL(xprt_reconnect_delay); 970675dd90aSChuck Lever 971675dd90aSChuck Lever /** 972675dd90aSChuck Lever * xprt_reconnect_backoff - compute the new re-establish timeout 
973675dd90aSChuck Lever * @xprt: transport instance 974675dd90aSChuck Lever * @init_to: initial reestablish timeout 975675dd90aSChuck Lever * 976675dd90aSChuck Lever */ 977675dd90aSChuck Lever void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to) 978675dd90aSChuck Lever { 979675dd90aSChuck Lever xprt->reestablish_timeout <<= 1; 980675dd90aSChuck Lever if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) 981675dd90aSChuck Lever xprt->reestablish_timeout = xprt->max_reconnect_timeout; 982675dd90aSChuck Lever if (xprt->reestablish_timeout < init_to) 983675dd90aSChuck Lever xprt->reestablish_timeout = init_to; 984675dd90aSChuck Lever } 985675dd90aSChuck Lever EXPORT_SYMBOL_GPL(xprt_reconnect_backoff); 986675dd90aSChuck Lever 98795f7691dSTrond Myklebust enum xprt_xid_rb_cmp { 98895f7691dSTrond Myklebust XID_RB_EQUAL, 98995f7691dSTrond Myklebust XID_RB_LEFT, 99095f7691dSTrond Myklebust XID_RB_RIGHT, 99195f7691dSTrond Myklebust }; 99295f7691dSTrond Myklebust static enum xprt_xid_rb_cmp 99395f7691dSTrond Myklebust xprt_xid_cmp(__be32 xid1, __be32 xid2) 99495f7691dSTrond Myklebust { 99595f7691dSTrond Myklebust if (xid1 == xid2) 99695f7691dSTrond Myklebust return XID_RB_EQUAL; 99795f7691dSTrond Myklebust if ((__force u32)xid1 < (__force u32)xid2) 99895f7691dSTrond Myklebust return XID_RB_LEFT; 99995f7691dSTrond Myklebust return XID_RB_RIGHT; 100095f7691dSTrond Myklebust } 100195f7691dSTrond Myklebust 100295f7691dSTrond Myklebust static struct rpc_rqst * 100395f7691dSTrond Myklebust xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid) 100495f7691dSTrond Myklebust { 100595f7691dSTrond Myklebust struct rb_node *n = xprt->recv_queue.rb_node; 100695f7691dSTrond Myklebust struct rpc_rqst *req; 100795f7691dSTrond Myklebust 100895f7691dSTrond Myklebust while (n != NULL) { 100995f7691dSTrond Myklebust req = rb_entry(n, struct rpc_rqst, rq_recv); 101095f7691dSTrond Myklebust switch (xprt_xid_cmp(xid, req->rq_xid)) { 101195f7691dSTrond Myklebust case 
XID_RB_LEFT: 101295f7691dSTrond Myklebust n = n->rb_left; 101395f7691dSTrond Myklebust break; 101495f7691dSTrond Myklebust case XID_RB_RIGHT: 101595f7691dSTrond Myklebust n = n->rb_right; 101695f7691dSTrond Myklebust break; 101795f7691dSTrond Myklebust case XID_RB_EQUAL: 101895f7691dSTrond Myklebust return req; 101995f7691dSTrond Myklebust } 102095f7691dSTrond Myklebust } 102195f7691dSTrond Myklebust return NULL; 102295f7691dSTrond Myklebust } 102395f7691dSTrond Myklebust 102495f7691dSTrond Myklebust static void 102595f7691dSTrond Myklebust xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new) 102695f7691dSTrond Myklebust { 102795f7691dSTrond Myklebust struct rb_node **p = &xprt->recv_queue.rb_node; 102895f7691dSTrond Myklebust struct rb_node *n = NULL; 102995f7691dSTrond Myklebust struct rpc_rqst *req; 103095f7691dSTrond Myklebust 103195f7691dSTrond Myklebust while (*p != NULL) { 103295f7691dSTrond Myklebust n = *p; 103395f7691dSTrond Myklebust req = rb_entry(n, struct rpc_rqst, rq_recv); 103495f7691dSTrond Myklebust switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) { 103595f7691dSTrond Myklebust case XID_RB_LEFT: 103695f7691dSTrond Myklebust p = &n->rb_left; 103795f7691dSTrond Myklebust break; 103895f7691dSTrond Myklebust case XID_RB_RIGHT: 103995f7691dSTrond Myklebust p = &n->rb_right; 104095f7691dSTrond Myklebust break; 104195f7691dSTrond Myklebust case XID_RB_EQUAL: 104295f7691dSTrond Myklebust WARN_ON_ONCE(new != req); 104395f7691dSTrond Myklebust return; 104495f7691dSTrond Myklebust } 104595f7691dSTrond Myklebust } 104695f7691dSTrond Myklebust rb_link_node(&new->rq_recv, n, p); 104795f7691dSTrond Myklebust rb_insert_color(&new->rq_recv, &xprt->recv_queue); 104895f7691dSTrond Myklebust } 104995f7691dSTrond Myklebust 105095f7691dSTrond Myklebust static void 105195f7691dSTrond Myklebust xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req) 105295f7691dSTrond Myklebust { 105395f7691dSTrond Myklebust rb_erase(&req->rq_recv, 
&xprt->recv_queue); 105495f7691dSTrond Myklebust } 105595f7691dSTrond Myklebust 10569903cd1cSChuck Lever /** 10579903cd1cSChuck Lever * xprt_lookup_rqst - find an RPC request corresponding to an XID 10589903cd1cSChuck Lever * @xprt: transport on which the original request was transmitted 10599903cd1cSChuck Lever * @xid: RPC XID of incoming reply 10609903cd1cSChuck Lever * 106175c84151STrond Myklebust * Caller holds xprt->queue_lock. 10621da177e4SLinus Torvalds */ 1063d8ed029dSAlexey Dobriyan struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) 10641da177e4SLinus Torvalds { 10658f3a6de3SPavel Emelyanov struct rpc_rqst *entry; 10661da177e4SLinus Torvalds 106795f7691dSTrond Myklebust entry = xprt_request_rb_find(xprt, xid); 106895f7691dSTrond Myklebust if (entry != NULL) { 10693705ad64SJeff Layton trace_xprt_lookup_rqst(xprt, xid, 0); 10700b87a46bSChuck Lever entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime); 1071262ca07dSChuck Lever return entry; 10723705ad64SJeff Layton } 107346121cf7SChuck Lever 107446121cf7SChuck Lever dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", 107546121cf7SChuck Lever ntohl(xid)); 10763705ad64SJeff Layton trace_xprt_lookup_rqst(xprt, xid, -ENOENT); 1077262ca07dSChuck Lever xprt->stat.bad_xids++; 1078262ca07dSChuck Lever return NULL; 10791da177e4SLinus Torvalds } 108012444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_lookup_rqst); 10811da177e4SLinus Torvalds 1082cf9946cdSTrond Myklebust static bool 1083cf9946cdSTrond Myklebust xprt_is_pinned_rqst(struct rpc_rqst *req) 1084cf9946cdSTrond Myklebust { 1085cf9946cdSTrond Myklebust return atomic_read(&req->rq_pin) != 0; 1086cf9946cdSTrond Myklebust } 1087cf9946cdSTrond Myklebust 1088729749bbSTrond Myklebust /** 1089729749bbSTrond Myklebust * xprt_pin_rqst - Pin a request on the transport receive list 1090729749bbSTrond Myklebust * @req: Request to pin 1091729749bbSTrond Myklebust * 1092729749bbSTrond Myklebust * Caller must ensure this is atomic with the call to 
xprt_lookup_rqst() 10931f7d1c73SChuck Lever * so should be holding xprt->queue_lock. 1094729749bbSTrond Myklebust */ 1095729749bbSTrond Myklebust void xprt_pin_rqst(struct rpc_rqst *req) 1096729749bbSTrond Myklebust { 1097cf9946cdSTrond Myklebust atomic_inc(&req->rq_pin); 1098729749bbSTrond Myklebust } 10999590d083SChuck Lever EXPORT_SYMBOL_GPL(xprt_pin_rqst); 1100729749bbSTrond Myklebust 1101729749bbSTrond Myklebust /** 1102729749bbSTrond Myklebust * xprt_unpin_rqst - Unpin a request on the transport receive list 1103729749bbSTrond Myklebust * @req: Request to pin 1104729749bbSTrond Myklebust * 11051f7d1c73SChuck Lever * Caller should be holding xprt->queue_lock. 1106729749bbSTrond Myklebust */ 1107729749bbSTrond Myklebust void xprt_unpin_rqst(struct rpc_rqst *req) 1108729749bbSTrond Myklebust { 1109cf9946cdSTrond Myklebust if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) { 1110cf9946cdSTrond Myklebust atomic_dec(&req->rq_pin); 1111cf9946cdSTrond Myklebust return; 1112cf9946cdSTrond Myklebust } 1113cf9946cdSTrond Myklebust if (atomic_dec_and_test(&req->rq_pin)) 1114cf9946cdSTrond Myklebust wake_up_var(&req->rq_pin); 1115729749bbSTrond Myklebust } 11169590d083SChuck Lever EXPORT_SYMBOL_GPL(xprt_unpin_rqst); 1117729749bbSTrond Myklebust 1118729749bbSTrond Myklebust static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req) 1119729749bbSTrond Myklebust { 1120cf9946cdSTrond Myklebust wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req)); 1121729749bbSTrond Myklebust } 1122729749bbSTrond Myklebust 1123edc81dcdSTrond Myklebust static bool 1124edc81dcdSTrond Myklebust xprt_request_data_received(struct rpc_task *task) 1125edc81dcdSTrond Myklebust { 1126edc81dcdSTrond Myklebust return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) && 1127edc81dcdSTrond Myklebust READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0; 1128edc81dcdSTrond Myklebust } 1129edc81dcdSTrond Myklebust 1130edc81dcdSTrond Myklebust static bool 1131edc81dcdSTrond Myklebust 
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req) 1132edc81dcdSTrond Myklebust { 1133edc81dcdSTrond Myklebust return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) && 1134edc81dcdSTrond Myklebust READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0; 1135edc81dcdSTrond Myklebust } 1136edc81dcdSTrond Myklebust 1137edc81dcdSTrond Myklebust /** 1138edc81dcdSTrond Myklebust * xprt_request_enqueue_receive - Add an request to the receive queue 1139edc81dcdSTrond Myklebust * @task: RPC task 1140edc81dcdSTrond Myklebust * 1141edc81dcdSTrond Myklebust */ 1142eb07d5a4SNeilBrown int 1143edc81dcdSTrond Myklebust xprt_request_enqueue_receive(struct rpc_task *task) 1144edc81dcdSTrond Myklebust { 1145edc81dcdSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 1146edc81dcdSTrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 1147eb07d5a4SNeilBrown int ret; 1148edc81dcdSTrond Myklebust 1149edc81dcdSTrond Myklebust if (!xprt_request_need_enqueue_receive(task, req)) 1150eb07d5a4SNeilBrown return 0; 115175369089STrond Myklebust 1152eb07d5a4SNeilBrown ret = xprt_request_prepare(task->tk_rqstp); 1153eb07d5a4SNeilBrown if (ret) 1154eb07d5a4SNeilBrown return ret; 1155edc81dcdSTrond Myklebust spin_lock(&xprt->queue_lock); 1156edc81dcdSTrond Myklebust 1157edc81dcdSTrond Myklebust /* Update the softirq receive buffer */ 1158edc81dcdSTrond Myklebust memcpy(&req->rq_private_buf, &req->rq_rcv_buf, 1159edc81dcdSTrond Myklebust sizeof(req->rq_private_buf)); 1160edc81dcdSTrond Myklebust 1161edc81dcdSTrond Myklebust /* Add request to the receive list */ 116295f7691dSTrond Myklebust xprt_request_rb_insert(xprt, req); 1163edc81dcdSTrond Myklebust set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate); 1164edc81dcdSTrond Myklebust spin_unlock(&xprt->queue_lock); 1165edc81dcdSTrond Myklebust 1166edc81dcdSTrond Myklebust /* Turn off autodisconnect */ 1167edc81dcdSTrond Myklebust del_singleshot_timer_sync(&xprt->timer); 1168eb07d5a4SNeilBrown return 0; 1169edc81dcdSTrond 
Myklebust } 1170edc81dcdSTrond Myklebust 1171edc81dcdSTrond Myklebust /** 1172edc81dcdSTrond Myklebust * xprt_request_dequeue_receive_locked - Remove a request from the receive queue 1173edc81dcdSTrond Myklebust * @task: RPC task 1174edc81dcdSTrond Myklebust * 1175edc81dcdSTrond Myklebust * Caller must hold xprt->queue_lock. 1176edc81dcdSTrond Myklebust */ 1177edc81dcdSTrond Myklebust static void 1178edc81dcdSTrond Myklebust xprt_request_dequeue_receive_locked(struct rpc_task *task) 1179edc81dcdSTrond Myklebust { 118095f7691dSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 118195f7691dSTrond Myklebust 1182edc81dcdSTrond Myklebust if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) 118395f7691dSTrond Myklebust xprt_request_rb_remove(req->rq_xprt, req); 1184edc81dcdSTrond Myklebust } 1185edc81dcdSTrond Myklebust 1186ecd465eeSChuck Lever /** 1187ecd465eeSChuck Lever * xprt_update_rtt - Update RPC RTT statistics 1188ecd465eeSChuck Lever * @task: RPC request that recently completed 1189ecd465eeSChuck Lever * 119075c84151STrond Myklebust * Caller holds xprt->queue_lock. 
1191ecd465eeSChuck Lever */ 1192ecd465eeSChuck Lever void xprt_update_rtt(struct rpc_task *task) 11931da177e4SLinus Torvalds { 11941570c1e4SChuck Lever struct rpc_rqst *req = task->tk_rqstp; 11951570c1e4SChuck Lever struct rpc_rtt *rtt = task->tk_client->cl_rtt; 119695c96174SEric Dumazet unsigned int timer = task->tk_msg.rpc_proc->p_timer; 1197d60dbb20STrond Myklebust long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); 11981570c1e4SChuck Lever 11991da177e4SLinus Torvalds if (timer) { 12001da177e4SLinus Torvalds if (req->rq_ntrans == 1) 1201ff839970SChuck Lever rpc_update_rtt(rtt, timer, m); 12021570c1e4SChuck Lever rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); 12031da177e4SLinus Torvalds } 12041da177e4SLinus Torvalds } 1205ecd465eeSChuck Lever EXPORT_SYMBOL_GPL(xprt_update_rtt); 12061da177e4SLinus Torvalds 12071570c1e4SChuck Lever /** 12081570c1e4SChuck Lever * xprt_complete_rqst - called when reply processing is complete 12091570c1e4SChuck Lever * @task: RPC request that recently completed 12101570c1e4SChuck Lever * @copied: actual number of bytes received from the transport 12111570c1e4SChuck Lever * 121275c84151STrond Myklebust * Caller holds xprt->queue_lock. 
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

/*
 * RPC timeout handler, run from the rpc_sleep_on() timer set up by
 * xprt_wait_for_reply_request_*(). If no reply bytes have arrived,
 * give the transport a chance to adjust its timeout state via the
 * optional ->timer op; if a reply did arrive, clear the -ETIMEDOUT
 * status so the task proceeds normally.
 */
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	/* Only act on a genuine timeout wakeup */
	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	/* Exponential backoff: double the RTO estimate for each prior
	 * timeout and each retransmission, capped at to_maxval. */
	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	/* Re-test under the lock: the reply may have just arrived */
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

/*
 * A request needs enqueueing only if it is not already on the
 * transmit queue (i.e. RPC_TASK_NEED_XMIT is not yet set).
 */
static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			/* Group with an already-queued request from the same
			 * owner by chaining onto its rq_xmit2 sub-list. */
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		/* Default: append to the tail of the transmit queue */
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		atomic_long_inc(&xprt->xmit_queuelen);
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		/* Promote the next request on our rq_xmit2 sub-list (if
		 * any) onto the main transmit queue in our place. */
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		/* Drop the lock while waiting for the pin holder, then
		 * re-check under the lock until the request is unpinned. */
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 * Returns error, or zero.
 */
static int
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	/* ->prepare_request is optional; transports without it need no
	 * per-request preparation. */
	if (xprt->ops->prepare_request)
		return xprt->ops->prepare_request(req);
	return 0;
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;

	}
	if (atomic_read(&xprt->swapper))
		/* This will be clear in __rpc_execute */
		current->flags |= PF_MEMALLOC;
	return true;
}

/*
 * Release the transport write lock taken in xprt_prepare_transmit(),
 * exercising the fault-injection disconnect hook on the way out.
 */
void xprt_end_transmit(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	xprt_inject_disconnect(xprt);
	xprt_release_write(xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		/* A reply has already arrived; nothing to send */
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		/* Send failed: undo the optimistic rq_ntrans bump */
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans) {
		task->tk_client->cl_stats->rpcretrans++;
		trace_xprt_retransmit(req);
	}

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	/* Update transport statistics under the transport lock */
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	for (;;) {
		next = list_first_entry_or_null(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		if (!next)
			break;
		/* Pin the request so it survives while the lock is dropped
		 * for the actual transmission. */
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		/* -EBADMSG on someone else's request is not our error */
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status < 0) {
			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				task->tk_status = status;
			break;
		}
		/* Was @task transmitted, and has it received a reply? */
		if (xprt_request_data_received(task) &&
		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			break;
		cond_resched_lock(&xprt->queue_lock);
	}
	spin_unlock(&xprt->queue_lock);
}

/*
 * rpc_sleep_on() callback used when waking a backlogged task: if
 * __xprt_set_rq() handed the task a slot, finish initializing it.
 */
static void xprt_complete_request_init(struct rpc_task *task)
{
	if (task->tk_rqstp)
		xprt_request_init(task);
}

void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
}
EXPORT_SYMBOL_GPL(xprt_add_backlog);

/*
 * rpc_wake_up_first() filter: hand the freed slot @data directly to a
 * backlogged task that does not yet own one. Returns true if the slot
 * was claimed.
 */
static bool __xprt_set_rq(struct rpc_task *task, void *data)
{
	struct rpc_rqst *req = data;

	if (task->tk_rqstp == NULL) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		task->tk_rqstp = req;
		return true;
	}
	return false;
}

bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	/* No waiter took the slot: the backlog is empty, so the
	 * transport is no longer congested. */
	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
		clear_bit(XPRT_CONGESTED, &xprt->state);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);

/*
 * If the transport is congested, queue @task on the backlog instead of
 * letting it try to allocate a slot. Returns true if the task was
 * placed on the backlog.
 */
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	/* Re-test under reserve_lock to close the race with
	 * xprt_wake_up_backlog() clearing the flag. */
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		xprt_add_backlog(xprt, task);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

/*
 * Allocate an extra slot beyond the preallocated pool, up to
 * xprt->max_reqs. Called with xprt->reserve_lock held; the lock is
 * dropped around the allocation itself. Returns the new slot, or
 * ERR_PTR(-EAGAIN) when at the limit / ERR_PTR(-ENOMEM) on failure.
 */
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	/* Reserve the count before dropping the lock so concurrent
	 * callers cannot overshoot max_reqs. */
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(*req), rpc_task_gfp_mask());
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

/*
 * Free a dynamically allocated slot if we are above the preallocated
 * minimum. Caller holds xprt->reserve_lock. Returns true if the slot
 * was freed (and must not be reused).
 */
static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

/*
 * Default ->alloc_slot implementation: take a slot from the free list,
 * fall back to dynamic allocation, or park the task on the backlog.
 * On success task->tk_rqstp is set and task->tk_status is 0; otherwise
 * tk_status is -ENOMEM or -EAGAIN.
 */
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

/*
 * Default ->free_slot implementation: hand the slot to a backlogged
 * waiter if there is one, otherwise shrink the dynamic pool or return
 * the slot to the free list.
 */
void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

/* Release every slot still on the free list (transport teardown). */
static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

/* IDA providing unique ids for rpc_xprt instances (see xprt_alloc_id) */
static DEFINE_IDA(rpc_xprt_ids);
/* Destroy the transport-id IDA at module unload time. */
void xprt_cleanup_ids(void)
{
	ida_destroy(&rpc_xprt_ids);
}

/* Assign a unique id to @xprt. Returns 0, or a negative errno. */
static int xprt_alloc_id(struct rpc_xprt *xprt)
{
	int id;

	id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	xprt->id = id;
	return 0;
}

/* Release the id assigned by xprt_alloc_id(). */
static void xprt_free_id(struct rpc_xprt *xprt)
{
	ida_simple_remove(&rpc_xprt_ids, xprt->id);
}

/*
 * Allocate and initialize an rpc_xprt of @size bytes, preallocating
 * @num_prealloc request slots and allowing dynamic growth up to
 * @max_alloc slots. Returns the new transport, or NULL on failure.
 */
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	/* NOTE(review): xprt_alloc_id()'s return value is ignored here,
	 * so an IDA allocation failure leaves xprt->id unset — confirm
	 * whether that failure should abort the allocation. */
	xprt_alloc_id(xprt);
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	/* max_reqs can never be below the preallocated count */
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

/* Tear down a transport allocated by xprt_alloc(); RCU-deferred free. */
void xprt_free(struct rpc_xprt *xprt)
{
	put_net_track(xprt->xprt_net, &xprt->ns_tracker);
	xprt_free_all_slots(xprt);
	xprt_free_id(xprt);
	rpc_sysfs_xprt_destroy(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

/*
 * Initialize rq_connect_cookie to one less than the transport's
 * current cookie, so a fresh request is treated as not yet sent on
 * the current connection.
 */
static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

/* Hand out the next XID; reserve_lock serializes the counter bump. */
static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

/* Seed the XID counter with a random starting value. */
static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

/* Initialize a freshly allocated slot for use by @task. */
static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

/* Ask the transport for a slot and, if one was granted, set it up. */
static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
1923ba60eb25STrond Myklebust */ 1924ba60eb25STrond Myklebust void xprt_retry_reserve(struct rpc_task *task) 1925ba60eb25STrond Myklebust { 1926fb43d172STrond Myklebust struct rpc_xprt *xprt = task->tk_xprt; 1927ba60eb25STrond Myklebust 1928ba60eb25STrond Myklebust task->tk_status = 0; 1929e86be3a0STrond Myklebust if (task->tk_rqstp != NULL) 1930ba60eb25STrond Myklebust return; 1931ba60eb25STrond Myklebust 1932ba60eb25STrond Myklebust task->tk_status = -EAGAIN; 19339dc6edcfSTrond Myklebust xprt_do_reserve(xprt, task); 19341da177e4SLinus Torvalds } 19351da177e4SLinus Torvalds 19369903cd1cSChuck Lever /** 19379903cd1cSChuck Lever * xprt_release - release an RPC request slot 19389903cd1cSChuck Lever * @task: task which is finished with the slot 19399903cd1cSChuck Lever * 19401da177e4SLinus Torvalds */ 19419903cd1cSChuck Lever void xprt_release(struct rpc_task *task) 19421da177e4SLinus Torvalds { 194355ae1aabSRicardo Labiaga struct rpc_xprt *xprt; 194487ed5003STrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 19451da177e4SLinus Torvalds 194687ed5003STrond Myklebust if (req == NULL) { 194787ed5003STrond Myklebust if (task->tk_client) { 1948fb43d172STrond Myklebust xprt = task->tk_xprt; 194987ed5003STrond Myklebust xprt_release_write(xprt, task); 195087ed5003STrond Myklebust } 19511da177e4SLinus Torvalds return; 195287ed5003STrond Myklebust } 195355ae1aabSRicardo Labiaga 195455ae1aabSRicardo Labiaga xprt = req->rq_xprt; 1955cc204d01STrond Myklebust xprt_request_dequeue_xprt(task); 1956b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 195749e9a890SChuck Lever xprt->ops->release_xprt(xprt, task); 1958a58dd398SChuck Lever if (xprt->ops->release_request) 1959a58dd398SChuck Lever xprt->ops->release_request(task); 1960ad3331acSTrond Myklebust xprt_schedule_autodisconnect(xprt); 1961b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 1962ee5ebe85STrond Myklebust if (req->rq_buffer) 19633435c74aSChuck Lever xprt->ops->buf_free(task); 19649d96acbcSTrond 
Myklebust xdr_free_bvec(&req->rq_rcv_buf); 19650472e476STrond Myklebust xdr_free_bvec(&req->rq_snd_buf); 1966a17c2153STrond Myklebust if (req->rq_cred != NULL) 1967a17c2153STrond Myklebust put_rpccred(req->rq_cred); 1968ead5e1c2SJ. Bruce Fields if (req->rq_release_snd_buf) 1969ead5e1c2SJ. Bruce Fields req->rq_release_snd_buf(req); 197055ae1aabSRicardo Labiaga 1971e877a88dSNeilBrown task->tk_rqstp = NULL; 1972ee5ebe85STrond Myklebust if (likely(!bc_prealloc(req))) 1973a9cde23aSChuck Lever xprt->ops->free_slot(xprt, req); 1974ee5ebe85STrond Myklebust else 1975c9acb42eSTrond Myklebust xprt_free_bc_request(req); 19761da177e4SLinus Torvalds } 19771da177e4SLinus Torvalds 1978902c5887STrond Myklebust #ifdef CONFIG_SUNRPC_BACKCHANNEL 1979902c5887STrond Myklebust void 1980902c5887STrond Myklebust xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task) 1981902c5887STrond Myklebust { 1982902c5887STrond Myklebust struct xdr_buf *xbufp = &req->rq_snd_buf; 1983902c5887STrond Myklebust 1984902c5887STrond Myklebust task->tk_rqstp = req; 1985902c5887STrond Myklebust req->rq_task = task; 1986902c5887STrond Myklebust xprt_init_connect_cookie(req, req->rq_xprt); 1987902c5887STrond Myklebust /* 1988902c5887STrond Myklebust * Set up the xdr_buf length. 1989902c5887STrond Myklebust * This also indicates that the buffer is XDR encoded already. 
1990902c5887STrond Myklebust */ 1991902c5887STrond Myklebust xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + 1992902c5887STrond Myklebust xbufp->tail[0].iov_len; 1993902c5887STrond Myklebust } 1994902c5887STrond Myklebust #endif 1995902c5887STrond Myklebust 199621de0a95STrond Myklebust static void xprt_init(struct rpc_xprt *xprt, struct net *net) 1997c2866763SChuck Lever { 199830c5116bSTrond Myklebust kref_init(&xprt->kref); 1999c2866763SChuck Lever 2000c2866763SChuck Lever spin_lock_init(&xprt->transport_lock); 2001c2866763SChuck Lever spin_lock_init(&xprt->reserve_lock); 200275c84151STrond Myklebust spin_lock_init(&xprt->queue_lock); 2003c2866763SChuck Lever 2004c2866763SChuck Lever INIT_LIST_HEAD(&xprt->free); 200595f7691dSTrond Myklebust xprt->recv_queue = RB_ROOT; 2006944b0429STrond Myklebust INIT_LIST_HEAD(&xprt->xmit_queue); 20079e00abc3STrond Myklebust #if defined(CONFIG_SUNRPC_BACKCHANNEL) 2008f9acac1aSRicardo Labiaga spin_lock_init(&xprt->bc_pa_lock); 2009f9acac1aSRicardo Labiaga INIT_LIST_HEAD(&xprt->bc_pa_list); 20109e00abc3STrond Myklebust #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 201180b14d5eSTrond Myklebust INIT_LIST_HEAD(&xprt->xprt_switch); 2012f9acac1aSRicardo Labiaga 2013c2866763SChuck Lever xprt->last_used = jiffies; 2014c2866763SChuck Lever xprt->cwnd = RPC_INITCWND; 2015a509050bSChuck Lever xprt->bind_index = 0; 2016c2866763SChuck Lever 2017c2866763SChuck Lever rpc_init_wait_queue(&xprt->binding, "xprt_binding"); 2018c2866763SChuck Lever rpc_init_wait_queue(&xprt->pending, "xprt_pending"); 201979c99152STrond Myklebust rpc_init_wait_queue(&xprt->sending, "xprt_sending"); 2020c2866763SChuck Lever rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); 2021c2866763SChuck Lever 2022c2866763SChuck Lever xprt_init_xid(xprt); 2023c2866763SChuck Lever 2024b9a0d6d1SEric Dumazet xprt->xprt_net = get_net_track(net, &xprt->ns_tracker, GFP_KERNEL); 20258d9266ffSTrond Myklebust } 20268d9266ffSTrond Myklebust 20278d9266ffSTrond Myklebust /** 
20288d9266ffSTrond Myklebust * xprt_create_transport - create an RPC transport 20298d9266ffSTrond Myklebust * @args: rpc transport creation arguments 20308d9266ffSTrond Myklebust * 20318d9266ffSTrond Myklebust */ 20328d9266ffSTrond Myklebust struct rpc_xprt *xprt_create_transport(struct xprt_create *args) 20338d9266ffSTrond Myklebust { 20348d9266ffSTrond Myklebust struct rpc_xprt *xprt; 20359bccd264STrond Myklebust const struct xprt_class *t; 20368d9266ffSTrond Myklebust 20379bccd264STrond Myklebust t = xprt_class_find_by_ident(args->ident); 20389bccd264STrond Myklebust if (!t) { 20393c45ddf8SChuck Lever dprintk("RPC: transport (%d) not supported\n", args->ident); 20408d9266ffSTrond Myklebust return ERR_PTR(-EIO); 20419bccd264STrond Myklebust } 20428d9266ffSTrond Myklebust 20438d9266ffSTrond Myklebust xprt = t->setup(args); 20449bccd264STrond Myklebust xprt_class_release(t); 20459bccd264STrond Myklebust 2046911813d7SChuck Lever if (IS_ERR(xprt)) 204721de0a95STrond Myklebust goto out; 204833d90ac0SJ. Bruce Fields if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT) 204933d90ac0SJ. 
Bruce Fields xprt->idle_timeout = 0; 205021de0a95STrond Myklebust INIT_WORK(&xprt->task_cleanup, xprt_autoclose); 205121de0a95STrond Myklebust if (xprt_has_timer(xprt)) 2052502980e8SAnna Schumaker timer_setup(&xprt->timer, xprt_init_autodisconnect, 0); 205321de0a95STrond Myklebust else 2054ff861c4dSKees Cook timer_setup(&xprt->timer, NULL, 0); 20554e0038b6STrond Myklebust 20564e0038b6STrond Myklebust if (strlen(args->servername) > RPC_MAXNETNAMELEN) { 20574e0038b6STrond Myklebust xprt_destroy(xprt); 20584e0038b6STrond Myklebust return ERR_PTR(-EINVAL); 20594e0038b6STrond Myklebust } 20604e0038b6STrond Myklebust xprt->servername = kstrdup(args->servername, GFP_KERNEL); 20614e0038b6STrond Myklebust if (xprt->servername == NULL) { 20624e0038b6STrond Myklebust xprt_destroy(xprt); 20634e0038b6STrond Myklebust return ERR_PTR(-ENOMEM); 20644e0038b6STrond Myklebust } 20654e0038b6STrond Myklebust 20663f940098SJeff Layton rpc_xprt_debugfs_register(xprt); 2067388f0c77SJeff Layton 2068911813d7SChuck Lever trace_xprt_create(xprt); 206921de0a95STrond Myklebust out: 2070c2866763SChuck Lever return xprt; 2071c2866763SChuck Lever } 2072c2866763SChuck Lever 2073528fd354STrond Myklebust static void xprt_destroy_cb(struct work_struct *work) 2074528fd354STrond Myklebust { 2075528fd354STrond Myklebust struct rpc_xprt *xprt = 2076528fd354STrond Myklebust container_of(work, struct rpc_xprt, task_cleanup); 2077528fd354STrond Myklebust 2078911813d7SChuck Lever trace_xprt_destroy(xprt); 2079911813d7SChuck Lever 2080528fd354STrond Myklebust rpc_xprt_debugfs_unregister(xprt); 2081528fd354STrond Myklebust rpc_destroy_wait_queue(&xprt->binding); 2082528fd354STrond Myklebust rpc_destroy_wait_queue(&xprt->pending); 2083528fd354STrond Myklebust rpc_destroy_wait_queue(&xprt->sending); 2084528fd354STrond Myklebust rpc_destroy_wait_queue(&xprt->backlog); 2085528fd354STrond Myklebust kfree(xprt->servername); 2086528fd354STrond Myklebust /* 2087669996adSTrond Myklebust * Destroy any existing back 
channel 2088669996adSTrond Myklebust */ 2089669996adSTrond Myklebust xprt_destroy_backchannel(xprt, UINT_MAX); 2090669996adSTrond Myklebust 2091669996adSTrond Myklebust /* 2092528fd354STrond Myklebust * Tear down transport state and free the rpc_xprt 2093528fd354STrond Myklebust */ 2094528fd354STrond Myklebust xprt->ops->destroy(xprt); 2095528fd354STrond Myklebust } 2096528fd354STrond Myklebust 20979903cd1cSChuck Lever /** 20989903cd1cSChuck Lever * xprt_destroy - destroy an RPC transport, killing off all requests. 2099a8de240aSTrond Myklebust * @xprt: transport to destroy 21009903cd1cSChuck Lever * 21011da177e4SLinus Torvalds */ 2102a8de240aSTrond Myklebust static void xprt_destroy(struct rpc_xprt *xprt) 21031da177e4SLinus Torvalds { 2104528fd354STrond Myklebust /* 2105528fd354STrond Myklebust * Exclude transport connect/disconnect handlers and autoclose 2106528fd354STrond Myklebust */ 210779234c3dSTrond Myklebust wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); 210879234c3dSTrond Myklebust 21093848e96eSNeilBrown /* 21103848e96eSNeilBrown * xprt_schedule_autodisconnect() can run after XPRT_LOCKED 21113848e96eSNeilBrown * is cleared. We use ->transport_lock to ensure the mod_timer() 21123848e96eSNeilBrown * can only run *before* del_time_sync(), never after. 21133848e96eSNeilBrown */ 21143848e96eSNeilBrown spin_lock(&xprt->transport_lock); 21150065db32STrond Myklebust del_timer_sync(&xprt->timer); 21163848e96eSNeilBrown spin_unlock(&xprt->transport_lock); 2117c8541ecdSChuck Lever 2118c8541ecdSChuck Lever /* 2119528fd354STrond Myklebust * Destroy sockets etc from the system workqueue so they can 2120528fd354STrond Myklebust * safely flush receive work running on rpciod. 
2121c8541ecdSChuck Lever */ 2122528fd354STrond Myklebust INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb); 2123528fd354STrond Myklebust schedule_work(&xprt->task_cleanup); 21246b6ca86bSTrond Myklebust } 21251da177e4SLinus Torvalds 212630c5116bSTrond Myklebust static void xprt_destroy_kref(struct kref *kref) 212730c5116bSTrond Myklebust { 212830c5116bSTrond Myklebust xprt_destroy(container_of(kref, struct rpc_xprt, kref)); 212930c5116bSTrond Myklebust } 213030c5116bSTrond Myklebust 213130c5116bSTrond Myklebust /** 213230c5116bSTrond Myklebust * xprt_get - return a reference to an RPC transport. 213330c5116bSTrond Myklebust * @xprt: pointer to the transport 213430c5116bSTrond Myklebust * 213530c5116bSTrond Myklebust */ 213630c5116bSTrond Myklebust struct rpc_xprt *xprt_get(struct rpc_xprt *xprt) 213730c5116bSTrond Myklebust { 213830c5116bSTrond Myklebust if (xprt != NULL && kref_get_unless_zero(&xprt->kref)) 213930c5116bSTrond Myklebust return xprt; 214030c5116bSTrond Myklebust return NULL; 214130c5116bSTrond Myklebust } 214230c5116bSTrond Myklebust EXPORT_SYMBOL_GPL(xprt_get); 214330c5116bSTrond Myklebust 21446b6ca86bSTrond Myklebust /** 21456b6ca86bSTrond Myklebust * xprt_put - release a reference to an RPC transport. 
21466b6ca86bSTrond Myklebust * @xprt: pointer to the transport 21476b6ca86bSTrond Myklebust * 21486b6ca86bSTrond Myklebust */ 21496b6ca86bSTrond Myklebust void xprt_put(struct rpc_xprt *xprt) 21506b6ca86bSTrond Myklebust { 215130c5116bSTrond Myklebust if (xprt != NULL) 215230c5116bSTrond Myklebust kref_put(&xprt->kref, xprt_destroy_kref); 21536b6ca86bSTrond Myklebust } 21545d252f90SChuck Lever EXPORT_SYMBOL_GPL(xprt_put); 2155*7ffcdaa6SOlga Kornievskaia 2156*7ffcdaa6SOlga Kornievskaia void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps) 2157*7ffcdaa6SOlga Kornievskaia { 2158*7ffcdaa6SOlga Kornievskaia if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) { 2159*7ffcdaa6SOlga Kornievskaia spin_lock(&xps->xps_lock); 2160*7ffcdaa6SOlga Kornievskaia xps->xps_nactive--; 2161*7ffcdaa6SOlga Kornievskaia spin_unlock(&xps->xps_lock); 2162*7ffcdaa6SOlga Kornievskaia } 2163*7ffcdaa6SOlga Kornievskaia } 2164*7ffcdaa6SOlga Kornievskaia 2165*7ffcdaa6SOlga Kornievskaia void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps) 2166*7ffcdaa6SOlga Kornievskaia { 2167*7ffcdaa6SOlga Kornievskaia if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) { 2168*7ffcdaa6SOlga Kornievskaia spin_lock(&xps->xps_lock); 2169*7ffcdaa6SOlga Kornievskaia xps->xps_nactive++; 2170*7ffcdaa6SOlga Kornievskaia spin_unlock(&xps->xps_lock); 2171*7ffcdaa6SOlga Kornievskaia } 2172*7ffcdaa6SOlga Kornievskaia } 2173*7ffcdaa6SOlga Kornievskaia 2174*7ffcdaa6SOlga Kornievskaia void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps) 2175*7ffcdaa6SOlga Kornievskaia { 2176*7ffcdaa6SOlga Kornievskaia if (test_and_set_bit(XPRT_REMOVE, &xprt->state)) 2177*7ffcdaa6SOlga Kornievskaia return; 2178*7ffcdaa6SOlga Kornievskaia 2179*7ffcdaa6SOlga Kornievskaia xprt_force_disconnect(xprt); 2180*7ffcdaa6SOlga Kornievskaia if (!test_bit(XPRT_CONNECTED, &xprt->state)) 2181*7ffcdaa6SOlga Kornievskaia return; 2182*7ffcdaa6SOlga Kornievskaia 2183*7ffcdaa6SOlga 
Kornievskaia if (!xprt->sending.qlen && !xprt->pending.qlen && 2184*7ffcdaa6SOlga Kornievskaia !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen)) 2185*7ffcdaa6SOlga Kornievskaia rpc_xprt_switch_remove_xprt(xps, xprt); 2186*7ffcdaa6SOlga Kornievskaia } 2187