1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * linux/net/sunrpc/xprt.c 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * This is a generic RPC call interface supporting congestion avoidance, 61da177e4SLinus Torvalds * and asynchronous calls. 71da177e4SLinus Torvalds * 81da177e4SLinus Torvalds * The interface works like this: 91da177e4SLinus Torvalds * 101da177e4SLinus Torvalds * - When a process places a call, it allocates a request slot if 111da177e4SLinus Torvalds * one is available. Otherwise, it sleeps on the backlog queue 121da177e4SLinus Torvalds * (xprt_reserve). 131da177e4SLinus Torvalds * - Next, the caller puts together the RPC message, stuffs it into 1455aa4f58SChuck Lever * the request struct, and calls xprt_transmit(). 1555aa4f58SChuck Lever * - xprt_transmit sends the message and installs the caller on the 1655ae1aabSRicardo Labiaga * transport's wait list. At the same time, if a reply is expected, 1755ae1aabSRicardo Labiaga * it installs a timer that is run after the packet's timeout has 1855ae1aabSRicardo Labiaga * expired. 191da177e4SLinus Torvalds * - When a packet arrives, the data_ready handler walks the list of 2055aa4f58SChuck Lever * pending requests for that transport. If a matching XID is found, the 211da177e4SLinus Torvalds * caller is woken up, and the timer removed. 221da177e4SLinus Torvalds * - When no reply arrives within the timeout interval, the timer is 231da177e4SLinus Torvalds * fired by the kernel and runs xprt_timer(). It either adjusts the 241da177e4SLinus Torvalds * timeout values (minor timeout) or wakes up the caller with a status 251da177e4SLinus Torvalds * of -ETIMEDOUT. 261da177e4SLinus Torvalds * - When the caller receives a notification from RPC that a reply arrived, 271da177e4SLinus Torvalds * it should release the RPC slot, and process the reply. 
281da177e4SLinus Torvalds * If the call timed out, it may choose to retry the operation by 291da177e4SLinus Torvalds * adjusting the initial timeout value, and simply calling rpc_call 301da177e4SLinus Torvalds * again. 311da177e4SLinus Torvalds * 321da177e4SLinus Torvalds * Support for async RPC is done through a set of RPC-specific scheduling 331da177e4SLinus Torvalds * primitives that `transparently' work for processes as well as async 341da177e4SLinus Torvalds * tasks that rely on callbacks. 351da177e4SLinus Torvalds * 361da177e4SLinus Torvalds * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de> 3755aa4f58SChuck Lever * 3855aa4f58SChuck Lever * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com> 391da177e4SLinus Torvalds */ 401da177e4SLinus Torvalds 41a246b010SChuck Lever #include <linux/module.h> 42a246b010SChuck Lever 431da177e4SLinus Torvalds #include <linux/types.h> 44a246b010SChuck Lever #include <linux/interrupt.h> 451da177e4SLinus Torvalds #include <linux/workqueue.h> 46bf3fcf89SChuck Lever #include <linux/net.h> 47ff839970SChuck Lever #include <linux/ktime.h> 481da177e4SLinus Torvalds 49a246b010SChuck Lever #include <linux/sunrpc/clnt.h> 5011c556b3SChuck Lever #include <linux/sunrpc/metrics.h> 51c9acb42eSTrond Myklebust #include <linux/sunrpc/bc_xprt.h> 52fda1bfefSTrond Myklebust #include <linux/rcupdate.h> 53a1231fdaSTrond Myklebust #include <linux/sched/mm.h> 541da177e4SLinus Torvalds 553705ad64SJeff Layton #include <trace/events/sunrpc.h> 563705ad64SJeff Layton 5755ae1aabSRicardo Labiaga #include "sunrpc.h" 5855ae1aabSRicardo Labiaga 591da177e4SLinus Torvalds /* 601da177e4SLinus Torvalds * Local variables 611da177e4SLinus Torvalds */ 621da177e4SLinus Torvalds 63f895b252SJeff Layton #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 641da177e4SLinus Torvalds # define RPCDBG_FACILITY RPCDBG_XPRT 651da177e4SLinus Torvalds #endif 661da177e4SLinus Torvalds 671da177e4SLinus Torvalds /* 681da177e4SLinus Torvalds * Local functions 691da177e4SLinus 
 */

static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

/* Registry of loaded transport implementations (see xprt_register_transport) */
static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * Wake-up deadline for a request sleeping on the transport: the next
 * minor (retransmit) timeout, clamped so that it never extends past the
 * request's major (total) timeout.
 */
static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			/* already registered: nothing to load */
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

/*
 * Drop the transport send lock.  If a close was requested while we held
 * the lock, defer the close to the xprtiod workqueue instead of clearing
 * XPRT_LOCKED here (the work item releases the lock when it is done).
 * The barriers pair with the test_and_set_bit() in the lock acquirers.
 */
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;	/* we already hold the lock */
		goto out_sleep;
	}
	/* No output buffer space: give the lock back and wait */
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

/* Is the transport currently throttled by its congestion window? */
static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * Mark the transport as congestion-window limited, unless the request at
 * the head of the transmit queue already holds a congestion slot (in
 * which case transmission can still make progress).
 */
static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

/* Clear the congestion-window wait flag once the window has room again. */
static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		/* No request to transmit (e.g. a connect): no need to
		 * consult the congestion window */
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

/* Take the send lock via the transport's reserve_xprt method. */
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED,
	     &xprt->state) && xprt->snd_task == task)
		return 1;	/* fast path: we already hold the lock */
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

/*
 * rpc_wake_up_first() callback: hand the send lock directly to the
 * task being woken.
 */
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

/* Pass the send lock on to the next task waiting on xprt->sending. */
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/*
 * As __xprt_lock_write_next(), but don't wake anyone while the
 * congestion window is full.
 */
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

/* Release the send lock via the transport's release_xprt method. */
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;	/* request already holds a congestion slot */
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/* Caller must hold xprt->transport_lock. */
static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		/* Retransmit timeout: halve the window, but never drop
		 * below one slot's worth */
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/* Caller must hold xprt->transport_lock. */
static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/*
 * Convert an absolute ktime to a jiffies value, handling times both
 * before and after the current instant.
 */
static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

/*
 * Total (major) timeout interval for a request: the per-retransmit
 * timeout compounded over to_retries retries, capped at to_maxval.
 */
static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

/* Push the major timeout deadline out by a full major interval. */
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

/* Push the minor (retransmit) deadline out by one retransmit interval. */
static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

/* Initialize a request's major/minor timeout deadlines at setup time. */
static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		/* Not yet connected: base the deadlines on when the task
		 * was started, not on the current time */
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_minortimeo))
		return status;	/* not yet time to retransmit */
	if (time_before(jiffies, req->rq_majortimeo)) {
		/* Minor timeout: back off the retransmit interval */
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		/* Major timeout: start over and report -ETIMEDOUT */
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

/* Work item: close a transport for which XPRT_CLOSE_WAIT was set. */
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
wake_up_bit(&xprt->state, XPRT_LOCKED); 680a1231fdaSTrond Myklebust memalloc_nofs_restore(pflags); 6811da177e4SLinus Torvalds } 6821da177e4SLinus Torvalds 6839903cd1cSChuck Lever /** 68462da3b24STrond Myklebust * xprt_disconnect_done - mark a transport as disconnected 6859903cd1cSChuck Lever * @xprt: transport to flag for disconnect 6869903cd1cSChuck Lever * 6871da177e4SLinus Torvalds */ 68862da3b24STrond Myklebust void xprt_disconnect_done(struct rpc_xprt *xprt) 6891da177e4SLinus Torvalds { 690911813d7SChuck Lever trace_xprt_disconnect_done(xprt); 691b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 6921da177e4SLinus Torvalds xprt_clear_connected(xprt); 693c544577dSTrond Myklebust xprt_clear_write_space_locked(xprt); 6948593e010SChuck Lever xprt_clear_congestion_window_wait_locked(xprt); 69527adc785STrond Myklebust xprt_wake_pending_tasks(xprt, -ENOTCONN); 696b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 6971da177e4SLinus Torvalds } 69862da3b24STrond Myklebust EXPORT_SYMBOL_GPL(xprt_disconnect_done); 6991da177e4SLinus Torvalds 70066af1e55STrond Myklebust /** 70166af1e55STrond Myklebust * xprt_force_disconnect - force a transport to disconnect 70266af1e55STrond Myklebust * @xprt: transport to disconnect 70366af1e55STrond Myklebust * 70466af1e55STrond Myklebust */ 70566af1e55STrond Myklebust void xprt_force_disconnect(struct rpc_xprt *xprt) 70666af1e55STrond Myklebust { 707911813d7SChuck Lever trace_xprt_disconnect_force(xprt); 708911813d7SChuck Lever 70966af1e55STrond Myklebust /* Don't race with the test_bit() in xprt_clear_locked() */ 710b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 71166af1e55STrond Myklebust set_bit(XPRT_CLOSE_WAIT, &xprt->state); 71266af1e55STrond Myklebust /* Try to schedule an autoclose RPC call */ 71366af1e55STrond Myklebust if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 71440a5f1b1STrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 7150445f92cSTrond Myklebust else if 
(xprt->snd_task) 7160445f92cSTrond Myklebust rpc_wake_up_queued_task_set_status(&xprt->pending, 7170445f92cSTrond Myklebust xprt->snd_task, -ENOTCONN); 718b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 71966af1e55STrond Myklebust } 720e2a4f4fbSChuck Lever EXPORT_SYMBOL_GPL(xprt_force_disconnect); 72166af1e55STrond Myklebust 7227f3a1d1eSTrond Myklebust static unsigned int 7237f3a1d1eSTrond Myklebust xprt_connect_cookie(struct rpc_xprt *xprt) 7247f3a1d1eSTrond Myklebust { 7257f3a1d1eSTrond Myklebust return READ_ONCE(xprt->connect_cookie); 7267f3a1d1eSTrond Myklebust } 7277f3a1d1eSTrond Myklebust 7287f3a1d1eSTrond Myklebust static bool 7297f3a1d1eSTrond Myklebust xprt_request_retransmit_after_disconnect(struct rpc_task *task) 7307f3a1d1eSTrond Myklebust { 7317f3a1d1eSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 7327f3a1d1eSTrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 7337f3a1d1eSTrond Myklebust 7347f3a1d1eSTrond Myklebust return req->rq_connect_cookie != xprt_connect_cookie(xprt) || 7357f3a1d1eSTrond Myklebust !xprt_connected(xprt); 7367f3a1d1eSTrond Myklebust } 7377f3a1d1eSTrond Myklebust 7387c1d71cfSTrond Myklebust /** 7397c1d71cfSTrond Myklebust * xprt_conditional_disconnect - force a transport to disconnect 7407c1d71cfSTrond Myklebust * @xprt: transport to disconnect 7417c1d71cfSTrond Myklebust * @cookie: 'connection cookie' 7427c1d71cfSTrond Myklebust * 7437c1d71cfSTrond Myklebust * This attempts to break the connection if and only if 'cookie' matches 7447c1d71cfSTrond Myklebust * the current transport 'connection cookie'. It ensures that we don't 7457c1d71cfSTrond Myklebust * try to break the connection more than once when we need to retransmit 7467c1d71cfSTrond Myklebust * a batch of RPC requests. 
7477c1d71cfSTrond Myklebust * 7487c1d71cfSTrond Myklebust */ 7497c1d71cfSTrond Myklebust void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) 7507c1d71cfSTrond Myklebust { 7517c1d71cfSTrond Myklebust /* Don't race with the test_bit() in xprt_clear_locked() */ 752b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 7537c1d71cfSTrond Myklebust if (cookie != xprt->connect_cookie) 7547c1d71cfSTrond Myklebust goto out; 7552c2ee6d2SNeilBrown if (test_bit(XPRT_CLOSING, &xprt->state)) 7567c1d71cfSTrond Myklebust goto out; 7577c1d71cfSTrond Myklebust set_bit(XPRT_CLOSE_WAIT, &xprt->state); 7587c1d71cfSTrond Myklebust /* Try to schedule an autoclose RPC call */ 7597c1d71cfSTrond Myklebust if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 76040a5f1b1STrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 7612a491991STrond Myklebust xprt_wake_pending_tasks(xprt, -EAGAIN); 7627c1d71cfSTrond Myklebust out: 763b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 7647c1d71cfSTrond Myklebust } 7657c1d71cfSTrond Myklebust 766ad3331acSTrond Myklebust static bool 767ad3331acSTrond Myklebust xprt_has_timer(const struct rpc_xprt *xprt) 768ad3331acSTrond Myklebust { 769ad3331acSTrond Myklebust return xprt->idle_timeout != 0; 770ad3331acSTrond Myklebust } 771ad3331acSTrond Myklebust 772ad3331acSTrond Myklebust static void 773ad3331acSTrond Myklebust xprt_schedule_autodisconnect(struct rpc_xprt *xprt) 774ad3331acSTrond Myklebust __must_hold(&xprt->transport_lock) 775ad3331acSTrond Myklebust { 77680d3c45fSDave Wysochanski xprt->last_used = jiffies; 77795f7691dSTrond Myklebust if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt)) 778ad3331acSTrond Myklebust mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); 779ad3331acSTrond Myklebust } 780ad3331acSTrond Myklebust 7811da177e4SLinus Torvalds static void 782ff861c4dSKees Cook xprt_init_autodisconnect(struct timer_list *t) 7831da177e4SLinus Torvalds { 784ff861c4dSKees 
Cook struct rpc_xprt *xprt = from_timer(xprt, t, timer); 7851da177e4SLinus Torvalds 78695f7691dSTrond Myklebust if (!RB_EMPTY_ROOT(&xprt->recv_queue)) 787b5e92419STrond Myklebust return; 788ad3331acSTrond Myklebust /* Reset xprt->last_used to avoid connect/autodisconnect cycling */ 789ad3331acSTrond Myklebust xprt->last_used = jiffies; 7902226feb6SChuck Lever if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 7911da177e4SLinus Torvalds return; 792b5e92419STrond Myklebust queue_work(xprtiod_workqueue, &xprt->task_cleanup); 7931da177e4SLinus Torvalds } 7941da177e4SLinus Torvalds 795718ba5b8STrond Myklebust bool xprt_lock_connect(struct rpc_xprt *xprt, 796718ba5b8STrond Myklebust struct rpc_task *task, 797718ba5b8STrond Myklebust void *cookie) 798718ba5b8STrond Myklebust { 799718ba5b8STrond Myklebust bool ret = false; 800718ba5b8STrond Myklebust 801b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 802718ba5b8STrond Myklebust if (!test_bit(XPRT_LOCKED, &xprt->state)) 803718ba5b8STrond Myklebust goto out; 804718ba5b8STrond Myklebust if (xprt->snd_task != task) 805718ba5b8STrond Myklebust goto out; 806718ba5b8STrond Myklebust xprt->snd_task = cookie; 807718ba5b8STrond Myklebust ret = true; 808718ba5b8STrond Myklebust out: 809b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 810718ba5b8STrond Myklebust return ret; 811718ba5b8STrond Myklebust } 812718ba5b8STrond Myklebust 813718ba5b8STrond Myklebust void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) 814718ba5b8STrond Myklebust { 815b5e92419STrond Myklebust spin_lock(&xprt->transport_lock); 816718ba5b8STrond Myklebust if (xprt->snd_task != cookie) 817718ba5b8STrond Myklebust goto out; 818718ba5b8STrond Myklebust if (!test_bit(XPRT_LOCKED, &xprt->state)) 819718ba5b8STrond Myklebust goto out; 820718ba5b8STrond Myklebust xprt->snd_task =NULL; 821718ba5b8STrond Myklebust xprt->ops->release_xprt(xprt, NULL); 822ad3331acSTrond Myklebust xprt_schedule_autodisconnect(xprt); 823718ba5b8STrond Myklebust 
out: 824b5e92419STrond Myklebust spin_unlock(&xprt->transport_lock); 82579234c3dSTrond Myklebust wake_up_bit(&xprt->state, XPRT_LOCKED); 826718ba5b8STrond Myklebust } 827718ba5b8STrond Myklebust 8289903cd1cSChuck Lever /** 8299903cd1cSChuck Lever * xprt_connect - schedule a transport connect operation 8309903cd1cSChuck Lever * @task: RPC task that is requesting the connect 8311da177e4SLinus Torvalds * 8321da177e4SLinus Torvalds */ 8331da177e4SLinus Torvalds void xprt_connect(struct rpc_task *task) 8341da177e4SLinus Torvalds { 835ad2368d6STrond Myklebust struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 8361da177e4SLinus Torvalds 83746121cf7SChuck Lever dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid, 8381da177e4SLinus Torvalds xprt, (xprt_connected(xprt) ? "is" : "is not")); 8391da177e4SLinus Torvalds 840ec739ef0SChuck Lever if (!xprt_bound(xprt)) { 84101d37c42STrond Myklebust task->tk_status = -EAGAIN; 8421da177e4SLinus Torvalds return; 8431da177e4SLinus Torvalds } 8441da177e4SLinus Torvalds if (!xprt_lock_write(xprt, task)) 8451da177e4SLinus Torvalds return; 846feb8ca37STrond Myklebust 847911813d7SChuck Lever if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) { 848911813d7SChuck Lever trace_xprt_disconnect_cleanup(xprt); 849feb8ca37STrond Myklebust xprt->ops->close(xprt); 850911813d7SChuck Lever } 851feb8ca37STrond Myklebust 852718ba5b8STrond Myklebust if (!xprt_connected(xprt)) { 8532c2ee6d2SNeilBrown task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; 8546b2e6856STrond Myklebust rpc_sleep_on_timeout(&xprt->pending, task, NULL, 8559e910bffSTrond Myklebust xprt_request_timeout(task->tk_rqstp)); 8560b9e7943STrond Myklebust 8570b9e7943STrond Myklebust if (test_bit(XPRT_CLOSING, &xprt->state)) 8580b9e7943STrond Myklebust return; 8590b9e7943STrond Myklebust if (xprt_test_and_set_connecting(xprt)) 8600b9e7943STrond Myklebust return; 8610a9a4304STrond Myklebust /* Race breaker */ 8620a9a4304STrond Myklebust if (!xprt_connected(xprt)) { 
863262ca07dSChuck Lever xprt->stat.connect_start = jiffies; 8641b092092STrond Myklebust xprt->ops->connect(xprt, task); 8650a9a4304STrond Myklebust } else { 8660a9a4304STrond Myklebust xprt_clear_connecting(xprt); 8670a9a4304STrond Myklebust task->tk_status = 0; 8680a9a4304STrond Myklebust rpc_wake_up_queued_task(&xprt->pending, task); 8690a9a4304STrond Myklebust } 8701da177e4SLinus Torvalds } 871718ba5b8STrond Myklebust xprt_release_write(xprt, task); 8721da177e4SLinus Torvalds } 8731da177e4SLinus Torvalds 874675dd90aSChuck Lever /** 875675dd90aSChuck Lever * xprt_reconnect_delay - compute the wait before scheduling a connect 876675dd90aSChuck Lever * @xprt: transport instance 877675dd90aSChuck Lever * 878675dd90aSChuck Lever */ 879675dd90aSChuck Lever unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt) 880675dd90aSChuck Lever { 881675dd90aSChuck Lever unsigned long start, now = jiffies; 882675dd90aSChuck Lever 883675dd90aSChuck Lever start = xprt->stat.connect_start + xprt->reestablish_timeout; 884675dd90aSChuck Lever if (time_after(start, now)) 885675dd90aSChuck Lever return start - now; 886675dd90aSChuck Lever return 0; 887675dd90aSChuck Lever } 888675dd90aSChuck Lever EXPORT_SYMBOL_GPL(xprt_reconnect_delay); 889675dd90aSChuck Lever 890675dd90aSChuck Lever /** 891675dd90aSChuck Lever * xprt_reconnect_backoff - compute the new re-establish timeout 892675dd90aSChuck Lever * @xprt: transport instance 893675dd90aSChuck Lever * @init_to: initial reestablish timeout 894675dd90aSChuck Lever * 895675dd90aSChuck Lever */ 896675dd90aSChuck Lever void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to) 897675dd90aSChuck Lever { 898675dd90aSChuck Lever xprt->reestablish_timeout <<= 1; 899675dd90aSChuck Lever if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) 900675dd90aSChuck Lever xprt->reestablish_timeout = xprt->max_reconnect_timeout; 901675dd90aSChuck Lever if (xprt->reestablish_timeout < init_to) 902675dd90aSChuck Lever 
xprt->reestablish_timeout = init_to; 903675dd90aSChuck Lever } 904675dd90aSChuck Lever EXPORT_SYMBOL_GPL(xprt_reconnect_backoff); 905675dd90aSChuck Lever 90695f7691dSTrond Myklebust enum xprt_xid_rb_cmp { 90795f7691dSTrond Myklebust XID_RB_EQUAL, 90895f7691dSTrond Myklebust XID_RB_LEFT, 90995f7691dSTrond Myklebust XID_RB_RIGHT, 91095f7691dSTrond Myklebust }; 91195f7691dSTrond Myklebust static enum xprt_xid_rb_cmp 91295f7691dSTrond Myklebust xprt_xid_cmp(__be32 xid1, __be32 xid2) 91395f7691dSTrond Myklebust { 91495f7691dSTrond Myklebust if (xid1 == xid2) 91595f7691dSTrond Myklebust return XID_RB_EQUAL; 91695f7691dSTrond Myklebust if ((__force u32)xid1 < (__force u32)xid2) 91795f7691dSTrond Myklebust return XID_RB_LEFT; 91895f7691dSTrond Myklebust return XID_RB_RIGHT; 91995f7691dSTrond Myklebust } 92095f7691dSTrond Myklebust 92195f7691dSTrond Myklebust static struct rpc_rqst * 92295f7691dSTrond Myklebust xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid) 92395f7691dSTrond Myklebust { 92495f7691dSTrond Myklebust struct rb_node *n = xprt->recv_queue.rb_node; 92595f7691dSTrond Myklebust struct rpc_rqst *req; 92695f7691dSTrond Myklebust 92795f7691dSTrond Myklebust while (n != NULL) { 92895f7691dSTrond Myklebust req = rb_entry(n, struct rpc_rqst, rq_recv); 92995f7691dSTrond Myklebust switch (xprt_xid_cmp(xid, req->rq_xid)) { 93095f7691dSTrond Myklebust case XID_RB_LEFT: 93195f7691dSTrond Myklebust n = n->rb_left; 93295f7691dSTrond Myklebust break; 93395f7691dSTrond Myklebust case XID_RB_RIGHT: 93495f7691dSTrond Myklebust n = n->rb_right; 93595f7691dSTrond Myklebust break; 93695f7691dSTrond Myklebust case XID_RB_EQUAL: 93795f7691dSTrond Myklebust return req; 93895f7691dSTrond Myklebust } 93995f7691dSTrond Myklebust } 94095f7691dSTrond Myklebust return NULL; 94195f7691dSTrond Myklebust } 94295f7691dSTrond Myklebust 94395f7691dSTrond Myklebust static void 94495f7691dSTrond Myklebust xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new) 94595f7691dSTrond 
Myklebust { 94695f7691dSTrond Myklebust struct rb_node **p = &xprt->recv_queue.rb_node; 94795f7691dSTrond Myklebust struct rb_node *n = NULL; 94895f7691dSTrond Myklebust struct rpc_rqst *req; 94995f7691dSTrond Myklebust 95095f7691dSTrond Myklebust while (*p != NULL) { 95195f7691dSTrond Myklebust n = *p; 95295f7691dSTrond Myklebust req = rb_entry(n, struct rpc_rqst, rq_recv); 95395f7691dSTrond Myklebust switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) { 95495f7691dSTrond Myklebust case XID_RB_LEFT: 95595f7691dSTrond Myklebust p = &n->rb_left; 95695f7691dSTrond Myklebust break; 95795f7691dSTrond Myklebust case XID_RB_RIGHT: 95895f7691dSTrond Myklebust p = &n->rb_right; 95995f7691dSTrond Myklebust break; 96095f7691dSTrond Myklebust case XID_RB_EQUAL: 96195f7691dSTrond Myklebust WARN_ON_ONCE(new != req); 96295f7691dSTrond Myklebust return; 96395f7691dSTrond Myklebust } 96495f7691dSTrond Myklebust } 96595f7691dSTrond Myklebust rb_link_node(&new->rq_recv, n, p); 96695f7691dSTrond Myklebust rb_insert_color(&new->rq_recv, &xprt->recv_queue); 96795f7691dSTrond Myklebust } 96895f7691dSTrond Myklebust 96995f7691dSTrond Myklebust static void 97095f7691dSTrond Myklebust xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req) 97195f7691dSTrond Myklebust { 97295f7691dSTrond Myklebust rb_erase(&req->rq_recv, &xprt->recv_queue); 97395f7691dSTrond Myklebust } 97495f7691dSTrond Myklebust 9759903cd1cSChuck Lever /** 9769903cd1cSChuck Lever * xprt_lookup_rqst - find an RPC request corresponding to an XID 9779903cd1cSChuck Lever * @xprt: transport on which the original request was transmitted 9789903cd1cSChuck Lever * @xid: RPC XID of incoming reply 9799903cd1cSChuck Lever * 98075c84151STrond Myklebust * Caller holds xprt->queue_lock. 
9811da177e4SLinus Torvalds */ 982d8ed029dSAlexey Dobriyan struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) 9831da177e4SLinus Torvalds { 9848f3a6de3SPavel Emelyanov struct rpc_rqst *entry; 9851da177e4SLinus Torvalds 98695f7691dSTrond Myklebust entry = xprt_request_rb_find(xprt, xid); 98795f7691dSTrond Myklebust if (entry != NULL) { 9883705ad64SJeff Layton trace_xprt_lookup_rqst(xprt, xid, 0); 9890b87a46bSChuck Lever entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime); 990262ca07dSChuck Lever return entry; 9913705ad64SJeff Layton } 99246121cf7SChuck Lever 99346121cf7SChuck Lever dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", 99446121cf7SChuck Lever ntohl(xid)); 9953705ad64SJeff Layton trace_xprt_lookup_rqst(xprt, xid, -ENOENT); 996262ca07dSChuck Lever xprt->stat.bad_xids++; 997262ca07dSChuck Lever return NULL; 9981da177e4SLinus Torvalds } 99912444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_lookup_rqst); 10001da177e4SLinus Torvalds 1001cf9946cdSTrond Myklebust static bool 1002cf9946cdSTrond Myklebust xprt_is_pinned_rqst(struct rpc_rqst *req) 1003cf9946cdSTrond Myklebust { 1004cf9946cdSTrond Myklebust return atomic_read(&req->rq_pin) != 0; 1005cf9946cdSTrond Myklebust } 1006cf9946cdSTrond Myklebust 1007729749bbSTrond Myklebust /** 1008729749bbSTrond Myklebust * xprt_pin_rqst - Pin a request on the transport receive list 1009729749bbSTrond Myklebust * @req: Request to pin 1010729749bbSTrond Myklebust * 1011729749bbSTrond Myklebust * Caller must ensure this is atomic with the call to xprt_lookup_rqst() 10121f7d1c73SChuck Lever * so should be holding xprt->queue_lock. 
1013729749bbSTrond Myklebust */ 1014729749bbSTrond Myklebust void xprt_pin_rqst(struct rpc_rqst *req) 1015729749bbSTrond Myklebust { 1016cf9946cdSTrond Myklebust atomic_inc(&req->rq_pin); 1017729749bbSTrond Myklebust } 10189590d083SChuck Lever EXPORT_SYMBOL_GPL(xprt_pin_rqst); 1019729749bbSTrond Myklebust 1020729749bbSTrond Myklebust /** 1021729749bbSTrond Myklebust * xprt_unpin_rqst - Unpin a request on the transport receive list 1022729749bbSTrond Myklebust * @req: Request to pin 1023729749bbSTrond Myklebust * 10241f7d1c73SChuck Lever * Caller should be holding xprt->queue_lock. 1025729749bbSTrond Myklebust */ 1026729749bbSTrond Myklebust void xprt_unpin_rqst(struct rpc_rqst *req) 1027729749bbSTrond Myklebust { 1028cf9946cdSTrond Myklebust if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) { 1029cf9946cdSTrond Myklebust atomic_dec(&req->rq_pin); 1030cf9946cdSTrond Myklebust return; 1031cf9946cdSTrond Myklebust } 1032cf9946cdSTrond Myklebust if (atomic_dec_and_test(&req->rq_pin)) 1033cf9946cdSTrond Myklebust wake_up_var(&req->rq_pin); 1034729749bbSTrond Myklebust } 10359590d083SChuck Lever EXPORT_SYMBOL_GPL(xprt_unpin_rqst); 1036729749bbSTrond Myklebust 1037729749bbSTrond Myklebust static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req) 1038729749bbSTrond Myklebust { 1039cf9946cdSTrond Myklebust wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req)); 1040729749bbSTrond Myklebust } 1041729749bbSTrond Myklebust 1042edc81dcdSTrond Myklebust static bool 1043edc81dcdSTrond Myklebust xprt_request_data_received(struct rpc_task *task) 1044edc81dcdSTrond Myklebust { 1045edc81dcdSTrond Myklebust return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) && 1046edc81dcdSTrond Myklebust READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0; 1047edc81dcdSTrond Myklebust } 1048edc81dcdSTrond Myklebust 1049edc81dcdSTrond Myklebust static bool 1050edc81dcdSTrond Myklebust xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req) 
1051edc81dcdSTrond Myklebust { 1052edc81dcdSTrond Myklebust return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) && 1053edc81dcdSTrond Myklebust READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0; 1054edc81dcdSTrond Myklebust } 1055edc81dcdSTrond Myklebust 1056edc81dcdSTrond Myklebust /** 1057edc81dcdSTrond Myklebust * xprt_request_enqueue_receive - Add an request to the receive queue 1058edc81dcdSTrond Myklebust * @task: RPC task 1059edc81dcdSTrond Myklebust * 1060edc81dcdSTrond Myklebust */ 1061edc81dcdSTrond Myklebust void 1062edc81dcdSTrond Myklebust xprt_request_enqueue_receive(struct rpc_task *task) 1063edc81dcdSTrond Myklebust { 1064edc81dcdSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 1065edc81dcdSTrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 1066edc81dcdSTrond Myklebust 1067edc81dcdSTrond Myklebust if (!xprt_request_need_enqueue_receive(task, req)) 1068edc81dcdSTrond Myklebust return; 106975369089STrond Myklebust 107075369089STrond Myklebust xprt_request_prepare(task->tk_rqstp); 1071edc81dcdSTrond Myklebust spin_lock(&xprt->queue_lock); 1072edc81dcdSTrond Myklebust 1073edc81dcdSTrond Myklebust /* Update the softirq receive buffer */ 1074edc81dcdSTrond Myklebust memcpy(&req->rq_private_buf, &req->rq_rcv_buf, 1075edc81dcdSTrond Myklebust sizeof(req->rq_private_buf)); 1076edc81dcdSTrond Myklebust 1077edc81dcdSTrond Myklebust /* Add request to the receive list */ 107895f7691dSTrond Myklebust xprt_request_rb_insert(xprt, req); 1079edc81dcdSTrond Myklebust set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate); 1080edc81dcdSTrond Myklebust spin_unlock(&xprt->queue_lock); 1081edc81dcdSTrond Myklebust 1082edc81dcdSTrond Myklebust /* Turn off autodisconnect */ 1083edc81dcdSTrond Myklebust del_singleshot_timer_sync(&xprt->timer); 1084edc81dcdSTrond Myklebust } 1085edc81dcdSTrond Myklebust 1086edc81dcdSTrond Myklebust /** 1087edc81dcdSTrond Myklebust * xprt_request_dequeue_receive_locked - Remove a request from the receive queue 1088edc81dcdSTrond 
Myklebust * @task: RPC task 1089edc81dcdSTrond Myklebust * 1090edc81dcdSTrond Myklebust * Caller must hold xprt->queue_lock. 1091edc81dcdSTrond Myklebust */ 1092edc81dcdSTrond Myklebust static void 1093edc81dcdSTrond Myklebust xprt_request_dequeue_receive_locked(struct rpc_task *task) 1094edc81dcdSTrond Myklebust { 109595f7691dSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 109695f7691dSTrond Myklebust 1097edc81dcdSTrond Myklebust if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) 109895f7691dSTrond Myklebust xprt_request_rb_remove(req->rq_xprt, req); 1099edc81dcdSTrond Myklebust } 1100edc81dcdSTrond Myklebust 1101ecd465eeSChuck Lever /** 1102ecd465eeSChuck Lever * xprt_update_rtt - Update RPC RTT statistics 1103ecd465eeSChuck Lever * @task: RPC request that recently completed 1104ecd465eeSChuck Lever * 110575c84151STrond Myklebust * Caller holds xprt->queue_lock. 1106ecd465eeSChuck Lever */ 1107ecd465eeSChuck Lever void xprt_update_rtt(struct rpc_task *task) 11081da177e4SLinus Torvalds { 11091570c1e4SChuck Lever struct rpc_rqst *req = task->tk_rqstp; 11101570c1e4SChuck Lever struct rpc_rtt *rtt = task->tk_client->cl_rtt; 111195c96174SEric Dumazet unsigned int timer = task->tk_msg.rpc_proc->p_timer; 1112d60dbb20STrond Myklebust long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); 11131570c1e4SChuck Lever 11141da177e4SLinus Torvalds if (timer) { 11151da177e4SLinus Torvalds if (req->rq_ntrans == 1) 1116ff839970SChuck Lever rpc_update_rtt(rtt, timer, m); 11171570c1e4SChuck Lever rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); 11181da177e4SLinus Torvalds } 11191da177e4SLinus Torvalds } 1120ecd465eeSChuck Lever EXPORT_SYMBOL_GPL(xprt_update_rtt); 11211da177e4SLinus Torvalds 11221570c1e4SChuck Lever /** 11231570c1e4SChuck Lever * xprt_complete_rqst - called when reply processing is complete 11241570c1e4SChuck Lever * @task: RPC request that recently completed 11251570c1e4SChuck Lever * @copied: actual number of bytes received from the transport 
11261570c1e4SChuck Lever * 112775c84151STrond Myklebust * Caller holds xprt->queue_lock. 11281570c1e4SChuck Lever */ 11291570c1e4SChuck Lever void xprt_complete_rqst(struct rpc_task *task, int copied) 11301570c1e4SChuck Lever { 11311570c1e4SChuck Lever struct rpc_rqst *req = task->tk_rqstp; 1132fda13939STrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 11331da177e4SLinus Torvalds 11343705ad64SJeff Layton trace_xprt_complete_rqst(xprt, req->rq_xid, copied); 11351da177e4SLinus Torvalds 1136fda13939STrond Myklebust xprt->stat.recvs++; 1137ef759a2eSChuck Lever 11381e799b67STrond Myklebust req->rq_private_buf.len = copied; 1139dd2b63d0SRicardo Labiaga /* Ensure all writes are done before we update */ 1140dd2b63d0SRicardo Labiaga /* req->rq_reply_bytes_recvd */ 114143ac3f29STrond Myklebust smp_wmb(); 1142dd2b63d0SRicardo Labiaga req->rq_reply_bytes_recvd = copied; 1143edc81dcdSTrond Myklebust xprt_request_dequeue_receive_locked(task); 1144fda13939STrond Myklebust rpc_wake_up_queued_task(&xprt->pending, task); 11451da177e4SLinus Torvalds } 114612444809S\"Talpey, Thomas\ EXPORT_SYMBOL_GPL(xprt_complete_rqst); 11471da177e4SLinus Torvalds 114846c0ee8bSChuck Lever static void xprt_timer(struct rpc_task *task) 11491da177e4SLinus Torvalds { 11501da177e4SLinus Torvalds struct rpc_rqst *req = task->tk_rqstp; 11511da177e4SLinus Torvalds struct rpc_xprt *xprt = req->rq_xprt; 11521da177e4SLinus Torvalds 11535d00837bSTrond Myklebust if (task->tk_status != -ETIMEDOUT) 11545d00837bSTrond Myklebust return; 115546c0ee8bSChuck Lever 115682476d9fSChuck Lever trace_xprt_timer(xprt, req->rq_xid, task->tk_status); 1157dd2b63d0SRicardo Labiaga if (!req->rq_reply_bytes_recvd) { 115846c0ee8bSChuck Lever if (xprt->ops->timer) 11596a24dfb6STrond Myklebust xprt->ops->timer(xprt, task); 11605d00837bSTrond Myklebust } else 11615d00837bSTrond Myklebust task->tk_status = 0; 11621da177e4SLinus Torvalds } 11631da177e4SLinus Torvalds 11649903cd1cSChuck Lever /** 11658ba6a92dSTrond Myklebust * 
xprt_wait_for_reply_request_def - wait for reply 11668ba6a92dSTrond Myklebust * @task: pointer to rpc_task 11678ba6a92dSTrond Myklebust * 11688ba6a92dSTrond Myklebust * Set a request's retransmit timeout based on the transport's 11698ba6a92dSTrond Myklebust * default timeout parameters. Used by transports that don't adjust 11708ba6a92dSTrond Myklebust * the retransmit timeout based on round-trip time estimation, 11718ba6a92dSTrond Myklebust * and put the task to sleep on the pending queue. 11728ba6a92dSTrond Myklebust */ 11738ba6a92dSTrond Myklebust void xprt_wait_for_reply_request_def(struct rpc_task *task) 11748ba6a92dSTrond Myklebust { 11758ba6a92dSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 11768ba6a92dSTrond Myklebust 11776b2e6856STrond Myklebust rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer, 11789e910bffSTrond Myklebust xprt_request_timeout(req)); 11798ba6a92dSTrond Myklebust } 11808ba6a92dSTrond Myklebust EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def); 11818ba6a92dSTrond Myklebust 11828ba6a92dSTrond Myklebust /** 11838ba6a92dSTrond Myklebust * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator 11848ba6a92dSTrond Myklebust * @task: pointer to rpc_task 11858ba6a92dSTrond Myklebust * 11868ba6a92dSTrond Myklebust * Set a request's retransmit timeout using the RTT estimator, 11878ba6a92dSTrond Myklebust * and put the task to sleep on the pending queue. 
11888ba6a92dSTrond Myklebust */ 11898ba6a92dSTrond Myklebust void xprt_wait_for_reply_request_rtt(struct rpc_task *task) 11908ba6a92dSTrond Myklebust { 11918ba6a92dSTrond Myklebust int timer = task->tk_msg.rpc_proc->p_timer; 11928ba6a92dSTrond Myklebust struct rpc_clnt *clnt = task->tk_client; 11938ba6a92dSTrond Myklebust struct rpc_rtt *rtt = clnt->cl_rtt; 11948ba6a92dSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 11958ba6a92dSTrond Myklebust unsigned long max_timeout = clnt->cl_timeout->to_maxval; 11966b2e6856STrond Myklebust unsigned long timeout; 11978ba6a92dSTrond Myklebust 11986b2e6856STrond Myklebust timeout = rpc_calc_rto(rtt, timer); 11996b2e6856STrond Myklebust timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; 12006b2e6856STrond Myklebust if (timeout > max_timeout || timeout == 0) 12016b2e6856STrond Myklebust timeout = max_timeout; 12026b2e6856STrond Myklebust rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer, 12036b2e6856STrond Myklebust jiffies + timeout); 12048ba6a92dSTrond Myklebust } 12058ba6a92dSTrond Myklebust EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt); 12068ba6a92dSTrond Myklebust 12078ba6a92dSTrond Myklebust /** 12087f3a1d1eSTrond Myklebust * xprt_request_wait_receive - wait for the reply to an RPC request 12097f3a1d1eSTrond Myklebust * @task: RPC task about to send a request 12107f3a1d1eSTrond Myklebust * 12117f3a1d1eSTrond Myklebust */ 12127f3a1d1eSTrond Myklebust void xprt_request_wait_receive(struct rpc_task *task) 12137f3a1d1eSTrond Myklebust { 12147f3a1d1eSTrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 12157f3a1d1eSTrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 12167f3a1d1eSTrond Myklebust 12177f3a1d1eSTrond Myklebust if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) 12187f3a1d1eSTrond Myklebust return; 12197f3a1d1eSTrond Myklebust /* 12207f3a1d1eSTrond Myklebust * Sleep on the pending queue if we're expecting a reply. 
12217f3a1d1eSTrond Myklebust * The spinlock ensures atomicity between the test of 12227f3a1d1eSTrond Myklebust * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on(). 12237f3a1d1eSTrond Myklebust */ 12247f3a1d1eSTrond Myklebust spin_lock(&xprt->queue_lock); 12257f3a1d1eSTrond Myklebust if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) { 12268ba6a92dSTrond Myklebust xprt->ops->wait_for_reply_request(task); 12277f3a1d1eSTrond Myklebust /* 12287f3a1d1eSTrond Myklebust * Send an extra queue wakeup call if the 12297f3a1d1eSTrond Myklebust * connection was dropped in case the call to 12307f3a1d1eSTrond Myklebust * rpc_sleep_on() raced. 12317f3a1d1eSTrond Myklebust */ 12327f3a1d1eSTrond Myklebust if (xprt_request_retransmit_after_disconnect(task)) 12337f3a1d1eSTrond Myklebust rpc_wake_up_queued_task_set_status(&xprt->pending, 12347f3a1d1eSTrond Myklebust task, -ENOTCONN); 12357f3a1d1eSTrond Myklebust } 12367f3a1d1eSTrond Myklebust spin_unlock(&xprt->queue_lock); 12377f3a1d1eSTrond Myklebust } 12387f3a1d1eSTrond Myklebust 1239944b0429STrond Myklebust static bool 1240944b0429STrond Myklebust xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req) 1241944b0429STrond Myklebust { 1242762e4e67STrond Myklebust return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); 1243944b0429STrond Myklebust } 1244944b0429STrond Myklebust 1245944b0429STrond Myklebust /** 1246944b0429STrond Myklebust * xprt_request_enqueue_transmit - queue a task for transmission 1247944b0429STrond Myklebust * @task: pointer to rpc_task 1248944b0429STrond Myklebust * 1249944b0429STrond Myklebust * Add a task to the transmission queue. 
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			/* Insert before the first entry without a credit. */
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			/*
			 * Swap-out traffic jumps ahead of ordinary requests,
			 * but not ahead of credited or partially-sent ones,
			 * and not ahead of other swapper requests.
			 */
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			/*
			 * Requests without an rq_seqno are grouped with an
			 * already-queued request from the same tk_owner on
			 * that request's rq_xmit2 sub-list, so the owner's
			 * requests are transmitted together.
			 */
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		/* Default: append to the tail of the transmit queue. */
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
		trace_xprt_enq_xmit(task, 4);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		/* req is on the main queue; unlink it ... */
		list_del(&req->rq_xmit);
		/*
		 * ... and if it headed a group of same-owner requests,
		 * promote the next group member onto the main queue.
		 */
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		/* req was only linked on a group's rq_xmit2 sub-list. */
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
1355cc204d01STrond Myklebust */ 1356cc204d01STrond Myklebust void 1357cc204d01STrond Myklebust xprt_request_dequeue_xprt(struct rpc_task *task) 1358cc204d01STrond Myklebust { 1359cc204d01STrond Myklebust struct rpc_rqst *req = task->tk_rqstp; 1360cc204d01STrond Myklebust struct rpc_xprt *xprt = req->rq_xprt; 1361cc204d01STrond Myklebust 1362cc204d01STrond Myklebust if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) || 1363cc204d01STrond Myklebust test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) || 1364cc204d01STrond Myklebust xprt_is_pinned_rqst(req)) { 1365cc204d01STrond Myklebust spin_lock(&xprt->queue_lock); 1366cc204d01STrond Myklebust xprt_request_dequeue_transmit_locked(task); 1367cc204d01STrond Myklebust xprt_request_dequeue_receive_locked(task); 1368cc204d01STrond Myklebust while (xprt_is_pinned_rqst(req)) { 1369cc204d01STrond Myklebust set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); 1370cc204d01STrond Myklebust spin_unlock(&xprt->queue_lock); 1371cc204d01STrond Myklebust xprt_wait_on_pinned_rqst(req); 1372cc204d01STrond Myklebust spin_lock(&xprt->queue_lock); 1373cc204d01STrond Myklebust clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); 1374cc204d01STrond Myklebust } 1375cc204d01STrond Myklebust spin_unlock(&xprt->queue_lock); 1376cc204d01STrond Myklebust } 1377cc204d01STrond Myklebust } 1378cc204d01STrond Myklebust 1379cc204d01STrond Myklebust /** 13809d96acbcSTrond Myklebust * xprt_request_prepare - prepare an encoded request for transport 13819d96acbcSTrond Myklebust * @req: pointer to rpc_rqst 13829d96acbcSTrond Myklebust * 13839d96acbcSTrond Myklebust * Calls into the transport layer to do whatever is needed to prepare 13849d96acbcSTrond Myklebust * the request for transmission or receive. 
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	/* prepare_request is an optional transport method. */
	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 * Returns true if the transport write lock was acquired, false if the
 * caller should back off (another task holds the lock).
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;

	}
	return true;
}

/*
 * Release the transport write lock taken in xprt_prepare_transmit().
 */
void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	/* Checks that only apply before the first byte has gone out. */
	if (!req->rq_bytes_sent) {
		/* Reply already arrived: nothing to transmit. */
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		/* Send failed: undo the optimistic rq_ntrans bump. */
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	/* transport_lock protects the per-transport statistics below. */
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	/* Remember which connection instance this request went out on. */
	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		/* Pin next so it survives while queue_lock is dropped. */
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		/*
		 * A re-encode failure (-EBADMSG) on somebody else's
		 * request should not stop us draining the queue.
		 */
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			/* Keep draining until @task itself is done. */
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

/* Mark the transport congested and park @task on the backlog queue. */
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

/*
 * Wake the next backlogged task; clear XPRT_CONGESTED once the
 * backlog queue is empty.
 */
static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

/*
 * If the transport is congested, queue @task on the backlog and return
 * true.  The XPRT_CONGESTED bit is re-tested under reserve_lock to
 * close the race with xprt_wake_up_backlog().
 */
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

/*
 * Allocate an extra request slot, up to xprt->max_reqs.
 * Called and returns with xprt->reserve_lock held; the lock is dropped
 * around kzalloc() because the allocation may block.  num_reqs is bumped
 * optimistically before the lock is dropped and rolled back on failure.
 * Returns the new slot, ERR_PTR(-EAGAIN) if the limit was reached, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

/*
 * Free a dynamically allocated slot if we are above the preallocated
 * minimum.  Caller holds xprt->reserve_lock.  Returns true if @req
 * was freed, false if it should be returned to the free list instead.
 */
static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

/*
 * Default alloc_slot transport method: hand @task a request slot from
 * the free list, growing the slot table dynamically if permitted.
 * On failure, sets task->tk_status to -ENOMEM or -EAGAIN (the latter
 * after queueing the task on the backlog).
 */
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

/*
 * Default free_slot transport method: return @req to the free list
 * (or free it outright if dynamically allocated), then wake a
 * backlogged task, if any.
 */
void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

/* Release every slot remaining on the transport's free list. */
static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

/*
 * Allocate and initialize an rpc_xprt of @size bytes (the transport's
 * private structure embeds struct rpc_xprt), preallocating
 * @num_prealloc request slots and allowing growth up to @max_alloc.
 * Returns the new transport, or NULL on allocation failure.
 */
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

/* Free a transport allocated with xprt_alloc(). */
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	/* Readers may still hold RCU references; defer the final free. */
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

/*
 * Seed rq_connect_cookie one behind the transport's current cookie so
 * the request is treated as not yet sent on this connection.
 */
static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

/* Hand out the next XID; reserve_lock serializes the counter. */
static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

/* Randomize the starting XID for this transport. */
static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

/*
 * Initialize the rpc_rqst that was just allocated for @task:
 * link it to the task and transport, assign an XID, and reset the
 * send/receive buffers and timeout state.
 */
static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/*
 * Ask the transport for a slot via its alloc_slot method; on success
 * (task->tk_rqstp set) initialize the new request.
 */
static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	/* Already holds a slot: nothing to do. */
	if (task->tk_rqstp != NULL)
		return;

	/* -EAGAIN is the result unless xprt_do_reserve() succeeds. */
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		/* No slot held; just drop the write lock if we own it. */
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	/* Remove the request from the transmit and receive queues. */
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	/* Arm the idle-disconnect timer now that this request is done. */
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	/* Backchannel-preallocated slots go back to the bc pool instead. */
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
/*
 * Attach a preallocated backchannel request to @task and mark its
 * send buffer as already XDR encoded.
 */
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
1860902c5887STrond Myklebust */ 1861902c5887STrond Myklebust xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + 1862902c5887STrond Myklebust xbufp->tail[0].iov_len; 1863902c5887STrond Myklebust } 1864902c5887STrond Myklebust #endif 1865902c5887STrond Myklebust 186621de0a95STrond Myklebust static void xprt_init(struct rpc_xprt *xprt, struct net *net) 1867c2866763SChuck Lever { 186830c5116bSTrond Myklebust kref_init(&xprt->kref); 1869c2866763SChuck Lever 1870c2866763SChuck Lever spin_lock_init(&xprt->transport_lock); 1871c2866763SChuck Lever spin_lock_init(&xprt->reserve_lock); 187275c84151STrond Myklebust spin_lock_init(&xprt->queue_lock); 1873c2866763SChuck Lever 1874c2866763SChuck Lever INIT_LIST_HEAD(&xprt->free); 187595f7691dSTrond Myklebust xprt->recv_queue = RB_ROOT; 1876944b0429STrond Myklebust INIT_LIST_HEAD(&xprt->xmit_queue); 18779e00abc3STrond Myklebust #if defined(CONFIG_SUNRPC_BACKCHANNEL) 1878f9acac1aSRicardo Labiaga spin_lock_init(&xprt->bc_pa_lock); 1879f9acac1aSRicardo Labiaga INIT_LIST_HEAD(&xprt->bc_pa_list); 18809e00abc3STrond Myklebust #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 188180b14d5eSTrond Myklebust INIT_LIST_HEAD(&xprt->xprt_switch); 1882f9acac1aSRicardo Labiaga 1883c2866763SChuck Lever xprt->last_used = jiffies; 1884c2866763SChuck Lever xprt->cwnd = RPC_INITCWND; 1885a509050bSChuck Lever xprt->bind_index = 0; 1886c2866763SChuck Lever 1887c2866763SChuck Lever rpc_init_wait_queue(&xprt->binding, "xprt_binding"); 1888c2866763SChuck Lever rpc_init_wait_queue(&xprt->pending, "xprt_pending"); 188979c99152STrond Myklebust rpc_init_wait_queue(&xprt->sending, "xprt_sending"); 1890c2866763SChuck Lever rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); 1891c2866763SChuck Lever 1892c2866763SChuck Lever xprt_init_xid(xprt); 1893c2866763SChuck Lever 189421de0a95STrond Myklebust xprt->xprt_net = get_net(net); 18958d9266ffSTrond Myklebust } 18968d9266ffSTrond Myklebust 18978d9266ffSTrond Myklebust /** 18988d9266ffSTrond Myklebust * 
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 * Returns a new transport on success, or an ERR_PTR on failure
 * (-EIO if no matching transport class is registered).
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	/* Find the registered transport class matching args->ident. */
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	/* Only arm an autodisconnect timer if the transport uses one. */
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}

/* Deferred destruction, run from the system workqueue (see xprt_destroy())
 * so that receive work running on rpciod can be flushed safely. */
static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/* Stop the autodisconnect timer before tearing anything down. */
	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

/* kref release callback: invoked when the last reference is dropped. */
static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}
200030c5116bSTrond Myklebust * @xprt: pointer to the transport 200130c5116bSTrond Myklebust * 200230c5116bSTrond Myklebust */ 200330c5116bSTrond Myklebust struct rpc_xprt *xprt_get(struct rpc_xprt *xprt) 200430c5116bSTrond Myklebust { 200530c5116bSTrond Myklebust if (xprt != NULL && kref_get_unless_zero(&xprt->kref)) 200630c5116bSTrond Myklebust return xprt; 200730c5116bSTrond Myklebust return NULL; 200830c5116bSTrond Myklebust } 200930c5116bSTrond Myklebust EXPORT_SYMBOL_GPL(xprt_get); 201030c5116bSTrond Myklebust 20116b6ca86bSTrond Myklebust /** 20126b6ca86bSTrond Myklebust * xprt_put - release a reference to an RPC transport. 20136b6ca86bSTrond Myklebust * @xprt: pointer to the transport 20146b6ca86bSTrond Myklebust * 20156b6ca86bSTrond Myklebust */ 20166b6ca86bSTrond Myklebust void xprt_put(struct rpc_xprt *xprt) 20176b6ca86bSTrond Myklebust { 201830c5116bSTrond Myklebust if (xprt != NULL) 201930c5116bSTrond Myklebust kref_put(&xprt->kref, xprt_destroy_kref); 20206b6ca86bSTrond Myklebust } 20215d252f90SChuck Lever EXPORT_SYMBOL_GPL(xprt_put); 2022