/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued more than once. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */

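/*
 * Illustrative sketch, not part of the original file: the XPT_DATA rule
 * above is what a transport provider's data-ready callback typically
 * follows: set the bit, then enqueue the transport.  All names below
 * (my_xprt, sc_xprt, my_xprt_data_ready) are hypothetical:
 *
 *	static void my_xprt_data_ready(struct my_xprt *mx)
 *	{
 *		set_bit(XPT_DATA, &mx->sc_xprt.xpt_flags);
 *		svc_xprt_enqueue(&mx->sc_xprt);
 *	}
 */
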
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

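/*
 * Illustrative sketch, not part of the original file: a transport module
 * normally defines one svc_xprt_class and pairs svc_reg_xprt_class() in
 * its module init with svc_unreg_xprt_class() in its module exit.  The
 * names my_xprt_class, my_xprt_ops and MY_MAX_PAYLOAD are hypothetical:
 *
 *	static struct svc_xprt_class my_xprt_class = {
 *		.xcl_name	 = "myxprt",
 *		.xcl_owner	 = THIS_MODULE,
 *		.xcl_ops	 = &my_xprt_ops,
 *		.xcl_max_payload = MY_MAX_PAYLOAD,
 *	};
 *
 *	err = svc_reg_xprt_class(&my_xprt_class);	(module init)
 *	...
 *	svc_unreg_xprt_class(&my_xprt_class);		(module exit)
 */
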
/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
		   struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(&init_net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}

		clear_bit(XPT_TEMP, &newxprt->xpt_flags);
		spin_lock_bh(&serv->sv_lock);
		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
		spin_unlock_bh(&serv->sv_lock);
		newport = svc_xprt_local_port(newxprt);
		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
		return newport;
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);

	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
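
/*
 * Illustrative sketch, not part of the original file: services create
 * their listeners by class name and let the matching provider build the
 * endpoint.  The flags value shown is the one in-tree callers normally
 * pass; on success the return value is the bound local port:
 *
 *	err = svc_create_xprt(serv, "tcp", net, PF_INET, port,
 *			      SVC_SOCK_DEFAULTS);
 *	if (err < 0)
 *		return err;	(no such class, or xpo_create failed)
 */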

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct sockaddr *sin;

	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	sin = (struct sockaddr *)&xprt->xpt_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_pool *pool;
	struct svc_rqst *rqstp;
	int cpu;

	if (!(xprt->xpt_flags &
	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
		       "svc_xprt_enqueue: "
		       "threads and transports both waiting??\n");

	pool->sp_stats.packets++;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out_unlock;
	}
	BUG_ON(xprt->xpt_pool != NULL);
	xprt->xpt_pool = pool;

	/* Handle pending connection */
	if (test_bit(XPT_CONN, &xprt->xpt_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: no write space, transport %p not enqueued\n",
			xprt);
		xprt->xpt_pool = NULL;
		clear_bit(XPT_BUSY, &xprt->xpt_flags);
		goto out_unlock;
	}

 process:
	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: transport %p served by daemon %p\n",
			xprt, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_xprt)
			printk(KERN_ERR
				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
				rqstp, rqstp->rq_xprt);
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
		pool->sp_stats.threads_woken++;
		BUG_ON(xprt->xpt_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: transport %p put into queue\n", xprt);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
		BUG_ON(xprt->xpt_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport.  Must be called with the pool->sp_lock held.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	xprt = list_entry(pool->sp_sockets.next,
			  struct svc_xprt, xpt_ready);
	list_del_init(&xprt->xpt_ready);

	dprintk("svc: transport %p dequeued, inuse=%d\n",
		xprt, atomic_read(&xprt->xpt_ref.refcount));

	return xprt;
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
void svc_xprt_received(struct svc_xprt *xprt)
{
	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
	xprt->xpt_pool = NULL;
	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_xprt_enqueue with:
	 */
	svc_xprt_get(xprt);
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);

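/*
 * Added note on the XPT_BUSY handshake implemented above: svc_xprt_enqueue()
 * sets XPT_BUSY before handing a transport to a waiting thread or queueing
 * it on sp_sockets, and whoever finishes the read/accept calls
 * svc_xprt_received(), which clears XPT_BUSY and re-enqueues the transport
 * in case more work (XPT_DATA, XPT_DEFERRED, XPT_CLOSE) arrived meanwhile.
 */
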
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_xprt = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value which is based
 * on the number of threads
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open "
				       "connections, consider increasing %s\n",
				       serv->sv_name, serv->sv_maxconn ?
				       "the max number of connections." :
				       "the number of threads.");
			}
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt		*xprt = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);
	long			time_left;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled() || kthread_should_stop())
		return -EINTR;

	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	rqstp->rq_chandle.thread_wait = 5*HZ;

	spin_lock_bh(&pool->sp_lock);
	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);

		/* As there is a shortage of threads and this request
		 * had to be queued, don't allow the thread to wait so
		 * long for cache updates.
		 */
		rqstp->rq_chandle.thread_wait = 1*HZ;
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * checking kthread_should_stop() here allows us to avoid
		 * locking and signalling when stopping kthreads that call
		 * svc_recv. If the thread has already been woken up, then
		 * we can exit here without sleeping. If not, then it
If not, then it 6887086721fSJeff Layton * it'll be woken up quickly during the schedule_timeout 6897086721fSJeff Layton */ 6907086721fSJeff Layton if (kthread_should_stop()) { 6917086721fSJeff Layton set_current_state(TASK_RUNNING); 6927086721fSJeff Layton spin_unlock_bh(&pool->sp_lock); 6937086721fSJeff Layton return -EINTR; 6947086721fSJeff Layton } 6957086721fSJeff Layton 6960f0257eaSTom Tucker add_wait_queue(&rqstp->rq_wait, &wait); 6970f0257eaSTom Tucker spin_unlock_bh(&pool->sp_lock); 6980f0257eaSTom Tucker 69903cf6c9fSGreg Banks time_left = schedule_timeout(timeout); 7000f0257eaSTom Tucker 7010f0257eaSTom Tucker try_to_freeze(); 7020f0257eaSTom Tucker 7030f0257eaSTom Tucker spin_lock_bh(&pool->sp_lock); 7040f0257eaSTom Tucker remove_wait_queue(&rqstp->rq_wait, &wait); 70503cf6c9fSGreg Banks if (!time_left) 70603cf6c9fSGreg Banks pool->sp_stats.threads_timedout++; 7070f0257eaSTom Tucker 7080f0257eaSTom Tucker xprt = rqstp->rq_xprt; 7090f0257eaSTom Tucker if (!xprt) { 7100f0257eaSTom Tucker svc_thread_dequeue(pool, rqstp); 7110f0257eaSTom Tucker spin_unlock_bh(&pool->sp_lock); 7120f0257eaSTom Tucker dprintk("svc: server %p, no data yet\n", rqstp); 7137086721fSJeff Layton if (signalled() || kthread_should_stop()) 7147086721fSJeff Layton return -EINTR; 7157086721fSJeff Layton else 7167086721fSJeff Layton return -EAGAIN; 7170f0257eaSTom Tucker } 7180f0257eaSTom Tucker } 7190f0257eaSTom Tucker spin_unlock_bh(&pool->sp_lock); 7200f0257eaSTom Tucker 7210f0257eaSTom Tucker len = 0; 7221b644b6eSJ. Bruce Fields if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { 7231b644b6eSJ. Bruce Fields dprintk("svc_recv: found XPT_CLOSE\n"); 7241b644b6eSJ. Bruce Fields svc_delete_xprt(xprt); 7251b644b6eSJ. Bruce Fields } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { 7260f0257eaSTom Tucker struct svc_xprt *newxpt; 7270f0257eaSTom Tucker newxpt = xprt->xpt_ops->xpo_accept(xprt); 7280f0257eaSTom Tucker if (newxpt) { 7290f0257eaSTom Tucker /* 7300f0257eaSTom Tucker * We know this module_get will succeed because the 7310f0257eaSTom Tucker * listener holds a reference too 7320f0257eaSTom Tucker */ 7330f0257eaSTom Tucker __module_get(newxpt->xpt_class->xcl_owner); 7340f0257eaSTom Tucker svc_check_conn_limits(xprt->xpt_server); 7350f0257eaSTom Tucker spin_lock_bh(&serv->sv_lock); 7360f0257eaSTom Tucker set_bit(XPT_TEMP, &newxpt->xpt_flags); 7370f0257eaSTom Tucker list_add(&newxpt->xpt_list, &serv->sv_tempsocks); 7380f0257eaSTom Tucker serv->sv_tmpcnt++; 7390f0257eaSTom Tucker if (serv->sv_temptimer.function == NULL) { 7400f0257eaSTom Tucker /* setup timer to age temp transports */ 7410f0257eaSTom Tucker setup_timer(&serv->sv_temptimer, 7420f0257eaSTom Tucker svc_age_temp_xprts, 7430f0257eaSTom Tucker (unsigned long)serv); 7440f0257eaSTom Tucker mod_timer(&serv->sv_temptimer, 7450f0257eaSTom Tucker jiffies + svc_conn_age_period * HZ); 7460f0257eaSTom Tucker } 7470f0257eaSTom Tucker spin_unlock_bh(&serv->sv_lock); 7480f0257eaSTom Tucker svc_xprt_received(newxpt); 7490f0257eaSTom Tucker } 7500f0257eaSTom Tucker svc_xprt_received(xprt); 7511b644b6eSJ. 
	} else {
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred) {
			svc_xprt_received(xprt);
			len = svc_deferred_recv(rqstp);
		} else {
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
			svc_xprt_received(xprt);
		}
		dprintk("svc: got len=%d\n", len);
	}

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_xprt_release(rqstp);
		return -EAGAIN;
	}
	clear_bit(XPT_OLD, &xprt->xpt_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);
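
/*
 * Illustrative sketch, not part of the original file: a service thread
 * (nfsd, lockd, ...) drives svc_recv() in a loop roughly like the one
 * below; svc_process() dispatches the call and replies through svc_send()
 * further down.  Error handling is simplified:
 *
 *	for (;;) {
 *		err = svc_recv(rqstp, timeout);
 *		if (err == -EINTR)
 *			break;
 *		if (err == -EAGAIN)
 *			continue;
 *		svc_process(rqstp);
 *	}
 */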

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		return -EFAULT;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		svc_xprt_get(xprt);
		list_move(le, &to_be_aged);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_DETACHED, &xprt->xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
		list_del_init(&xprt->xpt_list);
	/*
	 * We used to delete the transport from whichever list
	 * its sk_xprt.xpt_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;

	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

void svc_close_all(struct list_head *xprt_list)
{
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;

	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
			/* Waiting to be processed, but no threads left,
			 * so just remove it from the waiting list
			 */
			list_del_init(&xprt->xpt_ready);
			clear_bit(XPT_BUSY, &xprt->xpt_flags);
		}
		svc_close_xprt(xprt);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}

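/*
 * Added note on how the deferral path above is reached: services do not
 * call svc_defer() directly.  svc_recv() points rqstp->rq_chandle.defer at
 * it, and the sunrpc cache code uses that hook when an upcall (for example
 * an export or auth lookup) cannot be answered immediately.  The saved
 * request is replayed later via svc_revisit() and svc_xprt_enqueue(); the
 * next svc_recv() on that transport picks it up through
 * svc_deferred_dequeue() below and rebuilds it with svc_deferred_recv().
 */
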
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	}
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       const sa_family_t af, const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
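
/*
 * Illustrative sketch, not part of the original file: a caller can check
 * whether a service already has, say, a TCP listener on any port.  The
 * returned transport carries an extra reference that the caller must drop
 * with svc_xprt_put():
 *
 *	struct svc_xprt *xprt;
 *
 *	xprt = svc_find_xprt(serv, "tcp", AF_UNSPEC, 0);
 *	if (xprt) {
 *		... use svc_xprt_local_port(xprt) etc. ...
 *		svc_xprt_put(xprt);
 *	}
 */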

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
			xprt->xpt_class->xcl_name,
			svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		pool->sp_stats.packets,
		pool->sp_stats.sockets_queued,
		pool->sp_stats.threads_woken,
		pool->sp_stats.threads_timedout);

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/
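
/*
 * Illustrative example, numbers invented: a read of the pool-stats seq
 * file built above produces one header line followed by one line per
 * pool, e.g. for a two-pool service:
 *
 *	# pool packets-arrived sockets-enqueued threads-woken threads-timedout
 *	0 16384 12 16372 5
 *	1 15021 9 15012 3
 */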