/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport from being
 *	enqueued multiple times. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
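/*
 * Example (illustrative sketch, not part of this file): per the rules
 * above, a transport provider that learns of incoming data sets XPT_DATA
 * and then enqueues the transport; it never touches XPT_BUSY itself.
 * The callback name and the sk_user_data back-pointer are hypothetical.
 *
 *	static void example_data_ready(struct sock *sk)
 *	{
 *		struct svc_xprt *xprt = sk->sk_user_data;
 *
 *		if (xprt) {
 *			set_bit(XPT_DATA, &xprt->xpt_flags);
 *			svc_xprt_enqueue(xprt);
 *		}
 *	}
 */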

int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);
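/*
 * Example (illustrative sketch, not part of this file): a transport
 * module typically registers its class from module init and unregisters
 * it on exit. The names example_xprt_class and example_xprt_ops are
 * hypothetical; RPCSVC_MAXPAYLOAD comes from svc.h.
 *
 *	static struct svc_xprt_class example_xprt_class = {
 *		.xcl_name	 = "example",
 *		.xcl_owner	 = THIS_MODULE,
 *		.xcl_ops	 = &example_xprt_ops,
 *		.xcl_max_payload = RPCSVC_MAXPAYLOAD,
 *	};
 *
 *	svc_reg_xprt_class(&example_xprt_class);	from module_init
 *	svc_unreg_xprt_class(&example_xprt_class);	from module_exit
 */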
/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}

		clear_bit(XPT_TEMP, &newxprt->xpt_flags);
		spin_lock_bh(&serv->sv_lock);
		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
		spin_unlock_bh(&serv->sv_lock);
		newport = svc_xprt_local_port(newxprt);
		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
		return newport;
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);

	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
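/*
 * Example (illustrative sketch, not part of this file): a service sets
 * up a permanent listener by class name; on success the bound port is
 * returned. "tcp" is assumed here to be a registered class name, and
 * SVC_SOCK_DEFAULTS comes from svcsock.h; the port value is hypothetical.
 *
 *	int port = svc_create_xprt(serv, "tcp", net, PF_INET, 2049,
 *				   SVC_SOCK_DEFAULTS);
 *	if (port < 0)
 *		return port;	-EPROTONOSUPPORT if no such class
 */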
/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);
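/*
 * Example (illustrative sketch, not part of this file): svc_print_addr
 * is handy for debug output. RPC_MAX_ADDRBUFLEN (from the sunrpc
 * headers) is assumed to size the scratch buffer, as other sunrpc
 * callers do.
 *
 *	char buf[RPC_MAX_ADDRBUFLEN];
 *
 *	dprintk("request from %s\n", svc_print_addr(rqstp, buf, sizeof(buf)));
 */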
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
		return xprt->xpt_ops->xpo_has_wspace(xprt);
	return false;
}

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!svc_xprt_has_something_to_do(xprt))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
		       "svc_xprt_enqueue: "
		       "threads and transports both waiting??\n");

	pool->sp_stats.packets++;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out_unlock;
	}

	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: transport %p served by daemon %p\n",
			xprt, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_xprt)
			printk(KERN_ERR
				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
				rqstp, rqstp->rq_xprt);
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		pool->sp_stats.threads_woken++;
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: transport %p put into queue\n", xprt);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport.  Must be called with the pool->sp_lock held.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt	*xprt;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	xprt = list_entry(pool->sp_sockets.next,
			  struct svc_xprt, xpt_ready);
	list_del_init(&xprt->xpt_ready);

	dprintk("svc: transport %p dequeued, inuse=%d\n",
		xprt, atomic_read(&xprt->xpt_ref.refcount));

	return xprt;
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
void svc_xprt_received(struct svc_xprt *xprt)
{
	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_xprt_enqueue with:
	 */
	svc_xprt_get(xprt);
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits. This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}
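/*
 * Example (illustrative sketch, not part of this file): once a service
 * knows how large its reply can actually be, it trims the pessimistic
 * per-request reservation made in svc_recv(); shrinking the reservation
 * may free write space, which is why svc_reserve() re-enqueues the
 * transport. The size variables here are hypothetical.
 *
 *	svc_reserve(rqstp, reply_hdr_size + max_payload_size);
 */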
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_xprt = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does one reconnect every 15 seconds; an
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}
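/*
 * Example (illustrative sketch, not part of this file): as the comment
 * above notes, a single-threaded service that expects many clients
 * should raise sv_maxconn explicitly after creating the service, since
 * the default limit scales with the thread count. The value chosen here
 * is hypothetical and pure service policy.
 *
 *	serv->sv_maxconn = 1024;
 */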
/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt		*xprt = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);
	long			time_left;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	BUG_ON(pages >= RPCSVC_MAXPAGES);
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled() || kthread_should_stop())
		return -EINTR;

	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	rqstp->rq_chandle.thread_wait = 5*HZ;

	spin_lock_bh(&pool->sp_lock);
	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);

		/* As there is a shortage of threads and this request
		 * had to be queued, don't allow the thread to wait so
		 * long for cache updates.
		 */
		rqstp->rq_chandle.thread_wait = 1*HZ;
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * checking kthread_should_stop() here allows us to avoid
		 * locking and signalling when stopping kthreads that call
		 * svc_recv. If the thread has already been woken up, then
		 * we can exit here without sleeping. If not, then it
		 * will be woken up quickly during the schedule_timeout.
		 */
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			spin_unlock_bh(&pool->sp_lock);
			return -EINTR;
		}

		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		time_left = schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);
		if (!time_left)
			pool->sp_stats.threads_timedout++;

		xprt = rqstp->rq_xprt;
		if (!xprt) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			if (signalled() || kthread_should_stop())
				return -EINTR;
			else
				return -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	len = 0;
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt) {
			/*
			 * We know this module_get will succeed because the
			 * listener holds a reference too
			 */
			__module_get(newxpt->xpt_class->xcl_owner);
			svc_check_conn_limits(xprt->xpt_server);
			spin_lock_bh(&serv->sv_lock);
			set_bit(XPT_TEMP, &newxpt->xpt_flags);
			list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
			serv->sv_tmpcnt++;
			if (serv->sv_temptimer.function == NULL) {
				/* setup timer to age temp transports */
				setup_timer(&serv->sv_temptimer,
					    svc_age_temp_xprts,
					    (unsigned long)serv);
				mod_timer(&serv->sv_temptimer,
					  jiffies + svc_conn_age_period * HZ);
			}
			spin_unlock_bh(&serv->sv_lock);
			svc_xprt_received(newxpt);
		}
	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	svc_xprt_received(xprt);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN)
		goto out;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
out:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);
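/*
 * Example (illustrative sketch, not part of this file): the usual shape
 * of a service thread built on svc_recv(). svc_process() (from svc.c)
 * dispatches the request and sends the reply; the thread and the
 * timeout value are hypothetical service policy.
 *
 *	static int example_thread(void *data)
 *	{
 *		struct svc_rqst *rqstp = data;
 *		int err;
 *
 *		while (!kthread_should_stop()) {
 *			err = svc_recv(rqstp, 60 * 60 * HZ);
 *			if (err == -EINTR)
 *				break;
 *			if (err == -EAGAIN)
 *				continue;
 *			svc_process(rqstp);
 *		}
 *		return 0;
 *	}
 */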
/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		return -EFAULT;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		svc_xprt_get(xprt);
		list_move(le, &to_be_aged);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_DETACHED, &xprt->xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}
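/*
 * Example (illustrative sketch, not part of this file): a caller that
 * wants notification when a transport dies hangs a svc_xpt_user off
 * xpt_users under xpt_lock, mirroring the locking in call_xpt_users()
 * above. Real callers should also check for an already-closing
 * transport before adding. The embedding struct and the names are
 * hypothetical.
 *
 *	struct example_watcher {
 *		struct svc_xpt_user xpt_user;
 *	};
 *
 *	static void example_xprt_gone(struct svc_xpt_user *u)
 *	{
 *		struct example_watcher *w =
 *			container_of(u, struct example_watcher, xpt_user);
 *		... tear down state associated with the transport ...
 *	}
 *
 *	w->xpt_user.callback = example_xprt_gone;
 *	spin_lock(&xprt->xpt_lock);
 *	list_add(&w->xpt_user.list, &xprt->xpt_users);
 *	spin_unlock(&xprt->xpt_lock);
 */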
/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
		list_del_init(&xprt->xpt_list);
	BUG_ON(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourself:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static void svc_close_list(struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;

	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_BUSY, &xprt->xpt_flags);
	}
}

static void svc_clear_pools(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}

static void svc_clear_list(struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;

	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		svc_delete_xprt(xprt);
	}
	list_for_each_entry(xprt, xprt_list, xpt_list)
		BUG_ON(xprt->xpt_net == net);
}

void svc_close_net(struct svc_serv *serv, struct net *net)
{
	svc_close_list(&serv->sv_tempsocks, net);
	svc_close_list(&serv->sv_permsocks, net);

	svc_clear_pools(serv, net);
	/*
	 * At this point the sp_sockets lists will stay empty, since
	 * svc_xprt_enqueue will not add new entries without taking the
	 * sp_lock and checking XPT_BUSY.
	 */
	svc_clear_list(&serv->sv_tempsocks, net);
	svc_clear_list(&serv->sv_permsocks, net);
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}
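/*
 * Example (illustrative sketch, not part of this file): the cache layer
 * defers a request through the handle planted by svc_recv()
 * (rq_chandle.defer == svc_defer) and later revisits it, roughly:
 *
 *	struct cache_deferred_req *dreq;
 *
 *	dreq = rqstp->rq_chandle.defer(&rqstp->rq_chandle);
 *	if (dreq) {
 *		... stash dreq until the cache entry is filled ...
 *		dreq->revisit(dreq, 0);		requeues via svc_revisit
 *	}
 */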
/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	rqstp->rq_dropme = true;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}
/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
			xprt->xpt_class->xcl_name,
			svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);
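/*
 * Example (illustrative sketch, not part of this file): svc_find_xprt
 * takes a reference on the transport it returns, so a successful
 * lookup must be balanced with svc_xprt_put(). "tcp" is assumed to be
 * a registered class name; AF_UNSPEC and port 0 act as wild-cards.
 *
 *	struct svc_xprt *xprt;
 *
 *	xprt = svc_find_xprt(serv, "tcp", net, AF_UNSPEC, 0);
 *	if (xprt) {
 *		... use the transport ...
 *		svc_xprt_put(xprt);
 *	}
 */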

/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		pool->sp_stats.packets,
		pool->sp_stats.sockets_queued,
		pool->sp_stats.threads_woken,
		pool->sp_stats.threads_timedout);

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/
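/*
 * Example (illustrative sketch, not part of this file): a service
 * exposes the pool stats by wiring svc_pool_stats_open() into a
 * file_operations open method; reads then flow through the normal
 * seq_file helpers. The wrapper name and some_serv are hypothetical.
 *
 *	static int example_pool_stats_open(struct inode *inode,
 *					   struct file *file)
 *	{
 *		return svc_pool_stats_open(some_serv, file);
 *	}
 *
 *	static const struct file_operations example_pool_stats_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= example_pool_stats_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= seq_release,
 *	};
 */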