/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* Apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes.
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	When both need to be taken (rare), svc_serv->sv_lock is first.
 *	The "service mutex" protects svc_serv->sv_nrthreads.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued multiply. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	provided that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
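/*
 * Purely illustrative sketch (hypothetical provider code, not part of
 * this file): per the rules above, a transport implementation typically
 * sets XPT_DATA or XPT_CONN from its socket callbacks and then calls
 * svc_xprt_enqueue(), e.g.:
 *
 *	static void my_xprt_data_ready(struct sock *sk)
 *	{
 *		struct svc_xprt *xprt = sk->sk_user_data;
 *
 *		if (xprt) {
 *			set_bit(XPT_DATA, &xprt->xpt_flags);
 *			svc_xprt_enqueue(xprt);
 *		}
 *	}
 */
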
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_enqueue_xprt with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
	svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);

	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
		return xprt->xpt_ops->xpo_has_wspace(xprt);
	return false;
}

void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst	*rqstp = NULL;
	int cpu;
	bool queued = false;

	if (!svc_xprt_has_something_to_do(xprt))
		goto out;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out;
	}

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	atomic_long_inc(&pool->sp_stats.packets);

redo_search:
	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* Do a lockless check first */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;

		/*
		 * Once the xprt has been queued, it can only be dequeued by
		 * the task that intends to service it. All we can do at that
		 * point is to try to wake this thread back up so that it can
		 * do so.
		 */
		if (!queued) {
			spin_lock_bh(&rqstp->rq_lock);
			if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
				/* already busy, move on... */
				spin_unlock_bh(&rqstp->rq_lock);
				continue;
			}

			/* this one will do */
			rqstp->rq_xprt = xprt;
			svc_xprt_get(xprt);
			spin_unlock_bh(&rqstp->rq_lock);
		}
		rcu_read_unlock();

		atomic_long_inc(&pool->sp_stats.threads_woken);
		wake_up_process(rqstp->rq_task);
		put_cpu();
		goto out;
	}
	rcu_read_unlock();

	/*
	 * We didn't find an idle thread to use, so we need to queue the xprt.
	 * Do so and then search again. If we find one, we can't hook this one
	 * up to it directly but we can wake the thread up in the hopes that it
	 * will pick it up once it searches for an xprt to service.
	 */
	if (!queued) {
		queued = true;
		dprintk("svc: transport %p put into queue\n", xprt);
		spin_lock_bh(&pool->sp_lock);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
		spin_unlock_bh(&pool->sp_lock);
		goto redo_search;
	}
	rqstp = NULL;
	put_cpu();
out:
	trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt	*xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);

		dprintk("svc: transport %p dequeued, inuse=%d\n",
			xprt, atomic_read(&xprt->xpt_ref.refcount));
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	trace_svc_xprt_dequeue(xprt);
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		if (xprt->xpt_ops->xpo_adjust_wspace)
			xprt->xpt_ops->xpo_adjust_wspace(xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when an xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		dprintk("svc: daemon %p woken up.\n", rqstp);
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does one reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
	if (pages >= RPCSVC_MAXPAGES)
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES - 1;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;

	/* did someone call svc_wake_up? */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
		return false;

	/* was a socket queued? */
	if (!list_empty(&pool->sp_sockets))
		return false;

	/* are we shutting down? */
	if (signalled() || kthread_should_stop())
		return false;

	/* are we freezing? */
	if (freezing(current))
		return false;

	return true;
}

static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt;
	struct svc_pool	*pool = rqstp->rq_pool;
	long time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	rqstp->rq_chandle.thread_wait = 5*HZ;

	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;

		/* As there is a shortage of threads and this request
		 * had to be queued, don't allow the thread to wait so
		 * long for cache updates.
		 */
		rqstp->rq_chandle.thread_wait = 1*HZ;
		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
		return xprt;
	}

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	spin_lock_bh(&rqstp->rq_lock);
	set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_unlock_bh(&rqstp->rq_lock);

	xprt = rqstp->rq_xprt;
	if (xprt != NULL)
		return xprt;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		setup_timer(&serv->sv_temptimer, svc_age_temp_xprts,
			    (unsigned long)serv);
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
		else
			module_put(xprt->xpt_class->xcl_owner);
	} else {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
out:
	trace_svc_handle_xprt(xprt, len);
	return len;
}
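
/*
 * For context, a minimal sketch (not taken from any caller verbatim) of
 * the loop a service thread such as nfsd runs around svc_recv() below;
 * real callers add their own setup and error handling:
 *
 *	for (;;) {
 *		int err = svc_recv(rqstp, 60 * 60 * HZ);
 *
 *		if (err == -EINTR)
 *			break;			// shutting down
 *		if (err < 0)
 *			continue;		// e.g. -EAGAIN, nothing usable
 *		svc_process(rqstp);		// dispatch; the reply is sent
 *						// via svc_send() from there
 *	}
 */
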
/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt		*xprt = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	int			len, err;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	if (xprt->xpt_ops->xpo_secure_port(rqstp))
		set_bit(RQ_SECURE, &rqstp->rq_flags);
	else
		clear_bit(RQ_SECURE, &rqstp->rq_flags);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	trace_svc_recv(rqstp, len);
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	trace_svc_recv(rqstp, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len = -EFAULT;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	trace_svc_send(rqstp, len);
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it.
		 */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourselves:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock(&serv->sv_lock);
	return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close.  If there are no other threads running, svc_clean_up_xprts()
 * does a simple version of a server's main event loop; if there are
 * other threads, we may need to wait a little while and then check
 * again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}
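
/*
 * A rough sketch (not the actual cache code) of how the defer/revisit
 * hooks below are driven from the sunrpc cache layer: when an upcall
 * cannot be answered yet, the request is parked via ->defer and later
 * replayed (or dropped) via ->revisit:
 *
 *	struct cache_deferred_req *dreq = req->defer(req);   // svc_defer()
 *
 *	if (dreq) {
 *		// hang dreq off the pending cache entry ...
 *		// ... and once the entry is filled in (or gives up):
 *		dreq->revisit(dreq, 0);                       // svc_revisit()
 *	}
 */
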
/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
			xprt->xpt_class->xcl_name,
			svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
		pool->sp_stats.sockets_queued,
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/
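
/*
 * A hypothetical sketch (not part of this file) of how a service can
 * expose the pool statistics above through a procfs/debugfs file; real
 * users such as nfsd supply their own open/release wrappers around
 * svc_pool_stats_open():
 *
 *	static int my_pool_stats_open(struct inode *inode, struct file *file)
 *	{
 *		return svc_pool_stats_open(my_serv, file);
 *	}
 *
 *	static const struct file_operations my_pool_stats_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = my_pool_stats_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = seq_release,
 *	};
 */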