/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static unsigned int svc_rpc_per_connection_limit __read_mostly;
module_param(svc_rpc_per_connection_limit, uint, 0644);


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	The "service mutex" protects svc_serv->sv_nrthreads.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued multiple times. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
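/**
 * svc_reg_xprt_class - register an RPC server transport class
 * @xcl: transport class to register
 *
 * Adds @xcl to the global list of transport classes, unless a class
 * with the same name is already registered.
 *
 * Returns 0 on success, or -EEXIST if the class name is already in use.
 */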
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	if (xprt->xpt_bc_xps)
		xprt_switch_put(xprt->xpt_bc_xps);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

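/*
 * A transport provider typically embeds a struct svc_xprt at the start of
 * its own per-connection structure and calls svc_xprt_init() on that
 * embedded member from its xpo_create or xpo_accept path.  A minimal
 * sketch (the provider type and class name below are illustrative only):
 *
 *	struct my_svc_conn {
 *		struct svc_xprt	xprt;		// transport-independent part
 *		...				// provider-private state
 *	};
 *
 *	svc_xprt_init(net, &my_xprt_class, &conn->xprt, serv);
 */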
/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_enqueue_xprt with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
	svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		     struct net *net, const int family,
		     const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
err:
	spin_unlock(&svc_xprt_class_lock);
	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	int err;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	if (err == -EPROTONOSUPPORT) {
		request_module("svc%s", xprt_name);
		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	}
	if (err)
		dprintk("svc: transport %s not found, err %d\n",
			xprt_name, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

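/*
 * Typical use from a server's setup path (the protocol name and port
 * below are illustrative, not mandated by this file):
 *
 *	err = svc_create_xprt(serv, "tcp", net, PF_INET, 2049,
 *			      SVC_SOCK_DEFAULTS);
 *
 * A negative return value is an errno; a positive value is the bound
 * local port, which matters when port 0 (an ephemeral port) was
 * requested.
 */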
/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
{
	unsigned int limit = svc_rpc_per_connection_limit;
	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
}

static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
		if (!svc_xprt_slots_in_range(xprt))
			return false;
		atomic_inc(&xprt->xpt_nr_rqsts);
		set_bit(RQ_DATA, &rqstp->rq_flags);
	}
	return true;
}

static void svc_xprt_release_slot(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
		atomic_dec(&xprt->xpt_nr_rqsts);
		svc_xprt_enqueue(xprt);
	}
}

static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
		    svc_xprt_slots_in_range(xprt))
			return true;
		trace_svc_xprt_no_write_space(xprt);
		return false;
	}
	return false;
}

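/*
 * Queue a transport on its per-CPU pool and try to wake an idle thread
 * to service it.  If every thread in the pool is busy, the transport is
 * left on the pool's sp_sockets list for the next thread that goes
 * looking for work.
 */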
void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst *rqstp = NULL;
	int cpu;
	bool queued = false;

	if (!svc_xprt_has_something_to_do(xprt))
		goto out;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out;
	}

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	atomic_long_inc(&pool->sp_stats.packets);

redo_search:
	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* Do a lockless check first */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;

		/*
		 * Once the xprt has been queued, it can only be dequeued by
		 * the task that intends to service it. All we can do at that
		 * point is to try to wake this thread back up so that it can
		 * do so.
		 */
		if (!queued) {
			spin_lock_bh(&rqstp->rq_lock);
			if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
				/* already busy, move on... */
				spin_unlock_bh(&rqstp->rq_lock);
				continue;
			}

			/* this one will do */
			rqstp->rq_xprt = xprt;
			svc_xprt_get(xprt);
			spin_unlock_bh(&rqstp->rq_lock);
		}
		rcu_read_unlock();

		atomic_long_inc(&pool->sp_stats.threads_woken);
		wake_up_process(rqstp->rq_task);
		put_cpu();
		goto out;
	}
	rcu_read_unlock();

	/*
	 * We didn't find an idle thread to use, so we need to queue the xprt.
	 * Do so and then search again. If we find one, we can't hook this one
	 * up to it directly but we can wake the thread up in the hopes that it
	 * will pick it up once it searches for an xprt to service.
	 */
	if (!queued) {
		queued = true;
		dprintk("svc: transport %p put into queue\n", xprt);
		spin_lock_bh(&pool->sp_lock);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
		spin_unlock_bh(&pool->sp_lock);
		goto redo_search;
	}
	rqstp = NULL;
	put_cpu();
out:
	trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);

		dprintk("svc: transport %p dequeued, inuse=%d\n",
			xprt, kref_read(&xprt->xpt_ref));
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	trace_svc_xprt_dequeue(xprt);
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

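/*
 * Example: if the reply head already holds 100 bytes and the service
 * knows the rest of its reply needs at most 512 more bytes, then
 *
 *	svc_reserve(rqstp, 512);
 *
 * shrinks this request's reservation to 612 bytes (head length plus
 * 512), releasing the excess and possibly re-enqueuing the transport.
 */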
static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	svc_xprt_release_slot(rqstp);
	rqstp->rq_xprt = NULL;
	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when an xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst *rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		dprintk("svc: daemon %p woken up.\n", rqstp);
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value which is based
 * on the number of threads
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
	if (pages >= RPCSVC_MAXPAGES)
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES - 1;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;

	/* did someone call svc_wake_up? */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
		return false;

	/* was a socket queued? */
	if (!list_empty(&pool->sp_sockets))
		return false;

	/* are we shutting down? */
	if (signalled() || kthread_should_stop())
		return false;

	/* are we freezing? */
	if (freezing(current))
		return false;

	return true;
}

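/*
 * Wait for a transport that needs servicing.  Returns the transport on
 * success, ERR_PTR(-EINTR) if the thread was signalled or asked to stop,
 * or ERR_PTR(-EAGAIN) if the wait timed out without work arriving.
 */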
static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt;
	struct svc_pool *pool = rqstp->rq_pool;
	long time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	rqstp->rq_chandle.thread_wait = 5*HZ;

	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;

		/* As there is a shortage of threads and this request
		 * had to be queued, don't allow the thread to wait so
		 * long for cache updates.
		 */
		rqstp->rq_chandle.thread_wait = 1*HZ;
		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
		return xprt;
	}

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	spin_lock_bh(&rqstp->rq_lock);
	set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_unlock_bh(&rqstp->rq_lock);

	xprt = rqstp->rq_xprt;
	if (xprt != NULL)
		return xprt;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		setup_timer(&serv->sv_temptimer, svc_age_temp_xprts,
			    (unsigned long)serv);
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
		else
			module_put(xprt->xpt_class->xcl_owner);
	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			kref_read(&xprt->xpt_ref));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
out:
	trace_svc_handle_xprt(xprt, len);
	return len;
}

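/*
 * A service thread's main loop is typically built around svc_recv().
 * A simplified sketch (error handling and accounting elided; the exact
 * loop is up to the service, e.g. nfsd or lockd):
 *
 *	while (!kthread_should_stop()) {
 *		err = svc_recv(rqstp, 60 * 60 * HZ);
 *		if (err == -EAGAIN || err == -EINTR)
 *			continue;
 *		svc_process(rqstp);
 *	}
 */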
/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	int len, err;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	if (xprt->xpt_ops->xpo_secure_port(rqstp))
		set_bit(RQ_SECURE, &rqstp->rq_flags);
	else
		clear_bit(RQ_SECURE, &rqstp->rq_flags);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	trace_svc_recv(rqstp, len);
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	trace_svc_recv(rqstp, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	trace_svc_drop(rqstp);
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt;
	int len = -EFAULT;
	struct xdr_buf *xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	trace_svc_send(rqstp, len);
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (kref_read(&xprt->xpt_ref) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_closed);

	spin_lock_bh(&serv->sv_lock);
	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
				 &xprt->xpt_local)) {
			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
			list_move(le, &to_be_closed);
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_closed)) {
		le = to_be_closed.next;
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
			xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourself:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

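/*
 * Mark every transport on @xprt_list that belongs to @net for closing and
 * enqueue it so a server thread can finish the close.  Returns the number
 * of transports marked.
 */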
static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock(&serv->sv_lock);
	return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close.  When there are no such other threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop, and when there are other threads, we may need to wait a little
 * while and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		trace_svc_drop_deferred(dr);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

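/*
 * Deferral life cycle: svc_defer() below copies the request into a
 * svc_deferred_req and hands it to the cache layer; when the cache calls
 * ->revisit (svc_revisit above), the saved request is queued on the
 * transport's xpt_deferred list and the transport is re-enqueued so that
 * svc_deferred_recv() can replay it into a fresh svc_rqst.
 */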
/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	trace_svc_defer(rqstp);
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		trace_svc_revisit_deferred(dr);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

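/*
 * seq_file plumbing for per-pool statistics (exposed by services such as
 * nfsd through their pool_stats files): one header row, then one line per
 * pool with the counters kept in struct svc_pool_stats.
 */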
static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
		pool->sp_stats.sockets_queued,
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/