/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static unsigned int svc_rpc_per_connection_limit __read_mostly;
module_param(svc_rpc_per_connection_limit, uint, 0644);


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(struct timer_list *t);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	The "service mutex" protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued multiply. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	if (xprt->xpt_bc_xps)
		xprt_switch_put(xprt->xpt_bc_xps);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_net = get_net(net);
	strcpy(xprt->xpt_remotebuf, "uninitialized");
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_enqueue_xprt with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
	svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
			    struct net *net, const int family,
			    const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	int err;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	if (err == -EPROTONOSUPPORT) {
		request_module("svc%s", xprt_name);
		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	}
	if (err)
		dprintk("svc: transport %s not found, err %d\n",
			xprt_name, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
{
	unsigned int limit = svc_rpc_per_connection_limit;
	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
}

static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
		if (!svc_xprt_slots_in_range(xprt))
			return false;
		atomic_inc(&xprt->xpt_nr_rqsts);
		set_bit(RQ_DATA, &rqstp->rq_flags);
	}
	return true;
}

static void svc_xprt_release_slot(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;
	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
		atomic_dec(&xprt->xpt_nr_rqsts);
		svc_xprt_enqueue(xprt);
	}
}

static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
		    svc_xprt_slots_in_range(xprt))
			return true;
		trace_svc_xprt_no_write_space(xprt);
		return false;
	}
	return false;
}

void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst	*rqstp = NULL;
	int cpu;

	if (!svc_xprt_has_something_to_do(xprt))
		return;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	atomic_long_inc(&pool->sp_stats.packets);

	spin_lock_bh(&pool->sp_lock);
	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
	pool->sp_stats.sockets_queued++;
	spin_unlock_bh(&pool->sp_lock);

	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		atomic_long_inc(&pool->sp_stats.threads_woken);
		rqstp->rq_qtime = ktime_get();
		wake_up_process(rqstp->rq_task);
		goto out_unlock;
	}
	set_bit(SP_CONGESTED, &pool->sp_flags);
	rqstp = NULL;
out_unlock:
	rcu_read_unlock();
	put_cpu();
	trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt	*xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;

	xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	svc_xprt_release_slot(rqstp);
	rqstp->rq_xprt = NULL;
	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when a xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped.
 * It's not clear what will happen if we allow "too many" connections,
 * but when dealing with network-facing software, we have to code
 * defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
	if (pages > RPCSVC_MAXPAGES) {
		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
			     pages, RPCSVC_MAXPAGES);
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES;
	}
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;

	/* did someone call svc_wake_up? */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
		return false;

	/* was a socket queued? */
	if (!list_empty(&pool->sp_sockets))
		return false;

	/* are we shutting down? */
	if (signalled() || kthread_should_stop())
		return false;

	/* are we freezing? */
	if (freezing(current))
		return false;

	return true;
}

static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_pool *pool = rqstp->rq_pool;
	long time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt)
		goto out_found;

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	smp_mb__before_atomic();
	clear_bit(SP_CONGESTED, &pool->sp_flags);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb__after_atomic();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	set_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb__after_atomic();
	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt)
		goto out_found;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
out_found:
	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
		rqstp->rq_chandle.thread_wait = 5*HZ;
	else
		rqstp->rq_chandle.thread_wait = 1*HZ;
	trace_svc_xprt_dequeue(rqstp);
	return rqstp->rq_xprt;
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		serv->sv_temptimer.function = svc_age_temp_xprts;
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
		else
			module_put(xprt->xpt_class->xcl_owner);
	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			kref_read(&xprt->xpt_ref));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		rqstp->rq_stime = ktime_get();
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
out:
	trace_svc_handle_xprt(xprt, len);
	return len;
}

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt		*xprt = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	int			len, err;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	xprt->xpt_ops->xpo_secure_port(rqstp);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	trace_svc_recv(rqstp, len);
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	trace_svc_drop(rqstp);
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len = -EFAULT;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* release the receive skb before sending the reply */
	xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	trace_svc_stats_latency(rqstp);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	trace_svc_send(rqstp, len);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(struct timer_list *t)
{
	struct svc_serv *serv = from_timer(serv, t, sv_temptimer);
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (kref_read(&xprt->xpt_ref) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_closed);

	spin_lock_bh(&serv->sv_lock);
	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
				 &xprt->xpt_local)) {
			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
			list_move(le, &to_be_closed);
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_closed)) {
		le = to_be_closed.next;
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
			xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del_init(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourself:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock(&serv->sv_lock);
	return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close.  In the case there are no such other threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop, and in the case where there are other threads, we may need to
 * wait a little while and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		trace_svc_drop_deferred(dr);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	trace_svc_defer(rqstp);
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		trace_svc_revisit_deferred(dr);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
		pool->sp_stats.sockets_queued,
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/