/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);
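
/*
 * Example (a sketch, not part of the code): because the parameter is
 * registered with mode 0644, an administrator can pick a mapping mode
 * either at module load time or, while no pooled service is running,
 * through sysfs:
 *
 *	modprobe sunrpc pool_mode=pernode
 *	echo auto > /sys/module/sunrpc/parameters/pool_mode
 *
 * The -EBUSY return from param_set_pool_mode() above means the second
 * form fails once a pooled service holds a reference to the map.
 */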

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on non-NUMA
		 * hardware, e.g. with a generic x86_64 kernel
		 * on Xeons.  In this case we want to divide the
		 * pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
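
/*
 * For illustration only: on a hypothetical 2-node machine with cpus
 * 0-1 on node 0 and cpus 2-3 on node 1, SVC_POOL_PERNODE leaves the
 * map looking like
 *
 *	npools  = 2
 *	to_pool = { 0, 1 }		(indexed by node id)
 *	pool_to = { 0, 1 }		(indexed by pool id)
 *
 * while SVC_POOL_PERCPU on the same box gives
 *
 *	npools  = 4
 *	to_pool = { 0, 1, 2, 3 }	(indexed by cpu id)
 *	pool_to = { 0, 1, 2, 3 }	(indexed by pool id)
 *
 * The identity mappings here are an artifact of the dense example;
 * with holes in the online cpu/node numbering the two arrays differ.
 */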

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		kfree(m->pool_to);
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}

/*
 * Set the current thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 *
 * Returns 1 and fills in oldmask iff a cpumask was applied.
 */
static inline int
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node;	/* or cpu */

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	default:
		return 0;
	case SVC_POOL_PERCPU:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, cpumask_of_cpu(node));
		return 1;
	case SVC_POOL_PERNODE:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, node_to_cpumask(node));
		return 1;
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
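
/*
 * Sketch of the expected call pattern (the in-tree caller lives in
 * the transport code, e.g. svc_xprt_enqueue()): whoever has just
 * received data asks for the pool matching the cpu it is running on,
 * so the request tends to be picked up by a thread on the same cpu
 * or node:
 *
 *	cpu = get_cpu();
 *	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 *	...
 *	put_cpu();
 *
 * The "pidx % serv->sv_nrpools" above keeps the lookup in bounds even
 * if the map knows more pools than this particular serv was given.
 */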

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_register(serv, 0, 0);

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, int sig, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_kill_signal = sig;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL(svc_create_pooled);

/*
 * Destroy an RPC service.  Should be called with the BKL held
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	svc_close_all(&serv->sv_tempsocks);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	svc_close_all(&serv->sv_permsocks);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	/* Unregister service with the portmapper */
	svc_register(serv, 0, 0);
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL(svc_destroy);
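
/*
 * Typical lifetime, sketched (mysvc_program, mysvc_shutdown and
 * mysvc_thread are hypothetical names, not kernel symbols):
 *
 *	serv = svc_create_pooled(&mysvc_program, 32 * 1024,
 *				 mysvc_shutdown, mysvc_thread,
 *				 SIGINT, THIS_MODULE);
 *	...
 *	error = svc_set_num_threads(serv, NULL, 8);	// spawn workers
 *	...
 *	svc_destroy(serv);	// drops the creator's sv_nrthreads ref
 *
 * Note that __svc_create() starts sv_nrthreads at 1: the creator
 * itself holds a reference, which the final svc_destroy() releases.
 */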

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	int pages;
	int arghi;

	pages = size / PAGE_SIZE + 1;	/* extra page as we hold both request
					 * and reply; we assume each is at
					 * most one page */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return !pages;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(svc_prepare_thread);

/*
 * Create a thread in the given pool.  Caller must hold BKL.
 * On a NUMA or SMP machine, with a multi-pool serv, the thread
 * will be restricted to run on the cpus belonging to the pool.
 */
static int
__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
		    struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;
	int error = -ENOMEM;
	int have_oldmask = 0;
	cpumask_t oldmask;

	rqstp = svc_prepare_thread(serv, pool);
	if (IS_ERR(rqstp)) {
		error = PTR_ERR(rqstp);
		goto out;
	}

	if (serv->sv_nrpools > 1)
		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);

	error = kernel_thread((int (*)(void *)) func, rqstp, 0);

	if (have_oldmask)
		set_cpus_allowed(current, oldmask);

	if (error < 0)
		goto out_thread;
	svc_sock_update_bufs(serv);
	error = 0;
out:
	return error;

out_thread:
	svc_exit_thread(rqstp);
	goto out;
}
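
/*
 * The cpumask dance above deserves a note: kernel_thread() makes the
 * child inherit the parent's cpus_allowed, so the parent temporarily
 * pins itself to the pool's cpus, forks, and then restores its own
 * mask.  The new thread is thereby confined to the pool's cpus for
 * its whole life without ever touching its own affinity.
 */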

/*
 * Create a thread in the default pool.  Caller must hold BKL.
 */
int
svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
{
	return __svc_create_thread(func, serv, &serv->sv_pools[0]);
}
EXPORT_SYMBOL(svc_create_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL held.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *victim;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		__module_get(serv->sv_module);
		error = __svc_create_thread(serv->sv_function, serv,
					    choose_pool(serv, pool, &state));
		if (error < 0) {
			module_put(serv->sv_module);
			break;
		}
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (victim = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(serv->sv_kill_signal, victim, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL(svc_set_num_threads);
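
/*
 * Usage sketch: a control interface (nfsd's write_threads is the
 * in-tree example) converges the thread count toward a target, so
 * both of these are valid under the BKL with a svc_get() reference:
 *
 *	svc_set_num_threads(serv, NULL, 16);		  // 16 total, all pools
 *	svc_set_num_threads(serv, &serv->sv_pools[0], 4); // 4 in pool 0
 *
 * Shrinking works by signalling victims with sv_kill_signal and
 * letting each thread clean itself up via svc_exit_thread().
 */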

/*
 * Called from a server thread as it's exiting.  Caller must hold BKL.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL(svc_exit_thread);

/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
svc_register(struct svc_serv *serv, int proto, unsigned short port)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;
	int error = 0, dummy;

	if (!port)
		clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%s, %s, %d, %d)%s\n",
					progp->pg_name,
					proto == IPPROTO_UDP ? "udp" : "tcp",
					port,
					i,
					progp->pg_vers[i]->vs_hidden ?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = rpcb_register(progp->pg_prog, i, proto, port, &dummy);
			if (error < 0)
				break;
			if (port && !dummy) {
				error = -EACCES;
				break;
			}
		}
	}

	if (!port) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	return error;
}

/*
 * Printk the given error with the address of the client that caused it.
 */
static int
__attribute__ ((format (printf, 2, 3)))
svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int r;
	char buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}
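
/*
 * For reference while reading svc_process() below, the RPC call
 * header it decodes is laid out like this (RFC 1831; each field is a
 * 32-bit XDR word, and the 6*4 sanity check below covers the first
 * six of them):
 *
 *	xid
 *	msg type	(0 == CALL)
 *	RPC version	(must be 2)
 *	program number
 *	program version
 *	procedure number
 *	credential, verifier	(variable length, consumed by
 *				 svc_authenticate())
 */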

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			dir, prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	rqstp->rq_xid = svc_getu32(argv);
	svc_putu32(resv, rqstp->rq_xid);

	dir  = svc_getnl(argv);
	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (dir != 0)		/* direction != CALL */
		goto err_bad_dir;
	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		rpc_stat = rpc_garbage_args;
		goto err_bad;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	    !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_server   = serv;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
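
	/*
	 * Everything below is the dispatch step proper.  A version may
	 * install its own vs_dispatch hook (nfsd does, for instance);
	 * otherwise svc_process() performs the generic decode /
	 * pc_func / encode sequence itself.
	 */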

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_drop_reply) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success && (xdr = procp->pc_encode)
		 && !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return svc_send(rqstp);

dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %Zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_dir:
	svc_printk(rqstp, "bad direction %d, dropping request\n", dir);

	serv->sv_stats->rpcbadfmt++;
	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
			vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL(svc_process);

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
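
/*
 * Example (sketch): a server procedure that wants to cap a READ-style
 * reply clamps against both the transport and the service limit:
 *
 *	u32 maxlen = svc_max_payload(rqstp);
 *	if (count > maxlen)
 *		count = maxlen;
 *
 * nfsd applies the same bound when sizing its READ/WRITE transfers.
 */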