// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "fail.h"

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map {
	int count;		/* How many svc_servs use us */
	int mode;		/* Note: int not enum to avoid
				 * warnings about "enumeration value
				 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;	/* maps pool id to cpu or node */
	unsigned int *to_pool;	/* maps cpu or node to pool id */
};

static struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};

static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return sysfs_emit(buf, "auto\n");
	case SVC_POOL_GLOBAL:
		return sysfs_emit(buf, "global\n");
	case SVC_POOL_PERCPU:
		return sysfs_emit(buf, "percpu\n");
	case SVC_POOL_PERNODE:
		return sysfs_emit(buf, "pernode\n");
	default:
		return sysfs_emit(buf, "%d\n", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);
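
/*
 * Illustrative usage of the pool_mode parameter (an assumption about
 * the typical admin workflow, not something this file enforces):
 *
 *	# echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * The write is rejected with -EBUSY while any pooled service holds a
 * reference on the map (m->count != 0), so the mode can only be
 * changed while no pooled RPC service is running.
 */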

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons. In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
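
/*
 * Worked example (illustrative, assuming a machine not described in
 * this file): in SVC_POOL_PERCPU mode on a 4-CPU box, both arrays end
 * up the identity map, to_pool[] = pool_to[] = {0, 1, 2, 3}. In
 * SVC_POOL_PERNODE mode on a 2-node box, to_pool[] maps each node id
 * to a pool id and pool_to[] maps it back, giving two pools no matter
 * how many CPUs each node has.
 */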

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa) if pools are in use.
 * Initialise the map if we're the first user.
 * Returns the number of pools. If this is '1', no reference
 * was taken.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		WARN_ON_ONCE(m->npools <= 1);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools <= 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	if (npools == 1)
		/* service is unpooled, so doesn't hold a reference */
		m->count--;

	mutex_unlock(&svc_pool_map_mutex);
	return npools;
}

/*
 * Drop a reference to the global map of cpus to pools, if
 * pools were in use, i.e. if npools > 1.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(int npools)
{
	struct svc_pool_map *m = &svc_pool_map;

	if (npools <= 1)
		return;
	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}

static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}
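
/*
 * Reference lifecycle sketch (a reading of this file, stated here for
 * orientation): a pooled service takes its map reference in
 * svc_create_pooled() via svc_pool_map_get() and drops it in
 * svc_destroy() via svc_pool_map_put(), so the arrays above live
 * exactly as long as at least one pooled service does.
 */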

/**
 * svc_pool_for_cpu - Select pool to run a thread on this cpu
 * @serv: An RPC service
 *
 * Use the active CPU and the svc_pool_map's mode setting to
 * select the svc thread pool to use. Once initialized, the
 * svc_pool_map does not change.
 *
 * Return value:
 *   A pointer to an svc_pool
 */
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
{
	struct svc_pool_map *m = &svc_pool_map;
	int cpu = raw_smp_processor_id();
	unsigned int pidx = 0;

	if (serv->sv_nrpools <= 1)
		return serv->sv_pools;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}

	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
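
/*
 * Typical call-site sketch (an assumption about transport callers,
 * not code in this file): enqueueing work on the pool for the current
 * CPU looks roughly like
 *
 *	struct svc_pool *pool = svc_pool_for_cpu(serv);
 *
 *	spin_lock_bh(&pool->sp_lock);
 *	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 *	spin_unlock_bh(&pool->sp_lock);
 *
 * The "pidx % serv->sv_nrpools" above keeps the returned index within
 * this service's pool array regardless of what the map contains.
 */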

int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	INIT_LIST_HEAD(&serv->sv_cb_list);
	spin_lock_init(&serv->sv_cb_lock);
	init_waitqueue_head(&serv->sv_cb_waitq);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     int (*threadfn)(void *data))
{
	struct svc_serv *serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	serv = kzalloc(sizeof(*serv), GFP_KERNEL);
	if (!serv)
		return NULL;
	serv->sv_name = prog->pg_name;
	serv->sv_program = prog;
	kref_init(&serv->sv_refcnt);
	serv->sv_stats = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_threadfn = threadfn;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers - 1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
			i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);

		percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL);
	}

	return serv;
}

/**
 * svc_create - Create an RPC service
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
			    int (*threadfn)(void *data))
{
	return __svc_create(prog, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);

/**
 * svc_create_pooled - Create an RPC service with pooled threads
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create_pooled(struct svc_program *prog,
				   unsigned int bufsize,
				   int (*threadfn)(void *data))
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, threadfn);
	if (!serv)
		goto out_err;
	return serv;

out_err:
	svc_pool_map_put(npools);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
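
/*
 * Caller sketch (illustrative; the my_* names are assumptions, not
 * code in this file): a pooled service is typically brought up as
 *
 *	serv = svc_create_pooled(&my_program, my_bufsize, my_threadfn);
 *	if (!serv)
 *		return -ENOMEM;
 *	error = svc_bind(serv, net);
 *
 * and torn down with svc_put(), which lands in svc_destroy() below
 * once the last reference is gone.
 */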

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct kref *ref)
{
	struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
	unsigned int i;

	dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
	timer_shutdown_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone and thus all sockets have to be destroyed
	 * by this point. Check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	svc_pool_map_put(serv->sv_nrpools);

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		percpu_counter_destroy(&pool->sp_sockets_queued);
		percpu_counter_destroy(&pool->sp_threads_woken);
		percpu_counter_destroy(&pool->sp_threads_timedout);
	}
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

static bool
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned long pages, ret;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return true;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply. We assume that each is
				       * at most one page.
				       */
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;

	ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
					  rqstp->rq_pages);
	return ret == pages;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
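
/*
 * Sizing example (illustrative arithmetic, assuming 4KB pages): for a
 * service created with a 1MB bufsize, __svc_create() sets sv_max_mesg
 * to 1MB + 4KB = 1052672 bytes, so svc_init_buffer() asks for
 * 1052672 / 4096 + 1 = 258 pages per thread, the "+ 1" being the
 * extra page shared by the request and reply headers.
 */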

struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	folio_batch_init(&rqstp->rq_fbatch);

	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!rqstp->rq_scratch_page)
		goto out_enomem;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	return rqstp;

out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

static struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	svc_get(serv);
	spin_lock_bh(&serv->sv_lock);
	serv->sv_nrthreads += 1;
	spin_unlock_bh(&serv->sv_lock);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
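
/*
 * Note on the round-robin cursors (a reading of the code, not a
 * comment from the original authors): svc_start_kthreads() and
 * svc_stop_kthreads() below each seed a cursor from sv_nrthreads - 1
 * and pass it to the helpers above; choose_pool() post-increments it
 * while choose_victim() pre-decrements it, so thread creation and
 * destruction walk the pool array in opposite directions.
 */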

/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads - 1;
	int node;

	do {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		task = kthread_create_on_node(serv->sv_threadfn, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	} while (nrservs > 0);

	return 0;
}

/* destroy old threads */
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads - 1;

	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		rqstp = kthread_data(task);
		/* Did we lose a race to svo_function threadfn? */
		if (kthread_stop(task) == -EINTR)
			svc_exit_thread(rqstp);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number. If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools. Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		nrservs -= serv->sv_nrthreads;
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
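
/*
 * Usage sketch (illustrative; the mutex name is an assumption about
 * the caller, not something this file defines): an "nthreads" knob
 * would typically be wired up as
 *
 *	mutex_lock(&nfsd_mutex);
 *	error = svc_set_num_threads(serv, NULL, nthreads);
 *	mutex_unlock(&nfsd_mutex);
 *
 * providing the mutual exclusion against startup and shutdown that
 * the comment above svc_set_num_threads() requires.
 */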

/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 * @rqstp: svc_rqst with pages to replace
 * @page: replacement page
 *
 * When replacing a page in rq_pages, batch the release of the
 * replaced pages to avoid hammering the page allocator.
 *
 * Return values:
 *   %true: page replaced
 *   %false: array bounds checking failed
 */
bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
	struct page **begin = rqstp->rq_pages;
	struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];

	if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
		trace_svc_replace_page_err(rqstp);
		return false;
	}

	if (*rqstp->rq_next_page) {
		if (!folio_batch_add(&rqstp->rq_fbatch,
				     page_folio(*rqstp->rq_next_page)))
			__folio_batch_release(&rqstp->rq_fbatch);
	}

	get_page(page);
	*(rqstp->rq_next_page++) = page;
	return true;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);

/**
 * svc_rqst_release_pages - Release Reply buffer pages
 * @rqstp: RPC transaction context
 *
 * Release response pages that might still be in flight after
 * svc_send, and any spliced filesystem-owned pages.
 */
void svc_rqst_release_pages(struct svc_rqst *rqstp)
{
	int i, count = rqstp->rq_next_page - rqstp->rq_respages;

	if (count) {
		release_pages(rqstp->rq_respages, count);
		for (i = 0; i < count; i++)
			rqstp->rq_respages[i] = NULL;
	}
}

/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
	folio_batch_release(&rqstp->rq_fbatch);
	svc_release_buffer(rqstp);
	if (rqstp->rq_scratch_page)
		put_page(rqstp->rq_scratch_page);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);

void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
		list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	spin_lock_bh(&serv->sv_lock);
	serv->sv_nrthreads -= 1;
	spin_unlock_bh(&serv->sv_lock);
	svc_sock_update_bufs(serv);

	svc_rqst_free(rqstp);

	svc_put(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
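
/*
 * Caller sketch for svc_rqst_replace_page() above (illustrative; an
 * assumption about how splice-read style callers use it, not code in
 * this file): a filesystem read actor substitutes a page-cache page
 * for the pre-allocated anonymous page at rq_next_page, e.g.
 *
 *	if (!svc_rqst_replace_page(rqstp, buf->page))
 *		return -EIO;
 *
 * and the displaced pages are then released in batches via rq_fbatch.
 */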

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family	= AF_INET6,
		.sin6_addr	= IN6ADDR_ANY_INIT,
		.sin6_port	= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */
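
/*
 * Hand-rolled netid mapping used above, summarised for reference (the
 * string values come from the RPCBIND_NETID_* macros, quoted on the
 * assumption they carry their conventional values):
 *
 *	IPPROTO_UDP + PF_INET  -> "udp"    IPPROTO_UDP + PF_INET6 -> "udp6"
 *	IPPROTO_TCP + PF_INET  -> "tcp"    IPPROTO_TCP + PF_INET6 -> "tcp6"
 */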

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
					     protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
					     protocol, port);
#endif
	}

	trace_svc_register(progname, version, family, protocol, port, error);
	return error;
}

int svc_rpcbind_set_version(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	return __svc_register(net, progp->pg_name, progp->pg_prog,
			      version, family, proto, port);
}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);

int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     port, family, 0);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);
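
/*
 * Decision summary for svc_generic_rpcbind_set() (a restatement of
 * the branches above, for quick reference): a NULL or vs_hidden
 * version registers nothing and reports success; a version that
 * needs congestion control silently skips UDP; and when
 * vs_rpcb_optnl is set, a failed registration is still reported as
 * success because the netid is optional for that version.
 */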

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program *progp;
	unsigned int i;
	int error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			error = progp->pg_rpcbind_set(net, progp, i,
						      family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version]. If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via a tracepoint for those
 * who want verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct sighand_struct *sighand;
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	/*
	 * Temporarily clear TIF_SIGPENDING so that the synchronous
	 * rpcbind calls below are not aborted by a pending signal
	 * (services are often unregistered from a signalled task
	 * during shutdown); the pending state is recalculated at
	 * the end of this function.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	rcu_read_lock();
	sighand = rcu_dereference(current->sighand);
	spin_lock_irqsave(&sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&sighand->siglock, flags);
	rcu_read_unlock();
}
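
/*
 * Behaviour note for svc_register() (a reading of the loop above): a
 * version that fails to register stops registration of the remaining
 * versions of that program only; later programs in the sv_program
 * chain are still attempted, and the function returns the status of
 * the last attempt made.
 */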

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

__be32
svc_generic_init_request(struct svc_rqst *rqstp,
			 const struct svc_program *progp,
			 struct svc_process_info *ret)
{
	const struct svc_version *versp = NULL;	/* compiler food */
	const struct svc_procedure *procp = NULL;

	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	if (!versp)
		goto err_bad_vers;

	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control. (See RFC 7530 section 3.1 paragraph 2)
	 * In other words, UDP is not allowed. We mark those when setting
	 * up the svc_xprt, and verify that here.
	 *
	 * The spec is not very clear about what error should be returned
	 * when someone tries to access a server that is listening on UDP
	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
	 * fit.
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
	if (!procp)
		goto err_bad_proc;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argzero);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	this_cpu_inc(versp->vs_count[rqstp->rq_proc]);

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
err_bad_proc:
	return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);
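
/*
 * Dispatch example (illustrative, using well-known RPC constants
 * rather than anything defined in this file): for an NFSv3 GETATTR
 * call, rq_prog is 100003, rq_vers selects pg_vers[3], and rq_proc
 * selects vs_proc[1]; a request for a version beyond pg_nvers takes
 * the err_bad_vers exit and reports the program's lovers/hivers
 * range in a PROG_MISMATCH reply instead.
 */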

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct svc_program *progp;
	const struct svc_procedure *procp = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_process_info process;
	int auth_res, rc;
	unsigned int aoffset;
	__be32 *p;

	/* Will be turned off by GSS integrity and privacy services */
	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

	/* Construct the first words of the reply: */
	svcxdr_init_encode(rqstp);
	xdr_stream_encode_be32(xdr, rqstp->rq_xid);
	xdr_stream_encode_be32(xdr, rpc_reply);

	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
	if (unlikely(!p))
		goto err_short_len;
	if (*p++ != cpu_to_be32(RPC_VERSION))
		goto err_bad_rpc;

	xdr_stream_encode_be32(xdr, rpc_msg_accepted);

	rqstp->rq_prog = be32_to_cpup(p++);
	rqstp->rq_vers = be32_to_cpup(p++);
	rqstp->rq_proc = be32_to_cpup(p);

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (rqstp->rq_prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp)
		auth_res = progp->pg_authenticate(rqstp);
	trace_svc_authenticate(rqstp, auth_res);
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage_args;
	case SVC_SYSERR:
		goto err_system_err;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		goto close;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	switch (progp->pg_init_request(rqstp, progp, &process)) {
	case rpc_success:
		break;
	case rpc_prog_unavail:
		goto err_bad_prog;
	case rpc_prog_mismatch:
		goto err_bad_vers;
	case rpc_proc_unavail:
		goto err_bad_proc;
	}

	procp = rqstp->rq_procinfo;
	/* Should this check go into the dispatcher? */
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);

	aoffset = xdr_stream_pos(xdr);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize << 2);

	/* Call the function that processes the request. */
	rc = process.dispatch(rqstp);
	if (procp->pc_release)
		procp->pc_release(rqstp);
	if (!rc)
		goto dropit;
	if (rqstp->rq_auth_stat != rpc_auth_ok)
		goto err_bad_auth;

	if (*rqstp->rq_accept_statp != rpc_success)
		xdr_truncate_encode(xdr, aoffset);

	if (procp->pc_encode == NULL)
		goto dropit;

sendit:
	if (svc_authorise(rqstp))
		goto close_xprt;
	return 1;		/* Caller can now send it */

dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

close:
	svc_authorise(rqstp);
close_xprt:
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_xprt_close(rqstp->rq_xprt);
	dprintk("svc: svc_process close\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %u, dropping request\n",
		   rqstp->rq_arg.len);
	goto close_xprt;

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
	xdr_stream_encode_u32(xdr, RPC_MISMATCH);
	/* Only RPCv2 supported */
	xdr_stream_encode_u32(xdr, RPC_VERSION);
	xdr_stream_encode_u32(xdr, RPC_VERSION);
	return 1;	/* don't wrap */

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n",
		be32_to_cpu(rqstp->rq_auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of reply status: */
	xdr_truncate_encode(xdr, XDR_UNIT * 2);
	xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
	xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR);
	xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", rqstp->rq_prog);
	serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_prog_unavail;
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		   rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_prog_mismatch;

	/*
	 * svc_authenticate() has already added the verifier and
	 * advanced the stream just past rq_accept_statp.
	 */
	xdr_stream_encode_u32(xdr, process.mismatch.lovers);
	xdr_stream_encode_u32(xdr, process.mismatch.hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

	serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_proc_unavail;
	goto sendit;

err_garbage_args:
	svc_printk(rqstp, "failed to decode RPC header\n");

	serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_garbage_args;
	goto sendit;

err_system_err:
	serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_system_err;
	goto sendit;
}
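
/*
 * Reply layout reference (from RFC 5531, not from this file): the
 * words emitted above for an accepted reply are
 *
 *	xid | REPLY(1) | MSG_ACCEPTED(0) | verifier | accept_stat | ...
 *
 * which is why err_bad_auth truncates back to XDR_UNIT * 2 (just past
 * xid and REPLY) before encoding the MSG_DENIED branch instead.
 */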

/**
 * svc_process - Execute one RPC transaction
 * @rqstp: RPC transaction context
 *
 */
void svc_process(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];
	__be32 *p;

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
	if (!fail_sunrpc.ignore_server_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		svc_xprt_deferred_close(rqstp->rq_xprt);
#endif

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_next_page;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	svcxdr_init_decode(rqstp);
	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
	if (unlikely(!p))
		goto out_drop;
	rqstp->rq_xid = *p++;
	if (unlikely(*p != rpc_call))
		goto out_baddir;

	if (!svc_process_common(rqstp))
		goto out_drop;
	svc_send(rqstp);
	return;

out_baddir:
	svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
		   be32_to_cpu(*p));
	rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
	svc_drop(rqstp);
}
EXPORT_SYMBOL_GPL(svc_process);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct rpc_task *task;
	int proc_error;
	int error;

	dprintk("svc: %s(%p)\n", __func__, req);

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;

	/* Reset the response buffer */
	rqstp->rq_res.head[0].iov_len = 0;

	/*
	 * Skip the XID and calldir fields because they've already
	 * been processed by the caller.
	 */
	svcxdr_init_decode(rqstp);
	if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) {
		error = -EINVAL;
		goto out;
	}

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp);

	atomic_dec(&req->rq_xprt->bc_slot_count);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		error = -EINVAL;
		goto out;
	}
	/* Finally, send the reply synchronously */
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	error = task->tk_status;
	rpc_put_task(task);

out:
	dprintk("svc: %s(), error=%d\n", __func__, error);
	return error;
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
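
/*
 * Note on the rq_arg length fixup in bc_svc_process() above (a
 * reading of the code, not an authoritative statement about the
 * backchannel protocol): rq_private_buf.len is the byte count
 * actually received, so the three branches either shrink the head
 * and page lengths to match it, or clamp the total down to what the
 * head plus pages can hold when the received count is larger.
 */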

/**
 * svc_max_payload - Return transport-specific limit on the RPC payload
 * @rqstp: RPC transaction context
 *
 * Returns the maximum number of payload bytes the current transport
 * allows.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);

/**
 * svc_proc_name - Return RPC procedure name in string form
 * @rqstp: svc_rqst to operate on
 *
 * Return value:
 *   Pointer to a NUL-terminated string
 */
const char *svc_proc_name(const struct svc_rqst *rqstp)
{
	if (rqstp && rqstp->rq_procinfo)
		return rqstp->rq_procinfo->pc_name;
	return "unknown";
}

/**
 * svc_encode_result_payload - mark a range of bytes as a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			      unsigned int length)
{
	return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
							   length);
}
EXPORT_SYMBOL_GPL(svc_encode_result_payload);

/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 * @rqstp: svc_rqst to operate on
 * @payload: xdr_buf containing only the write data payload
 *
 * Fills in rqstp::rq_vec, and returns the number of elements.
 */
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
				   struct xdr_buf *payload)
{
	struct page **pages = payload->pages;
	struct kvec *first = payload->head;
	struct kvec *vec = rqstp->rq_vec;
	size_t total = payload->len;
	unsigned int i;

	/* Some types of transport can present the write payload
	 * entirely in rq_arg.pages. In this case, @first is empty.
	 */
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}

	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
	return i;
}
EXPORT_SYMBOL_GPL(svc_fill_write_vector);

/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
 * the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	*dst = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
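
/*
 * Worked example for svc_fill_write_vector() above (illustrative
 * numbers, assuming 4KB pages): a 10000-byte payload with 100 bytes
 * in payload->head yields vec[0] = 100 bytes from the head kvec,
 * vec[1] = 4096 and vec[2] = 4096 bytes from the first two pages,
 * and vec[3] = 1708 bytes from the third, returning 4.
 */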