/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

#include <asm/ioctls.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/cache.h>
#include <linux/nsproxy.h>
#include <linux/notifier.h>

#include "netns.h"
#include "sunrpc.h"

#define RPCDBG_FACILITY RPCDBG_DEBUG

#define NET_NAME(net)	((net == &init_net) ? " (init_net)" : "")

static struct file_system_type rpc_pipe_fs_type;


static struct kmem_cache *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT (30*HZ)

static BLOCKING_NOTIFIER_HEAD(rpc_pipefs_notifier_list);

int rpc_pipefs_notifier_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_cond_register(&rpc_pipefs_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_register);

void rpc_pipefs_notifier_unregister(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&rpc_pipefs_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister);

static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del_init(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));

	if (waitq)
		wake_up(waitq);
}

static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_pipe *pipe =
		container_of(work, struct rpc_pipe, queue_timeout.work);
	void (*destroy_msg)(struct rpc_pipe_msg *);
	struct dentry *dentry;

	spin_lock(&pipe->lock);
	destroy_msg = pipe->ops->destroy_msg;
	if (pipe->nreaders == 0) {
		list_splice_init(&pipe->pipe, &free_list);
		pipe->pipelen = 0;
	}
	dentry = dget(pipe->dentry);
	spin_unlock(&pipe->lock);
	rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
			&free_list, destroy_msg, -ETIMEDOUT);
	dput(dentry);
}

ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
				char __user *dst, size_t buflen)
{
	char *data = (char *)msg->data + msg->copied;
	size_t mlen = min(msg->len - msg->copied, buflen);
	unsigned long left;

	left = copy_to_user(dst, data, mlen);
	if (left == mlen) {
		msg->errno = -EFAULT;
		return -EFAULT;
	}

	mlen -= left;
	msg->copied += mlen;
	msg->errno = 0;
	return mlen;
}
EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
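
/*
 * Illustrative sketch (not part of this file): a minimal rpc_pipe_ops that
 * reuses rpc_pipe_generic_upcall() for the read side, similar to what
 * auth_gss does.  The example_* names and the message ownership scheme are
 * hypothetical.
 */
#if 0
static ssize_t example_downcall(struct file *filp, const char __user *src,
				size_t len)
{
	/* parse the reply written by the userspace daemon here */
	return len;
}

static void example_destroy_msg(struct rpc_pipe_msg *msg)
{
	/* free whatever backs msg->data, then the message itself */
	kfree(msg->data);
	kfree(msg);
}

static const struct rpc_pipe_ops example_pipe_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= example_downcall,
	.destroy_msg	= example_destroy_msg,
};
#endif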

/**
 * rpc_queue_upcall - queue an upcall message to userspace
 * @pipe: upcall pipe on which to queue given message
 * @msg: message to queue
 *
 * Call with a @pipe created by rpc_mkpipe_data() to queue an upcall.
 * A userspace process may then later read the upcall by performing a
 * read on an open file for this pipe.  It is up to the caller to
 * initialize the fields of @msg (other than @msg->list) appropriately.
 */
int
rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg)
{
	int res = -EPIPE;
	struct dentry *dentry;

	spin_lock(&pipe->lock);
	if (pipe->nreaders) {
		list_add_tail(&msg->list, &pipe->pipe);
		pipe->pipelen += msg->len;
		res = 0;
	} else if (pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&pipe->pipe))
			queue_delayed_work(rpciod_workqueue,
					&pipe->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &pipe->pipe);
		pipe->pipelen += msg->len;
		res = 0;
	}
	dentry = dget(pipe->dentry);
	spin_unlock(&pipe->lock);
	if (dentry) {
		wake_up(&RPC_I(dentry->d_inode)->waitq);
		dput(dentry);
	}
	return res;
}
EXPORT_SYMBOL_GPL(rpc_queue_upcall);
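
/*
 * Illustrative sketch (not part of this file): queuing an upcall.  A queued
 * message must stay allocated until ->destroy_msg() is called for it; the
 * example_* names and the payload handling are hypothetical.
 */
#if 0
static int example_send_upcall(struct rpc_pipe *pipe, void *payload, size_t len)
{
	struct rpc_pipe_msg *msg;
	int err;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	msg->data = payload;	/* what userspace will read */
	msg->len = len;
	msg->copied = 0;
	msg->errno = 0;

	err = rpc_queue_upcall(pipe, msg);
	if (err < 0)
		kfree(msg);	/* not queued; caller still owns payload */
	return err;
}
#endif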

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	int need_release;
	LIST_HEAD(free_list);

	mutex_lock(&inode->i_mutex);
	spin_lock(&pipe->lock);
	need_release = pipe->nreaders != 0 || pipe->nwriters != 0;
	pipe->nreaders = 0;
	list_splice_init(&pipe->in_upcall, &free_list);
	list_splice_init(&pipe->pipe, &free_list);
	pipe->pipelen = 0;
	pipe->dentry = NULL;
	spin_unlock(&pipe->lock);
	rpc_purge_list(&RPC_I(inode)->waitq, &free_list, pipe->ops->destroy_msg, -EPIPE);
	pipe->nwriters = 0;
	if (need_release && pipe->ops->release_pipe)
		pipe->ops->release_pipe(inode);
	cancel_delayed_work_sync(&pipe->queue_timeout);
	rpc_inode_setowner(inode, NULL);
	RPC_I(inode)->pipe = NULL;
	mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static void
rpc_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, rpc_i_callback);
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_pipe *pipe;
	int first_open;
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	sn->gssd_running = 1;
	pipe = RPC_I(inode)->pipe;
	if (pipe == NULL)
		goto out;
	first_open = pipe->nreaders == 0 && pipe->nwriters == 0;
	if (first_open && pipe->ops->open_pipe) {
		res = pipe->ops->open_pipe(inode);
		if (res)
			goto out;
	}
	if (filp->f_mode & FMODE_READ)
		pipe->nreaders++;
	if (filp->f_mode & FMODE_WRITE)
		pipe->nwriters++;
	res = 0;
out:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_pipe *pipe;
	struct rpc_pipe_msg *msg;
	int last_close;

	mutex_lock(&inode->i_mutex);
	pipe = RPC_I(inode)->pipe;
	if (pipe == NULL)
		goto out;
	msg = filp->private_data;
	if (msg != NULL) {
		spin_lock(&pipe->lock);
		msg->errno = -EAGAIN;
		list_del_init(&msg->list);
		spin_unlock(&pipe->lock);
		pipe->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		pipe->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		pipe->nreaders--;
		if (pipe->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&pipe->lock);
			list_splice_init(&pipe->pipe, &free_list);
			pipe->pipelen = 0;
			spin_unlock(&pipe->lock);
			rpc_purge_list(&RPC_I(inode)->waitq, &free_list,
					pipe->ops->destroy_msg, -EAGAIN);
		}
	}
	last_close = pipe->nwriters == 0 && pipe->nreaders == 0;
	if (last_close && pipe->ops->release_pipe)
		pipe->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = file_inode(filp);
	struct rpc_pipe *pipe;
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	pipe = RPC_I(inode)->pipe;
	if (pipe == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&pipe->lock);
		if (!list_empty(&pipe->pipe)) {
			msg = list_entry(pipe->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &pipe->in_upcall);
			pipe->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&pipe->lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = pipe->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&pipe->lock);
		list_del_init(&msg->list);
		spin_unlock(&pipe->lock);
		pipe->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = file_inode(filp);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (RPC_I(inode)->pipe != NULL)
		res = RPC_I(inode)->pipe->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct inode *inode = file_inode(filp);
	struct rpc_inode *rpci = RPC_I(inode);
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &rpci->waitq, wait);

	mutex_lock(&inode->i_mutex);
	if (rpci->pipe == NULL)
		mask |= POLLERR | POLLHUP;
	else if (filp->private_data || !list_empty(&rpci->pipe->pipe))
		mask |= POLLIN | POLLRDNORM;
	mutex_unlock(&inode->i_mutex);
	return mask;
}

static long
rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct rpc_pipe *pipe;
	int len;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&inode->i_mutex);
		pipe = RPC_I(inode)->pipe;
		if (pipe == NULL) {
			mutex_unlock(&inode->i_mutex);
			return -EPIPE;
		}
		spin_lock(&pipe->lock);
		len = pipe->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = filp->private_data;
			len += msg->len - msg->copied;
		}
		spin_unlock(&pipe->lock);
		mutex_unlock(&inode->i_mutex);
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}
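
/*
 * Userspace view (illustrative sketch, not part of this file): a daemon such
 * as gssd typically opens the pipe file, polls for POLLIN, optionally uses
 * FIONREAD to size its buffer, reads the upcall, and writes the reply back:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	int pending;
 *
 *	poll(&pfd, 1, -1);
 *	ioctl(fd, FIONREAD, &pending);
 *	n = read(fd, buf, sizeof(buf));
 *	... process the upcall, build a reply ...
 *	write(fd, reply, reply_len);
 */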

static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.unlocked_ioctl	= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	rcu_read_lock();
	seq_printf(m, "RPC server: %s\n",
			rcu_dereference(clnt->cl_xprt)->servername);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
	rcu_read_unlock();
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt = NULL;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;

		spin_lock(&file->f_path.dentry->d_lock);
		if (!d_unhashed(file->f_path.dentry))
			clnt = RPC_I(inode)->private;
		if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
			spin_unlock(&file->f_path.dentry->d_lock);
			m->private = clnt;
		} else {
			spin_unlock(&file->f_path.dentry->d_lock);
			single_release(inode, file);
			ret = -EINVAL;
		}
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};


/*
 * Description of fs contents.
 */
struct rpc_filelist {
	const char *name;
	const struct file_operations *i_fop;
	umode_t mode;
};

static int rpc_delete_dentry(const struct dentry *dentry)
{
	return 1;
}

static const struct dentry_operations rpc_dentry_operations = {
	.d_delete = rpc_delete_dentry,
};
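
/*
 * Illustrative sketch (not part of this file): given the seq_printf() calls
 * in rpc_show_info() above, reading a client's "info" file under rpc_pipefs
 * yields output of this shape (values are placeholders, not real output):
 *
 *	RPC server: <servername>
 *	service: <program name> (<program number>) version <version>
 *	address: <peer address>
 *	protocol: <transport protocol>
 *	port: <port>
 */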

/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative.
 */
static struct dentry *
rpc_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	d_add(dentry, NULL);
	return NULL;
}

static const struct inode_operations rpc_dir_inode_operations = {
	.lookup		= rpc_lookup,
};

static struct inode *
rpc_get_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_fop = &simple_dir_operations;
		inode->i_op = &rpc_dir_inode_operations;
		inc_nlink(inode);
	default:
		break;
	}
	return inode;
}

static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
			       umode_t mode,
			       const struct file_operations *i_fop,
			       void *private)
{
	struct inode *inode;

	d_drop(dentry);
	inode = rpc_get_inode(dir->i_sb, mode);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	if (i_fop)
		inode->i_fop = i_fop;
	if (private)
		rpc_inode_setowner(inode, private);
	d_add(dentry, inode);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __func__, dentry->d_name.name);
	dput(dentry);
	return -ENOMEM;
}

static int __rpc_create(struct inode *dir, struct dentry *dentry,
			umode_t mode,
			const struct file_operations *i_fop,
			void *private)
{
	int err;

	err = __rpc_create_common(dir, dentry, S_IFREG | mode, i_fop, private);
	if (err)
		return err;
	fsnotify_create(dir, dentry);
	return 0;
}

static int __rpc_mkdir(struct inode *dir, struct dentry *dentry,
		       umode_t mode,
		       const struct file_operations *i_fop,
		       void *private)
{
	int err;

	err = __rpc_create_common(dir, dentry, S_IFDIR | mode, i_fop, private);
	if (err)
		return err;
	inc_nlink(dir);
	fsnotify_mkdir(dir, dentry);
	return 0;
}

static void
init_pipe(struct rpc_pipe *pipe)
{
	pipe->nreaders = 0;
	pipe->nwriters = 0;
	INIT_LIST_HEAD(&pipe->in_upcall);
	INIT_LIST_HEAD(&pipe->in_downcall);
	INIT_LIST_HEAD(&pipe->pipe);
	pipe->pipelen = 0;
	INIT_DELAYED_WORK(&pipe->queue_timeout,
			rpc_timeout_upcall_queue);
	pipe->ops = NULL;
	spin_lock_init(&pipe->lock);
	pipe->dentry = NULL;
}

void rpc_destroy_pipe_data(struct rpc_pipe *pipe)
{
	kfree(pipe);
}
EXPORT_SYMBOL_GPL(rpc_destroy_pipe_data);

struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags)
{
	struct rpc_pipe *pipe;

	pipe = kzalloc(sizeof(struct rpc_pipe), GFP_KERNEL);
	if (!pipe)
		return ERR_PTR(-ENOMEM);
	init_pipe(pipe);
	pipe->ops = ops;
	pipe->flags = flags;
	return pipe;
}
EXPORT_SYMBOL_GPL(rpc_mkpipe_data);

static int __rpc_mkpipe_dentry(struct inode *dir, struct dentry *dentry,
			       umode_t mode,
			       const struct file_operations *i_fop,
			       void *private,
			       struct rpc_pipe *pipe)
{
	struct rpc_inode *rpci;
	int err;

	err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private);
	if (err)
		return err;
	rpci = RPC_I(dentry->d_inode);
	rpci->private = private;
	rpci->pipe = pipe;
	fsnotify_create(dir, dentry);
	return 0;
}
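
/*
 * Illustrative sketch (not part of this file): allocating pipe data before a
 * dentry exists for it.  RPC_PIPE_WAIT_FOR_OPEN lets rpc_queue_upcall() hold
 * messages (for up to RPC_UPCALL_TIMEOUT) until a reader opens the pipe.
 * example_pipe_ops is the hypothetical ops table sketched earlier.
 */
#if 0
static struct rpc_pipe *example_alloc_pipe(void)
{
	struct rpc_pipe *pipe;

	pipe = rpc_mkpipe_data(&example_pipe_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(pipe))
		return pipe;
	/* later: pair with rpc_mkpipe_dentry()/rpc_unlink(), then release
	 * with rpc_destroy_pipe_data(pipe) */
	return pipe;
}
#endif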

static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int ret;

	dget(dentry);
	ret = simple_rmdir(dir, dentry);
	d_delete(dentry);
	dput(dentry);
	return ret;
}

int rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	error = __rpc_rmdir(dir, dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
EXPORT_SYMBOL_GPL(rpc_rmdir);

static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
{
	int ret;

	dget(dentry);
	ret = simple_unlink(dir, dentry);
	d_delete(dentry);
	dput(dentry);
	return ret;
}

static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	rpc_close_pipes(inode);
	return __rpc_unlink(dir, dentry);
}

static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
					  const char *name)
{
	struct qstr q = QSTR_INIT(name, strlen(name));
	struct dentry *dentry = d_hash_and_lookup(parent, &q);
	if (!dentry) {
		dentry = d_alloc(parent, &q);
		if (!dentry)
			return ERR_PTR(-ENOMEM);
	}
	if (dentry->d_inode == NULL)
		return dentry;
	dput(dentry);
	return ERR_PTR(-EEXIST);
}

/*
 * FIXME: This probably has races.
 */
static void __rpc_depopulate(struct dentry *parent,
			     const struct rpc_filelist *files,
			     int start, int eof)
{
	struct inode *dir = parent->d_inode;
	struct dentry *dentry;
	struct qstr name;
	int i;

	for (i = start; i < eof; i++) {
		name.name = files[i].name;
		name.len = strlen(files[i].name);
		dentry = d_hash_and_lookup(parent, &name);

		if (dentry == NULL)
			continue;
		if (dentry->d_inode == NULL)
			goto next;
		switch (dentry->d_inode->i_mode & S_IFMT) {
		default:
			BUG();
		case S_IFREG:
			__rpc_unlink(dir, dentry);
			break;
		case S_IFDIR:
			__rpc_rmdir(dir, dentry);
		}
next:
		dput(dentry);
	}
}

static void rpc_depopulate(struct dentry *parent,
			   const struct rpc_filelist *files,
			   int start, int eof)
{
	struct inode *dir = parent->d_inode;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
	__rpc_depopulate(parent, files, start, eof);
	mutex_unlock(&dir->i_mutex);
}

static int rpc_populate(struct dentry *parent,
			const struct rpc_filelist *files,
			int start, int eof,
			void *private)
{
	struct inode *dir = parent->d_inode;
	struct dentry *dentry;
	int i, err;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = __rpc_lookup_create_exclusive(parent, files[i].name);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_bad;
		switch (files[i].mode & S_IFMT) {
		default:
			BUG();
		case S_IFREG:
			err = __rpc_create(dir, dentry,
						files[i].mode,
						files[i].i_fop,
						private);
			break;
		case S_IFDIR:
			err = __rpc_mkdir(dir, dentry,
						files[i].mode,
						NULL,
						private);
		}
		if (err != 0)
			goto out_bad;
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	__rpc_depopulate(parent, files, start, eof);
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __func__, parent->d_name.name);
	return err;
}
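
/*
 * Illustrative sketch (not part of this file): the rpc_filelist/rpc_populate()
 * pattern used by the tables further down (authfiles, cache_pipefs_files).
 * The example_* names and file_operations are hypothetical.
 */
#if 0
static const struct rpc_filelist example_files[] = {
	{
		.name	= "status",
		.i_fop	= &example_status_fops,
		.mode	= S_IFREG | S_IRUSR,
	},
	{
		.name	= "private",
		.mode	= S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

static int example_dir_populate(struct dentry *dentry, void *private)
{
	return rpc_populate(dentry, example_files, 0,
			    ARRAY_SIZE(example_files), private);
}
#endif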

static struct dentry *rpc_mkdir_populate(struct dentry *parent,
		const char *name, umode_t mode, void *private,
		int (*populate)(struct dentry *, void *), void *args_populate)
{
	struct dentry *dentry;
	struct inode *dir = parent->d_inode;
	int error;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = __rpc_lookup_create_exclusive(parent, name);
	if (IS_ERR(dentry))
		goto out;
	error = __rpc_mkdir(dir, dentry, mode, NULL, private);
	if (error != 0)
		goto out_err;
	if (populate != NULL) {
		error = populate(dentry, args_populate);
		if (error)
			goto err_rmdir;
	}
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
err_rmdir:
	__rpc_rmdir(dir, dentry);
out_err:
	dentry = ERR_PTR(error);
	goto out;
}

static int rpc_rmdir_depopulate(struct dentry *dentry,
				void (*depopulate)(struct dentry *))
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (depopulate != NULL)
		depopulate(dentry);
	error = __rpc_rmdir(dir, dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}

/**
 * rpc_mkpipe_dentry - make an rpc_pipefs file for kernel<->userspace communication
 * @parent: dentry of directory to create new "pipe" in
 * @name: name of pipe
 * @private: private data to associate with the pipe, for the caller's use
 * @pipe: &rpc_pipe containing input parameters
 *
 * Data is made available for userspace to read by calls to
 * rpc_queue_upcall().  The actual reads will result in calls to
 * @pipe->ops->upcall, which will be called with the file pointer,
 * message, and userspace buffer to copy to.
 *
 * Writes can come at any time, and do not necessarily have to be
 * responses to upcalls.  They will result in calls to @pipe->ops->downcall.
 *
 * The @private argument passed here will be available to all these methods
 * from the file pointer, via RPC_I(file_inode(file))->private.
 */
struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
				 void *private, struct rpc_pipe *pipe)
{
	struct dentry *dentry;
	struct inode *dir = parent->d_inode;
	umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR;
	int err;

	if (pipe->ops->upcall == NULL)
		umode &= ~S_IRUGO;
	if (pipe->ops->downcall == NULL)
		umode &= ~S_IWUGO;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = __rpc_lookup_create_exclusive(parent, name);
	if (IS_ERR(dentry))
		goto out;
	err = __rpc_mkpipe_dentry(dir, dentry, umode, &rpc_pipe_fops,
				  private, pipe);
	if (err)
		goto out_err;
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
out_err:
	dentry = ERR_PTR(err);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __func__, parent->d_name.name, name,
			err);
	goto out;
}
EXPORT_SYMBOL_GPL(rpc_mkpipe_dentry);
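
/*
 * Illustrative sketch (not part of this file): wiring a pipe into the
 * filesystem and tearing it down again, roughly what auth_gss does per
 * client directory.  clnt_dir, the "example" name and the call ordering
 * here are hypothetical; the pipe data comes from rpc_mkpipe_data().
 */
#if 0
static int example_attach_pipe(struct dentry *clnt_dir, struct rpc_pipe *pipe,
			       void *private)
{
	struct dentry *dentry;

	dentry = rpc_mkpipe_dentry(clnt_dir, "example", private, pipe);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	/* ... queue upcalls with rpc_queue_upcall(pipe, msg) ... */

	/* on teardown: remove the file, then free the pipe data */
	rpc_unlink(dentry);
	rpc_destroy_pipe_data(pipe);
	return 0;
}
#endif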

/**
 * rpc_unlink - remove a pipe
 * @dentry: dentry for the pipe, as returned from rpc_mkpipe_dentry()
 *
 * After this call, lookups will no longer find the pipe, and any
 * attempts to read or write using preexisting opens of the pipe will
 * return -EPIPE.
 */
int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error = 0;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	error = __rpc_rmpipe(dir, dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
EXPORT_SYMBOL_GPL(rpc_unlink);

enum {
	RPCAUTH_info,
	RPCAUTH_EOF
};

static const struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

static int rpc_clntdir_populate(struct dentry *dentry, void *private)
{
	return rpc_populate(dentry,
			    authfiles, RPCAUTH_info, RPCAUTH_EOF,
			    private);
}

static void rpc_clntdir_depopulate(struct dentry *dentry)
{
	rpc_depopulate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF);
}

/**
 * rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs
 * @dentry: the parent of the new directory
 * @name: the name of the new directory
 * @rpc_client: rpc client to associate with this directory
 *
 * This creates a directory named @name under @dentry, associated with
 * @rpc_client, which will contain a file named "info" with some basic
 * information about the client, together with any "pipes" that may
 * later be created using rpc_mkpipe_dentry().
 */
struct dentry *rpc_create_client_dir(struct dentry *dentry,
				     const char *name,
				     struct rpc_clnt *rpc_client)
{
	return rpc_mkdir_populate(dentry, name, S_IRUGO | S_IXUGO, NULL,
				  rpc_clntdir_populate, rpc_client);
}

/**
 * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
 * @dentry: dentry for the client directory
 */
int rpc_remove_client_dir(struct dentry *dentry)
{
	return rpc_rmdir_depopulate(dentry, rpc_clntdir_depopulate);
}

static const struct rpc_filelist cache_pipefs_files[3] = {
	[0] = {
		.name = "channel",
		.i_fop = &cache_file_operations_pipefs,
		.mode = S_IFREG|S_IRUSR|S_IWUSR,
	},
	[1] = {
		.name = "content",
		.i_fop = &content_file_operations_pipefs,
		.mode = S_IFREG|S_IRUSR,
	},
	[2] = {
		.name = "flush",
		.i_fop = &cache_flush_operations_pipefs,
		.mode = S_IFREG|S_IRUSR|S_IWUSR,
	},
};

static int rpc_cachedir_populate(struct dentry *dentry, void *private)
{
	return rpc_populate(dentry,
			    cache_pipefs_files, 0, 3,
			    private);
}

static void rpc_cachedir_depopulate(struct dentry *dentry)
{
	rpc_depopulate(dentry, cache_pipefs_files, 0, 3);
}

struct dentry *rpc_create_cache_dir(struct dentry *parent, const char *name,
				    umode_t umode, struct cache_detail *cd)
{
	return rpc_mkdir_populate(parent, name, umode, NULL,
				  rpc_cachedir_populate, cd);
}

void rpc_remove_cache_dir(struct dentry *dentry)
{
	rpc_rmdir_depopulate(dentry, rpc_cachedir_depopulate);
}

/*
 * populate the filesystem
 */
static const struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969
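
/*
 * Illustrative sketch (not part of this file): exporting a cache_detail under
 * rpc_pipefs with the helpers above, as the NFS/NFSD caches do.  The parent
 * dentry, the "example_cache" name and the teardown point are hypothetical.
 */
#if 0
static int example_export_cache(struct dentry *parent, struct cache_detail *cd)
{
	struct dentry *dir;

	dir = rpc_create_cache_dir(parent, "example_cache",
				   S_IRUGO | S_IXUGO, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	/* ... later, on teardown: */
	rpc_remove_cache_dir(dir);
	return 0;
}
#endif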

/*
 * Layout of the fixed top-level rpc_pipefs directories.
 */
enum {
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_nfsd4_cb,
	RPCAUTH_cache,
	RPCAUTH_nfsd,
	RPCAUTH_RootEOF
};

static const struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfsd4_cb] = {
		.name = "nfsd4_cb",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_cache] = {
		.name = "cache",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfsd] = {
		.name = "nfsd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

/*
 * This call can be used only in RPC pipefs mount notification hooks.
 */
struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
			       const unsigned char *dir_name)
{
	struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
	return d_hash_and_lookup(sb->s_root, &dir);
}
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);

void rpc_pipefs_init_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	mutex_init(&sn->pipefs_sb_lock);
	sn->gssd_running = 1;
	sn->pipe_version = -1;
}
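
/*
 * Illustrative sketch (not part of this file): a pipefs notifier callback
 * that looks up one of the fixed top-level directories when the filesystem
 * is mounted.  example_notifier_cb and what is done with the dentry are
 * hypothetical; RPC_PIPEFS_MOUNT/UMOUNT and rpc_d_lookup_sb() are real.
 */
#if 0
static int example_notifier_cb(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct super_block *sb = ptr;
	struct dentry *dir;

	if (event != RPC_PIPEFS_MOUNT)
		return 0;
	dir = rpc_d_lookup_sb(sb, "cache");
	if (!dir)
		return -ENOENT;
	/* ... create per-mount objects under "cache" ... */
	dput(dir);
	return 0;
}
#endif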

/*
 * This call is used by per-network-namespace operations.
 * Note: if a superblock is found, the function returns with pipefs_sb_lock
 * held.  The lock must be released by rpc_put_sb_net() once the caller has
 * finished operating on the superblock.
 */
struct super_block *rpc_get_sb_net(const struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	mutex_lock(&sn->pipefs_sb_lock);
	if (sn->pipefs_sb)
		return sn->pipefs_sb;
	mutex_unlock(&sn->pipefs_sb_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(rpc_get_sb_net);

void rpc_put_sb_net(const struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	WARN_ON(sn->pipefs_sb == NULL);
	mutex_unlock(&sn->pipefs_sb_lock);
}
EXPORT_SYMBOL_GPL(rpc_put_sb_net);

static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	struct net *net = data;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int err;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_d_op = &rpc_dentry_operations;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
	sb->s_root = root = d_make_root(inode);
	if (!root)
		return -ENOMEM;
	if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
		return -ENOMEM;
	dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n",
			net, NET_NAME(net));
	mutex_lock(&sn->pipefs_sb_lock);
	sn->pipefs_sb = sb;
	err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
					   RPC_PIPEFS_MOUNT,
					   sb);
	if (err)
		goto err_depopulate;
	sb->s_fs_info = get_net(net);
	mutex_unlock(&sn->pipefs_sb_lock);
	return 0;

err_depopulate:
	blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
				     RPC_PIPEFS_UMOUNT,
				     sb);
	sn->pipefs_sb = NULL;
	__rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
	mutex_unlock(&sn->pipefs_sb_lock);
	return err;
}

static struct dentry *
rpc_mount(struct file_system_type *fs_type,
	  int flags, const char *dev_name, void *data)
{
	return mount_ns(fs_type, flags, current->nsproxy->net_ns, rpc_fill_super);
}

static void rpc_kill_sb(struct super_block *sb)
{
	struct net *net = sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	mutex_lock(&sn->pipefs_sb_lock);
	if (sn->pipefs_sb != sb) {
		mutex_unlock(&sn->pipefs_sb_lock);
		goto out;
	}
	sn->pipefs_sb = NULL;
	dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n",
			net, NET_NAME(net));
	blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
				     RPC_PIPEFS_UMOUNT,
				     sb);
	mutex_unlock(&sn->pipefs_sb_lock);
	put_net(net);
out:
	kill_litter_super(sb);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.mount		= rpc_mount,
	.kill_sb	= rpc_kill_sb,
};
MODULE_ALIAS_FS("rpc_pipefs");
MODULE_ALIAS("rpc_pipefs");
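
/*
 * Illustrative sketch (not part of this file): the canonical
 * rpc_get_sb_net()/rpc_put_sb_net() pattern for per-namespace callers.
 * rpc_put_sb_net() may only be called when a superblock was actually
 * returned, since the lock is only held in that case.  The work done
 * under the lock here is hypothetical.
 */
#if 0
static void example_per_net_op(struct net *net)
{
	struct super_block *sb;

	sb = rpc_get_sb_net(net);
	if (!sb)
		return;		/* pipefs not mounted in this net */
	/* sb is pinned and pipefs_sb_lock is held here */
	/* ... create or remove pipe dentries under sb->s_root ... */
	rpc_put_sb_net(net);
}
#endif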

static void
init_once(void *foo)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	inode_init_once(&rpci->vfs_inode);
	rpci->private = NULL;
	rpci->pipe = NULL;
	init_waitqueue_head(&rpci->waitq);
}

int register_rpc_pipefs(void)
{
	int err;

	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	err = rpc_clients_notifier_register();
	if (err)
		goto err_notifier;
	err = register_filesystem(&rpc_pipe_fs_type);
	if (err)
		goto err_register;
	return 0;

err_register:
	rpc_clients_notifier_unregister();
err_notifier:
	kmem_cache_destroy(rpc_inode_cachep);
	return err;
}

void unregister_rpc_pipefs(void)
{
	rpc_clients_notifier_unregister();
	kmem_cache_destroy(rpc_inode_cachep);
	unregister_filesystem(&rpc_pipe_fs_type);
}
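
/*
 * Illustrative sketch (not part of this file): how a consumer ties the pieces
 * together at module init/exit time.  The example_* names are hypothetical;
 * rpc_pipefs_notifier_register()/unregister() are the real entry points, and
 * the callback would typically use rpc_d_lookup_sb() as sketched earlier.
 */
#if 0
static struct notifier_block example_pipefs_block = {
	.notifier_call	= example_notifier_cb,
};

static int __init example_init(void)
{
	return rpc_pipefs_notifier_register(&example_pipefs_block);
}

static void __exit example_exit(void)
{
	rpc_pipefs_notifier_unregister(&example_pipefs_block);
}
#endif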