/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

#include <asm/ioctls.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/cache.h>
#include <linux/nsproxy.h>
#include <linux/notifier.h>

#include "netns.h"
#include "sunrpc.h"

#define RPCDBG_FACILITY RPCDBG_DEBUG

#define NET_NAME(net) ((net == &init_net) ? " (init_net)" : "")

static struct file_system_type rpc_pipe_fs_type;


static struct kmem_cache *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT (30*HZ)

static BLOCKING_NOTIFIER_HEAD(rpc_pipefs_notifier_list);

int rpc_pipefs_notifier_register(struct notifier_block *nb)
{
        return blocking_notifier_chain_cond_register(&rpc_pipefs_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_register);

void rpc_pipefs_notifier_unregister(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&rpc_pipefs_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister);

static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head,
                void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
        struct rpc_pipe_msg *msg;

        if (list_empty(head))
                return;
        do {
                msg = list_entry(head->next, struct rpc_pipe_msg, list);
                list_del_init(&msg->list);
                msg->errno = err;
                destroy_msg(msg);
        } while (!list_empty(head));
        wake_up(waitq);
}

static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
        LIST_HEAD(free_list);
        struct rpc_pipe *pipe =
                container_of(work, struct rpc_pipe, queue_timeout.work);
        void (*destroy_msg)(struct rpc_pipe_msg *);
        struct dentry *dentry;

        spin_lock(&pipe->lock);
        destroy_msg = pipe->ops->destroy_msg;
        if (pipe->nreaders == 0) {
                list_splice_init(&pipe->pipe, &free_list);
                pipe->pipelen = 0;
        }
        dentry = dget(pipe->dentry);
        spin_unlock(&pipe->lock);
        if (dentry) {
                rpc_purge_list(&RPC_I(dentry->d_inode)->waitq,
                                &free_list, destroy_msg, -ETIMEDOUT);
                dput(dentry);
        }
}

ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
                                char __user *dst, size_t buflen)
{
        char *data = (char *)msg->data + msg->copied;
        size_t mlen = min(msg->len - msg->copied, buflen);
        unsigned long left;

        left = copy_to_user(dst, data, mlen);
        if (left == mlen) {
                msg->errno = -EFAULT;
                return -EFAULT;
        }

        mlen -= left;
        msg->copied += mlen;
        msg->errno = 0;
        return mlen;
}
EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
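
/*
 * Illustrative sketch (not used by this file): a pipe implementation
 * whose messages are plain, fully formatted buffers can typically point
 * its ->upcall method straight at rpc_pipe_generic_upcall() and only
 * supply ->downcall and ->destroy_msg itself, e.g.
 *
 *	static const struct rpc_pipe_ops example_pipe_ops = {
 *		.upcall		= rpc_pipe_generic_upcall,
 *		.downcall	= example_downcall,
 *		.destroy_msg	= example_destroy_msg,
 *	};
 *
 * The "example_*" callbacks above are hypothetical and shown only to
 * indicate where a caller's own handlers plug in.
 */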

/**
 * rpc_queue_upcall - queue an upcall message to userspace
 * @pipe: upcall pipe on which to queue the given message
 * @msg: message to queue
 *
 * Call with a @pipe created by rpc_mkpipe_data() to queue an upcall.
 * A userspace process may then later read the upcall by performing a
 * read on an open file for this pipe's inode.  It is up to the caller
 * to initialize the fields of @msg (other than @msg->list)
 * appropriately.
 */
int
rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg)
{
        int res = -EPIPE;
        struct dentry *dentry;

        spin_lock(&pipe->lock);
        if (pipe->nreaders) {
                list_add_tail(&msg->list, &pipe->pipe);
                pipe->pipelen += msg->len;
                res = 0;
        } else if (pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) {
                if (list_empty(&pipe->pipe))
                        queue_delayed_work(rpciod_workqueue,
                                        &pipe->queue_timeout,
                                        RPC_UPCALL_TIMEOUT);
                list_add_tail(&msg->list, &pipe->pipe);
                pipe->pipelen += msg->len;
                res = 0;
        }
        dentry = dget(pipe->dentry);
        spin_unlock(&pipe->lock);
        if (dentry) {
                wake_up(&RPC_I(dentry->d_inode)->waitq);
                dput(dentry);
        }
        return res;
}
EXPORT_SYMBOL_GPL(rpc_queue_upcall);

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
        RPC_I(inode)->private = private;
}

static void
rpc_close_pipes(struct inode *inode)
{
        struct rpc_pipe *pipe = RPC_I(inode)->pipe;
        int need_release;
        LIST_HEAD(free_list);

        mutex_lock(&inode->i_mutex);
        spin_lock(&pipe->lock);
        need_release = pipe->nreaders != 0 || pipe->nwriters != 0;
        pipe->nreaders = 0;
        list_splice_init(&pipe->in_upcall, &free_list);
        list_splice_init(&pipe->pipe, &free_list);
        pipe->pipelen = 0;
        pipe->dentry = NULL;
        spin_unlock(&pipe->lock);
        rpc_purge_list(&RPC_I(inode)->waitq, &free_list, pipe->ops->destroy_msg, -EPIPE);
        pipe->nwriters = 0;
        if (need_release && pipe->ops->release_pipe)
                pipe->ops->release_pipe(inode);
        cancel_delayed_work_sync(&pipe->queue_timeout);
        rpc_inode_setowner(inode, NULL);
        RPC_I(inode)->pipe = NULL;
        mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
        struct rpc_inode *rpci;
        rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
        if (!rpci)
                return NULL;
        return &rpci->vfs_inode;
}

static void
rpc_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static void
rpc_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, rpc_i_callback);
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
        struct rpc_pipe *pipe;
        int first_open;
        int res = -ENXIO;

        mutex_lock(&inode->i_mutex);
        pipe = RPC_I(inode)->pipe;
        if (pipe == NULL)
                goto out;
        first_open = pipe->nreaders == 0 && pipe->nwriters == 0;
        if (first_open && pipe->ops->open_pipe) {
                res = pipe->ops->open_pipe(inode);
                if (res)
                        goto out;
        }
        if (filp->f_mode & FMODE_READ)
                pipe->nreaders++;
        if (filp->f_mode & FMODE_WRITE)
                pipe->nwriters++;
        res = 0;
out:
        mutex_unlock(&inode->i_mutex);
        return res;
}
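
/*
 * Last close of a reader discards any queued but unread upcalls with
 * -EAGAIN; once both nreaders and nwriters drop to zero, the optional
 * ops->release_pipe callback is invoked, mirroring ops->open_pipe on
 * first open above.
 */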

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
        struct rpc_pipe *pipe;
        struct rpc_pipe_msg *msg;
        int last_close;

        mutex_lock(&inode->i_mutex);
        pipe = RPC_I(inode)->pipe;
        if (pipe == NULL)
                goto out;
        msg = filp->private_data;
        if (msg != NULL) {
                spin_lock(&pipe->lock);
                msg->errno = -EAGAIN;
                list_del_init(&msg->list);
                spin_unlock(&pipe->lock);
                pipe->ops->destroy_msg(msg);
        }
        if (filp->f_mode & FMODE_WRITE)
                pipe->nwriters--;
        if (filp->f_mode & FMODE_READ) {
                pipe->nreaders--;
                if (pipe->nreaders == 0) {
                        LIST_HEAD(free_list);
                        spin_lock(&pipe->lock);
                        list_splice_init(&pipe->pipe, &free_list);
                        pipe->pipelen = 0;
                        spin_unlock(&pipe->lock);
                        rpc_purge_list(&RPC_I(inode)->waitq, &free_list,
                                        pipe->ops->destroy_msg, -EAGAIN);
                }
        }
        last_close = pipe->nwriters == 0 && pipe->nreaders == 0;
        if (last_close && pipe->ops->release_pipe)
                pipe->ops->release_pipe(inode);
out:
        mutex_unlock(&inode->i_mutex);
        return 0;
}

static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct rpc_pipe *pipe;
        struct rpc_pipe_msg *msg;
        int res = 0;

        mutex_lock(&inode->i_mutex);
        pipe = RPC_I(inode)->pipe;
        if (pipe == NULL) {
                res = -EPIPE;
                goto out_unlock;
        }
        msg = filp->private_data;
        if (msg == NULL) {
                spin_lock(&pipe->lock);
                if (!list_empty(&pipe->pipe)) {
                        msg = list_entry(pipe->pipe.next,
                                        struct rpc_pipe_msg,
                                        list);
                        list_move(&msg->list, &pipe->in_upcall);
                        pipe->pipelen -= msg->len;
                        filp->private_data = msg;
                        msg->copied = 0;
                }
                spin_unlock(&pipe->lock);
                if (msg == NULL)
                        goto out_unlock;
        }
        /* NOTE: it is up to the callback to update msg->copied */
        res = pipe->ops->upcall(filp, msg, buf, len);
        if (res < 0 || msg->len == msg->copied) {
                filp->private_data = NULL;
                spin_lock(&pipe->lock);
                list_del_init(&msg->list);
                spin_unlock(&pipe->lock);
                pipe->ops->destroy_msg(msg);
        }
out_unlock:
        mutex_unlock(&inode->i_mutex);
        return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        int res;

        mutex_lock(&inode->i_mutex);
        res = -EPIPE;
        if (RPC_I(inode)->pipe != NULL)
                res = RPC_I(inode)->pipe->ops->downcall(filp, buf, len);
        mutex_unlock(&inode->i_mutex);
        return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct rpc_inode *rpci = RPC_I(inode);
        unsigned int mask = POLLOUT | POLLWRNORM;

        poll_wait(filp, &rpci->waitq, wait);

        mutex_lock(&inode->i_mutex);
        if (rpci->pipe == NULL)
                mask |= POLLERR | POLLHUP;
        else if (filp->private_data || !list_empty(&rpci->pipe->pipe))
                mask |= POLLIN | POLLRDNORM;
        mutex_unlock(&inode->i_mutex);
        return mask;
}

static long
rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct rpc_pipe *pipe;
        int len;

        switch (cmd) {
        case FIONREAD:
                mutex_lock(&inode->i_mutex);
                pipe = RPC_I(inode)->pipe;
                if (pipe == NULL) {
                        mutex_unlock(&inode->i_mutex);
                        return -EPIPE;
                }
                spin_lock(&pipe->lock);
                len = pipe->pipelen;
                if (filp->private_data) {
                        struct rpc_pipe_msg *msg;
                        msg = filp->private_data;
                        len += msg->len - msg->copied;
                }
                spin_unlock(&pipe->lock);
                mutex_unlock(&inode->i_mutex);
                return put_user(len, (int __user *)arg);
        default:
                return -EINVAL;
        }
}
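
/*
 * File operations for the pipe files created by rpc_mkpipe_dentry().
 * FIONREAD reports the number of bytes still queued for upcall,
 * including the unread remainder of a partially read message.
 */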

static const struct file_operations rpc_pipe_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = rpc_pipe_read,
        .write = rpc_pipe_write,
        .poll = rpc_pipe_poll,
        .unlocked_ioctl = rpc_pipe_ioctl,
        .open = rpc_pipe_open,
        .release = rpc_pipe_release,
};

static int
rpc_show_info(struct seq_file *m, void *v)
{
        struct rpc_clnt *clnt = m->private;

        rcu_read_lock();
        seq_printf(m, "RPC server: %s\n",
                        rcu_dereference(clnt->cl_xprt)->servername);
        seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
                        clnt->cl_prog, clnt->cl_vers);
        seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
        seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
        seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
        rcu_read_unlock();
        return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
        struct rpc_clnt *clnt = NULL;
        int ret = single_open(file, rpc_show_info, NULL);

        if (!ret) {
                struct seq_file *m = file->private_data;

                spin_lock(&file->f_path.dentry->d_lock);
                if (!d_unhashed(file->f_path.dentry))
                        clnt = RPC_I(inode)->private;
                if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
                        spin_unlock(&file->f_path.dentry->d_lock);
                        m->private = clnt;
                } else {
                        spin_unlock(&file->f_path.dentry->d_lock);
                        single_release(inode, file);
                        ret = -EINVAL;
                }
        }
        return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = file->private_data;
        struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

        if (clnt)
                rpc_release_client(clnt);
        return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
        .owner = THIS_MODULE,
        .open = rpc_info_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = rpc_info_release,
};
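
/*
 * The remainder of this file builds the rpc_pipefs directory tree
 * itself: inode and dentry creation helpers, the table-driven
 * populate/depopulate of well-known entries, and the superblock
 * plumbing for per-network-namespace mounts.
 */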

/*
 * Description of fs contents.
 */
struct rpc_filelist {
        const char *name;
        const struct file_operations *i_fop;
        umode_t mode;
};

static int rpc_delete_dentry(const struct dentry *dentry)
{
        return 1;
}

static const struct dentry_operations rpc_dentry_operations = {
        .d_delete = rpc_delete_dentry,
};

static struct inode *
rpc_get_inode(struct super_block *sb, umode_t mode)
{
        struct inode *inode = new_inode(sb);
        if (!inode)
                return NULL;
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        switch (mode & S_IFMT) {
        case S_IFDIR:
                inode->i_fop = &simple_dir_operations;
                inode->i_op = &simple_dir_inode_operations;
                inc_nlink(inode);
        default:
                break;
        }
        return inode;
}

static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
                               umode_t mode,
                               const struct file_operations *i_fop,
                               void *private)
{
        struct inode *inode;

        d_drop(dentry);
        inode = rpc_get_inode(dir->i_sb, mode);
        if (!inode)
                goto out_err;
        inode->i_ino = iunique(dir->i_sb, 100);
        if (i_fop)
                inode->i_fop = i_fop;
        if (private)
                rpc_inode_setowner(inode, private);
        d_add(dentry, inode);
        return 0;
out_err:
        printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
                        __FILE__, __func__, dentry->d_name.name);
        dput(dentry);
        return -ENOMEM;
}

static int __rpc_create(struct inode *dir, struct dentry *dentry,
                        umode_t mode,
                        const struct file_operations *i_fop,
                        void *private)
{
        int err;

        err = __rpc_create_common(dir, dentry, S_IFREG | mode, i_fop, private);
        if (err)
                return err;
        fsnotify_create(dir, dentry);
        return 0;
}

static int __rpc_mkdir(struct inode *dir, struct dentry *dentry,
                       umode_t mode,
                       const struct file_operations *i_fop,
                       void *private)
{
        int err;

        err = __rpc_create_common(dir, dentry, S_IFDIR | mode, i_fop, private);
        if (err)
                return err;
        inc_nlink(dir);
        fsnotify_mkdir(dir, dentry);
        return 0;
}

static void
init_pipe(struct rpc_pipe *pipe)
{
        pipe->nreaders = 0;
        pipe->nwriters = 0;
        INIT_LIST_HEAD(&pipe->in_upcall);
        INIT_LIST_HEAD(&pipe->in_downcall);
        INIT_LIST_HEAD(&pipe->pipe);
        pipe->pipelen = 0;
        INIT_DELAYED_WORK(&pipe->queue_timeout,
                          rpc_timeout_upcall_queue);
        pipe->ops = NULL;
        spin_lock_init(&pipe->lock);
        pipe->dentry = NULL;
}

void rpc_destroy_pipe_data(struct rpc_pipe *pipe)
{
        kfree(pipe);
}
EXPORT_SYMBOL_GPL(rpc_destroy_pipe_data);

struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags)
{
        struct rpc_pipe *pipe;

        pipe = kzalloc(sizeof(struct rpc_pipe), GFP_KERNEL);
        if (!pipe)
                return ERR_PTR(-ENOMEM);
        init_pipe(pipe);
        pipe->ops = ops;
        pipe->flags = flags;
        return pipe;
}
EXPORT_SYMBOL_GPL(rpc_mkpipe_data);

static int __rpc_mkpipe_dentry(struct inode *dir, struct dentry *dentry,
                               umode_t mode,
                               const struct file_operations *i_fop,
                               void *private,
                               struct rpc_pipe *pipe)
{
        struct rpc_inode *rpci;
        int err;

        err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private);
        if (err)
                return err;
        rpci = RPC_I(dentry->d_inode);
        rpci->private = private;
        rpci->pipe = pipe;
        fsnotify_create(dir, dentry);
        return 0;
}
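
/*
 * Typical (sketched) usage by a pipe owner, assuming "ops", "parent" and
 * "private" are the caller's own rpc_pipe_ops, parent dentry and private
 * data:
 *
 *	struct rpc_pipe *pipe = rpc_mkpipe_data(&ops, RPC_PIPE_WAIT_FOR_OPEN);
 *	...
 *	pipe->dentry = rpc_mkpipe_dentry(parent, "pipe-name", private, pipe);
 *
 * and the reverse on teardown: rpc_unlink(pipe->dentry) followed by
 * rpc_destroy_pipe_data(pipe).  Error handling is omitted in this sketch.
 */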

static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
        int ret;

        dget(dentry);
        ret = simple_rmdir(dir, dentry);
        d_delete(dentry);
        dput(dentry);
        return ret;
}

int rpc_rmdir(struct dentry *dentry)
{
        struct dentry *parent;
        struct inode *dir;
        int error;

        parent = dget_parent(dentry);
        dir = parent->d_inode;
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        error = __rpc_rmdir(dir, dentry);
        mutex_unlock(&dir->i_mutex);
        dput(parent);
        return error;
}
EXPORT_SYMBOL_GPL(rpc_rmdir);

static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
{
        int ret;

        dget(dentry);
        ret = simple_unlink(dir, dentry);
        d_delete(dentry);
        dput(dentry);
        return ret;
}

static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        rpc_close_pipes(inode);
        return __rpc_unlink(dir, dentry);
}

static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
                                          struct qstr *name)
{
        struct dentry *dentry;

        dentry = d_lookup(parent, name);
        if (!dentry) {
                dentry = d_alloc(parent, name);
                if (!dentry)
                        return ERR_PTR(-ENOMEM);
        }
        if (dentry->d_inode == NULL) {
                d_set_d_op(dentry, &rpc_dentry_operations);
                return dentry;
        }
        dput(dentry);
        return ERR_PTR(-EEXIST);
}

/*
 * FIXME: This probably has races.
 */
static void __rpc_depopulate(struct dentry *parent,
                             const struct rpc_filelist *files,
                             int start, int eof)
{
        struct inode *dir = parent->d_inode;
        struct dentry *dentry;
        struct qstr name;
        int i;

        for (i = start; i < eof; i++) {
                name.name = files[i].name;
                name.len = strlen(files[i].name);
                name.hash = full_name_hash(name.name, name.len);
                dentry = d_lookup(parent, &name);

                if (dentry == NULL)
                        continue;
                if (dentry->d_inode == NULL)
                        goto next;
                switch (dentry->d_inode->i_mode & S_IFMT) {
                default:
                        BUG();
                case S_IFREG:
                        __rpc_unlink(dir, dentry);
                        break;
                case S_IFDIR:
                        __rpc_rmdir(dir, dentry);
                }
next:
                dput(dentry);
        }
}

static void rpc_depopulate(struct dentry *parent,
                           const struct rpc_filelist *files,
                           int start, int eof)
{
        struct inode *dir = parent->d_inode;

        mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
        __rpc_depopulate(parent, files, start, eof);
        mutex_unlock(&dir->i_mutex);
}
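
/*
 * rpc_populate()/rpc_depopulate() drive directory contents from a
 * const rpc_filelist table: S_IFREG entries become files with the
 * given file_operations, S_IFDIR entries become subdirectories, and
 * a failure part-way through unwinds whatever was already created.
 */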

static int rpc_populate(struct dentry *parent,
                        const struct rpc_filelist *files,
                        int start, int eof,
                        void *private)
{
        struct inode *dir = parent->d_inode;
        struct dentry *dentry;
        int i, err;

        mutex_lock(&dir->i_mutex);
        for (i = start; i < eof; i++) {
                struct qstr q;

                q.name = files[i].name;
                q.len = strlen(files[i].name);
                q.hash = full_name_hash(q.name, q.len);
                dentry = __rpc_lookup_create_exclusive(parent, &q);
                err = PTR_ERR(dentry);
                if (IS_ERR(dentry))
                        goto out_bad;
                switch (files[i].mode & S_IFMT) {
                default:
                        BUG();
                case S_IFREG:
                        err = __rpc_create(dir, dentry,
                                                files[i].mode,
                                                files[i].i_fop,
                                                private);
                        break;
                case S_IFDIR:
                        err = __rpc_mkdir(dir, dentry,
                                                files[i].mode,
                                                NULL,
                                                private);
                }
                if (err != 0)
                        goto out_bad;
        }
        mutex_unlock(&dir->i_mutex);
        return 0;
out_bad:
        __rpc_depopulate(parent, files, start, eof);
        mutex_unlock(&dir->i_mutex);
        printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
                        __FILE__, __func__, parent->d_name.name);
        return err;
}

static struct dentry *rpc_mkdir_populate(struct dentry *parent,
                struct qstr *name, umode_t mode, void *private,
                int (*populate)(struct dentry *, void *), void *args_populate)
{
        struct dentry *dentry;
        struct inode *dir = parent->d_inode;
        int error;

        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        dentry = __rpc_lookup_create_exclusive(parent, name);
        if (IS_ERR(dentry))
                goto out;
        error = __rpc_mkdir(dir, dentry, mode, NULL, private);
        if (error != 0)
                goto out_err;
        if (populate != NULL) {
                error = populate(dentry, args_populate);
                if (error)
                        goto err_rmdir;
        }
out:
        mutex_unlock(&dir->i_mutex);
        return dentry;
err_rmdir:
        __rpc_rmdir(dir, dentry);
out_err:
        dentry = ERR_PTR(error);
        goto out;
}

static int rpc_rmdir_depopulate(struct dentry *dentry,
                                void (*depopulate)(struct dentry *))
{
        struct dentry *parent;
        struct inode *dir;
        int error;

        parent = dget_parent(dentry);
        dir = parent->d_inode;
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        if (depopulate != NULL)
                depopulate(dentry);
        error = __rpc_rmdir(dir, dentry);
        mutex_unlock(&dir->i_mutex);
        dput(parent);
        return error;
}
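
/*
 * rpc_mkdir_populate() and rpc_rmdir_depopulate() above are the
 * building blocks used further below for the per-client ("info") and
 * per-cache ("channel"/"content"/"flush") directories.
 */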

/**
 * rpc_mkpipe_dentry - make an rpc_pipefs file for kernel<->userspace
 *		       communication
 * @parent: dentry of directory to create new "pipe" in
 * @name: name of pipe
 * @private: private data to associate with the pipe, for the caller's use
 * @pipe: &rpc_pipe created by rpc_mkpipe_data(), whose ops define the
 *	  pipe's behavior: upcall, downcall, release_pipe, open_pipe,
 *	  and destroy_msg
 *
 * Data is made available for userspace to read by calls to
 * rpc_queue_upcall().  The actual reads will result in calls to
 * @pipe->ops->upcall, which will be called with the file pointer,
 * message, and userspace buffer to copy to.
 *
 * Writes can come at any time, and do not necessarily have to be
 * responses to upcalls.  They will result in calls to
 * @pipe->ops->downcall.
 *
 * The @private argument passed here will be available to all these methods
 * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
 */
struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
                                 void *private, struct rpc_pipe *pipe)
{
        struct dentry *dentry;
        struct inode *dir = parent->d_inode;
        umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR;
        struct qstr q;
        int err;

        if (pipe->ops->upcall == NULL)
                umode &= ~S_IRUGO;
        if (pipe->ops->downcall == NULL)
                umode &= ~S_IWUGO;

        q.name = name;
        q.len = strlen(name);
        q.hash = full_name_hash(q.name, q.len);

        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        dentry = __rpc_lookup_create_exclusive(parent, &q);
        if (IS_ERR(dentry))
                goto out;
        err = __rpc_mkpipe_dentry(dir, dentry, umode, &rpc_pipe_fops,
                                  private, pipe);
        if (err)
                goto out_err;
out:
        mutex_unlock(&dir->i_mutex);
        return dentry;
out_err:
        dentry = ERR_PTR(err);
        printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
                        __FILE__, __func__, parent->d_name.name, name,
                        err);
        goto out;
}
EXPORT_SYMBOL_GPL(rpc_mkpipe_dentry);

/**
 * rpc_unlink - remove a pipe
 * @dentry: dentry for the pipe, as returned from rpc_mkpipe_dentry()
 *
 * After this call, lookups will no longer find the pipe, and any
 * attempts to read or write using preexisting opens of the pipe will
 * return -EPIPE.
 */
int
rpc_unlink(struct dentry *dentry)
{
        struct dentry *parent;
        struct inode *dir;
        int error = 0;

        parent = dget_parent(dentry);
        dir = parent->d_inode;
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        error = __rpc_rmpipe(dir, dentry);
        mutex_unlock(&dir->i_mutex);
        dput(parent);
        return error;
}
EXPORT_SYMBOL_GPL(rpc_unlink);

enum {
        RPCAUTH_info,
        RPCAUTH_EOF
};

static const struct rpc_filelist authfiles[] = {
        [RPCAUTH_info] = {
                .name = "info",
                .i_fop = &rpc_info_operations,
                .mode = S_IFREG | S_IRUSR,
        },
};

static int rpc_clntdir_populate(struct dentry *dentry, void *private)
{
        return rpc_populate(dentry,
                            authfiles, RPCAUTH_info, RPCAUTH_EOF,
                            private);
}

static void rpc_clntdir_depopulate(struct dentry *dentry)
{
        rpc_depopulate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF);
}

/**
 * rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs
 * @dentry: the parent directory in which to create the new directory
 * @name: &struct qstr giving the name of the new directory
 * @rpc_client: rpc client to associate with this directory
 *
 * This creates a directory named @name under @dentry, associated with
 * @rpc_client, which will contain a file named "info" with some basic
 * information about the client, together with any "pipes" that may
 * later be created using rpc_mkpipe_dentry().
 */
struct dentry *rpc_create_client_dir(struct dentry *dentry,
                                     struct qstr *name,
                                     struct rpc_clnt *rpc_client)
{
        return rpc_mkdir_populate(dentry, name, S_IRUGO | S_IXUGO, NULL,
                                  rpc_clntdir_populate, rpc_client);
}

/**
 * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
 * @dentry: dentry for the client directory, as returned by
 *	    rpc_create_client_dir()
 */
int rpc_remove_client_dir(struct dentry *dentry)
{
        return rpc_rmdir_depopulate(dentry, rpc_clntdir_depopulate);
}

static const struct rpc_filelist cache_pipefs_files[3] = {
        [0] = {
                .name = "channel",
                .i_fop = &cache_file_operations_pipefs,
                .mode = S_IFREG|S_IRUSR|S_IWUSR,
        },
        [1] = {
                .name = "content",
                .i_fop = &content_file_operations_pipefs,
                .mode = S_IFREG|S_IRUSR,
        },
        [2] = {
                .name = "flush",
                .i_fop = &cache_flush_operations_pipefs,
                .mode = S_IFREG|S_IRUSR|S_IWUSR,
        },
};

static int rpc_cachedir_populate(struct dentry *dentry, void *private)
{
        return rpc_populate(dentry,
                            cache_pipefs_files, 0, 3,
                            private);
}

static void rpc_cachedir_depopulate(struct dentry *dentry)
{
        rpc_depopulate(dentry, cache_pipefs_files, 0, 3);
}

struct dentry *rpc_create_cache_dir(struct dentry *parent, struct qstr *name,
                                    umode_t umode, struct cache_detail *cd)
{
        return rpc_mkdir_populate(parent, name, umode, NULL,
                        rpc_cachedir_populate, cd);
}

void rpc_remove_cache_dir(struct dentry *dentry)
{
        rpc_rmdir_depopulate(dentry, rpc_cachedir_depopulate);
}

/*
 * populate the filesystem
 */
static const struct super_operations s_ops = {
        .alloc_inode = rpc_alloc_inode,
        .destroy_inode = rpc_destroy_inode,
        .statfs = simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969

/*
 * Top-level directories created in every rpc_pipefs mount.
 */
enum {
        RPCAUTH_lockd,
        RPCAUTH_mount,
        RPCAUTH_nfs,
        RPCAUTH_portmap,
        RPCAUTH_statd,
        RPCAUTH_nfsd4_cb,
        RPCAUTH_cache,
        RPCAUTH_nfsd,
        RPCAUTH_RootEOF
};

static const struct rpc_filelist files[] = {
        [RPCAUTH_lockd] = {
                .name = "lockd",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_mount] = {
                .name = "mount",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_nfs] = {
                .name = "nfs",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_portmap] = {
                .name = "portmap",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_statd] = {
                .name = "statd",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_nfsd4_cb] = {
                .name = "nfsd4_cb",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_cache] = {
                .name = "cache",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_nfsd] = {
                .name = "nfsd",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
};
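
/*
 * Sketch of how a RPC_PIPEFS_MOUNT notifier might use rpc_d_lookup_sb()
 * below to locate its top-level directory on a freshly mounted
 * superblock (hypothetical handler, shown for illustration only):
 *
 *	static int example_pipefs_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct super_block *sb = ptr;
 *		struct dentry *dir;
 *
 *		if (event != RPC_PIPEFS_MOUNT)
 *			return 0;
 *		dir = rpc_d_lookup_sb(sb, "cache");
 *		...
 *		return 0;
 *	}
 */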

/*
 * This call can be used only in RPC pipefs mount notification hooks.
 */
struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
                               const unsigned char *dir_name)
{
        struct qstr dir = {
                .name = dir_name,
                .len = strlen(dir_name),
                .hash = full_name_hash(dir_name, strlen(dir_name)),
        };

        return d_lookup(sb->s_root, &dir);
}
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);

void rpc_pipefs_init_net(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        mutex_init(&sn->pipefs_sb_lock);
}

/*
 * This call is used for per-network-namespace operations.
 * Note: rpc_get_sb_net() returns with pipefs_sb_lock held if the
 * superblock was found.  The lock must be released by calling
 * rpc_put_sb_net() once all operations are complete.
 */
struct super_block *rpc_get_sb_net(const struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        mutex_lock(&sn->pipefs_sb_lock);
        if (sn->pipefs_sb)
                return sn->pipefs_sb;
        mutex_unlock(&sn->pipefs_sb_lock);
        return NULL;
}
EXPORT_SYMBOL_GPL(rpc_get_sb_net);

void rpc_put_sb_net(const struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        BUG_ON(sn->pipefs_sb == NULL);
        mutex_unlock(&sn->pipefs_sb_lock);
}
EXPORT_SYMBOL_GPL(rpc_put_sb_net);

static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct dentry *root;
        struct net *net = data;
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int err;

        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = RPCAUTH_GSSMAGIC;
        sb->s_op = &s_ops;
        sb->s_time_gran = 1;

        inode = rpc_get_inode(sb, S_IFDIR | 0755);
        sb->s_root = root = d_make_root(inode);
        if (!root)
                return -ENOMEM;
        if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
                return -ENOMEM;
        dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net,
                        NET_NAME(net));
        sn->pipefs_sb = sb;
        err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
                                           RPC_PIPEFS_MOUNT,
                                           sb);
        if (err)
                goto err_depopulate;
        sb->s_fs_info = get_net(net);
        return 0;

err_depopulate:
        blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
                                     RPC_PIPEFS_UMOUNT,
                                     sb);
        sn->pipefs_sb = NULL;
        __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
        return err;
}

static struct dentry *
rpc_mount(struct file_system_type *fs_type,
          int flags, const char *dev_name, void *data)
{
        return mount_ns(fs_type, flags, current->nsproxy->net_ns, rpc_fill_super);
}

static void rpc_kill_sb(struct super_block *sb)
{
        struct net *net = sb->s_fs_info;
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        mutex_lock(&sn->pipefs_sb_lock);
        sn->pipefs_sb = NULL;
        mutex_unlock(&sn->pipefs_sb_lock);
        put_net(net);
        dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net,
                        NET_NAME(net));
        blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
                                     RPC_PIPEFS_UMOUNT,
                                     sb);
        kill_litter_super(sb);
}

static struct file_system_type rpc_pipe_fs_type = {
        .owner = THIS_MODULE,
        .name = "rpc_pipefs",
        .mount = rpc_mount,
        .kill_sb = rpc_kill_sb,
};
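
/*
 * Slab constructor: runs once when an object is first added to the
 * rpc_inode_cache, so only fields that remain valid across alloc/free
 * cycles are initialized here.
 */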
static void
init_once(void *foo)
{
        struct rpc_inode *rpci = (struct rpc_inode *) foo;

        inode_init_once(&rpci->vfs_inode);
        rpci->private = NULL;
        rpci->pipe = NULL;
        init_waitqueue_head(&rpci->waitq);
}

int register_rpc_pipefs(void)
{
        int err;

        rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
                                sizeof(struct rpc_inode),
                                0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                                SLAB_MEM_SPREAD),
                                init_once);
        if (!rpc_inode_cachep)
                return -ENOMEM;
        err = rpc_clients_notifier_register();
        if (err)
                goto err_notifier;
        err = register_filesystem(&rpc_pipe_fs_type);
        if (err)
                goto err_register;
        return 0;

err_register:
        rpc_clients_notifier_unregister();
err_notifier:
        kmem_cache_destroy(rpc_inode_cachep);
        return err;
}

void unregister_rpc_pipefs(void)
{
        rpc_clients_notifier_unregister();
        kmem_cache_destroy(rpc_inode_cachep);
        unregister_filesystem(&rpc_pipe_fs_type);
}

/* Make 'mount -t rpc_pipefs ...' autoload this module. */
MODULE_ALIAS("rpc_pipefs");