/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;

static struct kmem_cache *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT (30*HZ)

static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}

static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci =
		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}

/**
 * rpc_queue_upcall
 * @inode: inode of upcall pipe on which to queue given message
 * @msg: message to queue
 *
 * Call with an @inode created by rpc_mkpipe() to queue an upcall.
 * A userspace process may then later read the upcall by performing a
 * read on an open file for this inode.  It is up to the caller to
 * initialize the fields of @msg (other than @msg->list) appropriately.
 */
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}
EXPORT_SYMBOL(rpc_queue_upcall);
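
/*
 * Illustrative sketch of the API documented above (not built; the names
 * example_queue_upcall, gss_pipe_dentry and gss_upcall_buf are hypothetical
 * placeholders, not part of this file): a typical caller allocates an
 * rpc_pipe_msg, points it at its payload, and queues it on the inode behind
 * a dentry previously obtained from rpc_mkpipe().
 */
#if 0
static int example_queue_upcall(struct dentry *gss_pipe_dentry,
				void *gss_upcall_buf, size_t buflen)
{
	struct rpc_pipe_msg *gss_msg;
	int err;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
	if (gss_msg == NULL)
		return -ENOMEM;
	/* Everything except msg->list must be set up by the caller. */
	gss_msg->data = gss_upcall_buf;
	gss_msg->len = buflen;
	gss_msg->copied = 0;
	gss_msg->errno = 0;
	/* Fails with -EPIPE if the pipe has no readers and is not waiting for one. */
	err = rpc_queue_upcall(gss_pipe_dentry->d_inode, gss_msg);
	if (err < 0)
		kfree(gss_msg);		/* on failure the message was not queued */
	return err;
}
#endif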

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work_sync(&rpci->queue_timeout);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_path.dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (filp->private_data || !list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};
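
/*
 * Userspace side of the interface implemented by rpc_pipe_fops above
 * (illustrative sketch only, not part of this file; example_service_one_upcall
 * and pipe_path are hypothetical - rpc_pipefs is conventionally mounted at
 * /var/lib/nfs/rpc_pipefs): a daemon polls the pipe file, reads one queued
 * upcall (which drives rpci->ops->upcall) and writes its reply back (which
 * drives rpci->ops->downcall).
 */
#if 0
#include <sys/ioctl.h>
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

static int example_service_one_upcall(const char *pipe_path)
{
	char buf[4096], reply[512];
	struct pollfd pfd;
	ssize_t n;
	int pending, fd;

	fd = open(pipe_path, O_RDWR);
	if (fd < 0)
		return -1;
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) < 0)		/* wait for a queued upcall */
		goto err;
	if (ioctl(fd, FIONREAD, &pending) < 0)	/* bytes queued, per rpc_pipe_ioctl() */
		goto err;
	n = read(fd, buf, sizeof(buf));		/* invokes rpci->ops->upcall */
	if (n <= 0)
		goto err;
	/* ... build a reply in reply[], then send the downcall ... */
	if (write(fd, reply, sizeof(reply)) < 0)	/* invokes rpci->ops->downcall */
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
#endif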

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			kref_get(&clnt->cl_kref);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};


/*
 * The rpc_pipefs root is a single directory, pre-populated with one
 * subdirectory per service that may create pipes beneath it.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}

static int rpc_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations rpc_dentry_operations = {
	.d_delete = rpc_delete_dentry,
};

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	struct vfsmount *mnt;

	if (path[0] == '\0')
		return -ENOENT;

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
				"pseudofilesystem\n", __FILE__, __FUNCTION__);
		return PTR_ERR(mnt);
	}

	if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}

static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_fop = &simple_dir_operations;
		inode->i_op = &simple_dir_inode_operations;
		inc_nlink(inode);
	default:
		break;
	}
	return inode;
}

/*
 * FIXME: This probably has races.
 */
static void rpc_depopulate(struct dentry *parent,
		unsigned long start, unsigned long eof)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		if (!dentry->d_inode ||
				dentry->d_inode->i_ino < start ||
				dentry->d_inode->i_ino >= eof)
			continue;
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (S_ISREG(dentry->d_inode->i_mode))
				simple_unlink(dir, dentry);
			else if (S_ISDIR(dentry->d_inode->i_mode))
				simple_rmdir(dir, dentry);
			d_delete(dentry);
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}

static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		dentry->d_op = &rpc_dentry_operations;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			inc_nlink(dir);
		d_add(dentry, inode);
		fsnotify_create(dir, dentry);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	inc_nlink(dir);
	fsnotify_mkdir(dir, dentry);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;
	error = simple_rmdir(dir, dentry);
	if (!error)
		d_delete(dentry);
	return error;
}

static struct dentry *
rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive)
{
	struct inode *dir = parent->d_inode;
	struct dentry *dentry;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name, parent, len);
	if (IS_ERR(dentry))
		goto out_err;
	if (!dentry->d_inode)
		dentry->d_op = &rpc_dentry_operations;
	else if (exclusive) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	return dentry;
}

static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
	if (IS_ERR(dentry))
		rpc_release_path(nd);
	return dentry;
}

/**
 * rpc_mkdir - Create a new directory in rpc_pipefs
 * @path: path from the rpc_pipefs root to the new directory
 * @rpc_client: rpc client to associate with this directory
 *
 * This creates a directory at the given @path associated with
 * @rpc_client, which will contain a file named "info" with some basic
 * information about the client, together with any "pipes" that may
 * later be created using rpc_mkpipe().
 */
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}

/**
 * rpc_rmdir - Remove a directory created with rpc_mkdir()
 * @dentry: directory to remove
 */
int
rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}

/**
 * rpc_mkpipe - make an rpc_pipefs file for kernel<->userspace communication
 * @parent: dentry of directory to create new "pipe" in
 * @name: name of pipe
 * @private: private data to associate with the pipe, for the caller's use
 * @ops: operations defining the behavior of the pipe: upcall, downcall,
 *	release_pipe, and destroy_msg.
 * @flags: rpc_inode flags, e.g. RPC_PIPE_WAIT_FOR_OPEN
 *
 * Data is made available for userspace to read by calls to
 * rpc_queue_upcall().  The actual reads will result in calls to
 * @ops->upcall, which will be called with the file pointer,
 * message, and userspace buffer to copy to.
 *
 * Writes can come at any time, and do not necessarily have to be
 * responses to upcalls.  They will result in calls to @ops->downcall.
 *
 * The @private argument passed here will be available to all these methods
 * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
 */
struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_create(parent, name, strlen(name), 0);
	if (IS_ERR(dentry))
		return dentry;
	dir = parent->d_inode;
	if (dentry->d_inode) {
		rpci = RPC_I(dentry->d_inode);
		if (rpci->private != private ||
				rpci->ops != ops ||
				rpci->flags != flags) {
			dput(dentry);
			dentry = ERR_PTR(-EBUSY);
		}
		rpci->nkern_readwriters++;
		goto out;
	}
	inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	rpci->nkern_readwriters = 1;
	fsnotify_create(dir, dentry);
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __FUNCTION__, parent->d_name.name, name,
			-ENOMEM);
	goto out;
}
EXPORT_SYMBOL(rpc_mkpipe);
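
/*
 * Illustrative sketch of the directory/pipe lifecycle documented above
 * (not built; all example_* names and the "nfs/example_clnt"/"example"
 * path components are hypothetical placeholders): a subsystem supplies an
 * rpc_pipe_ops, creates a per-client directory plus a pipe inside it, and
 * tears them down in reverse order when finished.
 */
#if 0
static struct rpc_pipe_ops example_pipe_ops = {
	.upcall		= example_pipe_upcall,	/* copy queued msg->data to userspace */
	.downcall	= example_pipe_downcall,/* parse the reply written by userspace */
	.destroy_msg	= example_destroy_msg,	/* free an rpc_pipe_msg */
	.release_pipe	= example_release_pipe,	/* last opener of the pipe went away */
};

static int example_setup_pipe(struct rpc_clnt *clnt, void *private,
			      struct dentry **dirp, struct dentry **pipep)
{
	static char example_path[] = "nfs/example_clnt";	/* relative to pipefs root */
	struct dentry *dir, *pipe;

	dir = rpc_mkdir(example_path, clnt);	/* creates the dir plus its "info" file */
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	pipe = rpc_mkpipe(dir, "example", private, &example_pipe_ops,
			  RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(pipe)) {
		rpc_rmdir(dir);
		return PTR_ERR(pipe);
	}
	*dirp = dir;
	*pipep = pipe;
	return 0;
}

static void example_teardown_pipe(struct dentry *dir, struct dentry *pipe)
{
	rpc_unlink(pipe);	/* further I/O on existing opens returns -EPIPE */
	rpc_rmdir(dir);
}
#endif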

/**
 * rpc_unlink - remove a pipe
 * @dentry: dentry for the pipe, as returned from rpc_mkpipe
 *
 * After this call, lookups will no longer find the pipe, and any
 * attempts to read or write using preexisting opens of the pipe will
 * return -EPIPE.
 */
int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error = 0;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
		if (!error)
			d_delete(dentry);
	}
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
EXPORT_SYMBOL(rpc_unlink);

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969

static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};

static void
init_once(struct kmem_cache *cachep, void *foo)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	inode_init_once(&rpci->vfs_inode);
	rpci->private = NULL;
	rpci->nreaders = 0;
	rpci->nwriters = 0;
	INIT_LIST_HEAD(&rpci->in_upcall);
	INIT_LIST_HEAD(&rpci->in_downcall);
	INIT_LIST_HEAD(&rpci->pipe);
	rpci->pipelen = 0;
	init_waitqueue_head(&rpci->waitq);
	INIT_DELAYED_WORK(&rpci->queue_timeout,
			rpc_timeout_upcall_queue);
	rpci->ops = NULL;
}

int register_rpc_pipefs(void)
{
	int err;

	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	err = register_filesystem(&rpc_pipe_fs_type);
	if (err) {
		kmem_cache_destroy(rpc_inode_cachep);
		return err;
	}

	return 0;
}

void unregister_rpc_pipefs(void)
{
	kmem_cache_destroy(rpc_inode_cachep);
	unregister_filesystem(&rpc_pipe_fs_type);
}
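
/*
 * For reference (illustrative sketch, not part of this file):
 * register_rpc_pipefs() and unregister_rpc_pipefs() are meant to be called
 * once from the sunrpc module's own init/exit paths, roughly as below; the
 * example_* names are hypothetical placeholders.
 */
#if 0
static int __init example_init_sunrpc(void)
{
	int err;

	err = register_rpc_pipefs();	/* create the inode cache, register "rpc_pipefs" */
	if (err)
		return err;
	/* ... register the rest of the sunrpc facilities ... */
	return 0;
}

static void __exit example_cleanup_sunrpc(void)
{
	unregister_rpc_pipefs();	/* tear down the inode cache and filesystem type */
}
module_init(example_init_sunrpc);
module_exit(example_cleanup_sunrpc);
#endif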