/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/dnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;

static struct kmem_cache *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT	(30*HZ)

static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}

static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci =
		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}

int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work(&rpci->queue_timeout);
		flush_workqueue(rpciod_workqueue);
	}
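	/* Detach the inode from its owner now that no upcalls remain. */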
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
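	/*
	 * The pipe is always writable; it becomes readable when an upcall
	 * is queued, and reports POLLERR|POLLHUP once the pipe has been
	 * shut down (rpci->ops cleared by rpc_close_pipes()).
	 */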
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_path.dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (!list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			atomic_inc(&clnt->cl_users);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};

/*
 * We have a single directory with 1 node in it.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
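 * Each rpc_filelist entry names a node and records the file operations
 * and mode that rpc_populate() applies when instantiating it.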
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	if (path[0] == '\0')
		return -ENOENT;
	nd->mnt = rpc_get_mount();
	if (IS_ERR(nd->mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
				"pseudofilesystem \n", __FILE__, __FUNCTION__);
		return PTR_ERR(nd->mnt);
	}
	mntget(nd->mnt);
	nd->dentry = dget(rpc_mount->mnt_root);
	nd->last_type = LAST_ROOT;
	nd->flags = LOOKUP_PARENT;
	nd->depth = 0;

	if (path_walk(path, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}

static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
		case S_IFDIR:
			inode->i_fop = &simple_dir_operations;
			inode->i_op = &simple_dir_inode_operations;
			inc_nlink(inode);
		default:
			break;
	}
	return inode;
}

/*
 * FIXME: This probably has races.
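 * rpc_depopulate() unhashes and unlinks every child of @parent, working
 * in batches of up to ten dentries so that dcache_lock is not held
 * across the actual unlinks.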
 */
static void
rpc_depopulate(struct dentry *parent)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (dentry->d_inode) {
				rpc_close_pipes(dentry->d_inode);
				simple_unlink(dir, dentry);
			}
			inode_dir_notify(dir, DN_DELETE);
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}

static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			inc_nlink(dir);
		d_add(dentry, inode);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inode_dir_notify(dir, DN_CREATE);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	shrink_dcache_parent(dentry);
	if (d_unhashed(dentry))
		return 0;
	if ((error = simple_rmdir(dir, dentry)) != 0)
		return error;
	if (!error) {
		inode_dir_notify(dir, DN_DELETE);
		d_drop(dentry);
	}
	return 0;
}

static struct dentry *
rpc_lookup_create(struct dentry *parent, const char *name, int len)
{
	struct inode *dir = parent->d_inode;
	struct dentry *dentry;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name, parent, len);
	if (IS_ERR(dentry))
		goto out_err;
	if (dentry->d_inode) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	return dentry;
}

static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len);
	if (IS_ERR(dentry))
		rpc_release_path(nd);
	return dentry;
}

struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}

int
rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	rpc_depopulate(dentry);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}

struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_create(parent, name, strlen(name));
	if (IS_ERR(dentry))
		return dentry;
	dir = parent->d_inode;
	inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	inode_dir_notify(dir, DN_CREATE);
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __FUNCTION__, parent->d_name.name, name,
			-ENOMEM);
	goto out;
}

int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error = 0;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (!d_unhashed(dentry)) {
		d_drop(dentry);
		if (dentry->d_inode) {
			rpc_close_pipes(dentry->d_inode);
			error = simple_unlink(dir, dentry);
		}
		inode_dir_notify(dir, DN_DELETE);
	}
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969

static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
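	/* Magic number reported back through statfs() for rpc_pipefs. */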
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};

static void
init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&rpci->vfs_inode);
		rpci->private = NULL;
		rpci->nreaders = 0;
		rpci->nwriters = 0;
		INIT_LIST_HEAD(&rpci->in_upcall);
		INIT_LIST_HEAD(&rpci->pipe);
		rpci->pipelen = 0;
		init_waitqueue_head(&rpci->waitq);
		INIT_DELAYED_WORK(&rpci->queue_timeout,
				rpc_timeout_upcall_queue);
		rpci->ops = NULL;
	}
}

int register_rpc_pipefs(void)
{
	int err;

	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once, NULL);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	err = register_filesystem(&rpc_pipe_fs_type);
	if (err) {
		kmem_cache_destroy(rpc_inode_cachep);
		return err;
	}

	return 0;
}

void unregister_rpc_pipefs(void)
{
	kmem_cache_destroy(rpc_inode_cachep);
	unregister_filesystem(&rpc_pipe_fs_type);
}