/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/dnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;


static struct kmem_cache *rpc_inode_cachep __read_mostly;

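/*
 * Upcalls that are queued while no userspace reader has the pipe open
 * (only possible for pipes created with RPC_PIPE_WAIT_FOR_OPEN) are
 * discarded with -ETIMEDOUT after this delay; see
 * rpc_timeout_upcall_queue() below.
 */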
#define RPC_UPCALL_TIMEOUT (30*HZ)

static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}

static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci =
		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}

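/**
 * rpc_queue_upcall - queue an upcall message for delivery to userspace
 * @inode: inode of a pipe created by rpc_mkpipe()
 * @msg: message to queue; @msg->data and @msg->len describe the payload
 *
 * Returns 0 if the message was queued, or -EPIPE if the pipe has been
 * closed down or has no reader and was not created with
 * RPC_PIPE_WAIT_FOR_OPEN.  Once a message has been queued, the creator's
 * ->destroy_msg() callback is eventually invoked for it (after it has
 * been read, timed out or flushed), with msg->errno reporting the result.
 *
 * Illustrative caller sketch (hypothetical names; real users such as
 * auth_gss embed struct rpc_pipe_msg in a larger per-upcall structure):
 *
 *	msg->data = payload;
 *	msg->len = payload_len;
 *	err = rpc_queue_upcall(pipe_dentry->d_inode, msg);
 *	if (err < 0)
 *		my_destroy_msg(msg);
 *
 * where the error branch cleans up because the message was never queued.
 */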
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

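/*
 * Shut a pipe down from the kernel side: all queued and half-read
 * messages are destroyed with -EPIPE, rpci->ops is cleared so that
 * further upcalls, reads and writes are refused, and any sleeping
 * readers are woken up.
 */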
static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work(&rpci->queue_timeout);
		flush_workqueue(rpciod_workqueue);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

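/*
 * read() hands the message at the head of the queue to the creator's
 * ->upcall() callback, which copies it to userspace and updates
 * msg->copied.  A partially copied message is parked on
 * filp->private_data (and on the in_upcall list) so that the next read
 * continues where the previous one stopped.
 *
 * A minimal ->upcall() implementation might look like the sketch below
 * (illustrative only; it assumes the payload simply lives at msg->data):
 *
 *	static ssize_t my_pipe_upcall(struct file *filp,
 *				      struct rpc_pipe_msg *msg,
 *				      char __user *dst, size_t buflen)
 *	{
 *		char *data = (char *)msg->data + msg->copied;
 *		size_t mlen = min(msg->len - msg->copied, buflen);
 *
 *		if (copy_to_user(dst, data, mlen))
 *			return -EFAULT;
 *		msg->copied += mlen;
 *		return mlen;
 *	}
 */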
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_path.dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (!list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			kref_get(&clnt->cl_kref);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};


/*
 * Pre-defined directory entries created at the root of the filesystem.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

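/*
 * Pin (and later release) the internal rpc_pipefs mount.  Code that
 * creates objects by path name, such as rpc_mkdir() below, takes a
 * temporary reference so the superblock cannot go away underneath it:
 *
 *	mnt = rpc_get_mount();
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	...
 *	rpc_put_mount();
 */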
struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}

static int rpc_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations rpc_dentry_operations = {
	.d_delete = rpc_delete_dentry,
};

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	struct vfsmount *mnt;

	if (path[0] == '\0')
		return -ENOENT;

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
				"pseudofilesystem\n", __FILE__, __FUNCTION__);
		return PTR_ERR(mnt);
	}

	if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}

static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_fop = &simple_dir_operations;
		inode->i_op = &simple_dir_inode_operations;
		inc_nlink(inode);
	default:
		break;
	}
	return inode;
}

/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent, int start, int eof)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		if (!dentry->d_inode ||
				dentry->d_inode->i_ino < start ||
				dentry->d_inode->i_ino >= eof)
			continue;
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (S_ISREG(dentry->d_inode->i_mode))
				simple_unlink(dir, dentry);
			else if (S_ISDIR(dentry->d_inode->i_mode))
				simple_rmdir(dir, dentry);
			d_delete(dentry);
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}

static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		dentry->d_op = &rpc_dentry_operations;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			inc_nlink(dir);
		d_add(dentry, inode);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inode_dir_notify(dir, DN_CREATE);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	error = simple_rmdir(dir, dentry);
	if (!error)
		d_delete(dentry);
	return error;
}

static struct dentry *
rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive)
{
	struct inode *dir = parent->d_inode;
	struct dentry *dentry;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name, parent, len);
	if (IS_ERR(dentry))
		goto out_err;
	if (!dentry->d_inode)
		dentry->d_op = &rpc_dentry_operations;
	else if (exclusive) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	return dentry;
}

static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
	if (IS_ERR(dentry))
		rpc_release_path(nd);
	return dentry;
}


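/**
 * rpc_mkdir - create and populate a client directory in rpc_pipefs
 * @path: path relative to the rpc_pipefs root, e.g. "nfs/clnt1a"
 * @rpc_client: RPC client to associate with the new directory
 *
 * The directory is populated with an "info" file describing @rpc_client
 * and is returned with an extra dentry reference held for the caller;
 * rpc_rmdir() drops it again.  Sketch of the typical call site when a
 * new client is set up (names approximate, see rpc_clnt creation):
 *
 *	dentry = rpc_mkdir(dir_name, clnt);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	clnt->cl_dentry = dentry;
 */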
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}

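/*
 * Tear down a directory created by rpc_mkdir(): remove the populated
 * auth files, unlink the directory itself and drop the dentry reference
 * that rpc_mkdir() handed to the caller.
 */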
int
rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}

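/**
 * rpc_mkpipe - create a kernel<->userspace pipe in rpc_pipefs
 * @parent: dentry of the directory the pipe is created in
 * @name: name of the pipe
 * @private: private data to attach to the pipe's rpc_inode
 * @ops: upcall/downcall/destroy_msg/release_pipe callbacks
 * @flags: RPC_PIPE_WAIT_FOR_OPEN to queue upcalls before a reader opens
 *
 * If a pipe of the same name already exists with identical @private,
 * @ops and @flags, its kernel reference count (nkern_readwriters) is
 * bumped and the existing dentry is returned; otherwise the call fails
 * with -EBUSY.  Illustrative caller sketch (names approximate; auth_gss
 * and the NFS idmapper create their pipes this way):
 *
 *	pipe = rpc_mkpipe(clnt->cl_dentry, "idmap", idmap,
 *			  &idmap_upcall_ops, 0);
 *	if (IS_ERR(pipe))
 *		return PTR_ERR(pipe);
 */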
struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_create(parent, name, strlen(name), 0);
	if (IS_ERR(dentry))
		return dentry;
	dir = parent->d_inode;
	if (dentry->d_inode) {
		rpci = RPC_I(dentry->d_inode);
		if (rpci->private != private ||
				rpci->ops != ops ||
				rpci->flags != flags) {
			/* The existing pipe belongs to somebody else:
			 * don't bump its kernel reference count. */
			dput(dentry);
			dentry = ERR_PTR(-EBUSY);
			goto out;
		}
		rpci->nkern_readwriters++;
		goto out;
	}
	inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	rpci->nkern_readwriters = 1;
	inode_dir_notify(dir, DN_CREATE);
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __FUNCTION__, parent->d_name.name, name,
			-ENOMEM);
	goto out;
}

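/*
 * Drop one kernel user of a pipe created by rpc_mkpipe().  The pipe is
 * only closed and unlinked once the last kernel reference
 * (nkern_readwriters) is gone.
 */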
int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error = 0;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
		if (!error)
			d_delete(dentry);
	}
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969

static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};

static void
init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *)foo;

	inode_init_once(&rpci->vfs_inode);
	rpci->private = NULL;
	rpci->nreaders = 0;
	rpci->nwriters = 0;
	INIT_LIST_HEAD(&rpci->in_upcall);
	INIT_LIST_HEAD(&rpci->in_downcall);
	INIT_LIST_HEAD(&rpci->pipe);
	rpci->pipelen = 0;
	init_waitqueue_head(&rpci->waitq);
	INIT_DELAYED_WORK(&rpci->queue_timeout,
			    rpc_timeout_upcall_queue);
	rpci->ops = NULL;
}

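/*
 * Called from the sunrpc module init/exit paths: set up the rpc_inode
 * cache and register (or unregister) the "rpc_pipefs" filesystem type.
 * Userspace conventionally mounts it at /var/lib/nfs/rpc_pipefs; that
 * location is a convention of the NFS utilities, not something enforced
 * here.
 */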
int register_rpc_pipefs(void)
{
	int err;

	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	err = register_filesystem(&rpc_pipe_fs_type);
	if (err) {
		kmem_cache_destroy(rpc_inode_cachep);
		return err;
	}

	return 0;
}

void unregister_rpc_pipefs(void)
{
	kmem_cache_destroy(rpc_inode_cachep);
	unregister_filesystem(&rpc_pipe_fs_type);
}