1 /*
2  * POSIX message queues filesystem for Linux.
3  *
4  * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
5  *                          Michal Wronski          (michal.wronski@gmail.com)
6  *
7  * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
8  * Lockless receive & send, fd based notify:
9  * 			    Manfred Spraul	    (manfred@colorfullife.com)
10  *
11  * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
12  *
13  * This file is released under the GPL.
14  */
15 
16 #include <linux/capability.h>
17 #include <linux/init.h>
18 #include <linux/pagemap.h>
19 #include <linux/file.h>
20 #include <linux/mount.h>
21 #include <linux/namei.h>
22 #include <linux/sysctl.h>
23 #include <linux/poll.h>
24 #include <linux/mqueue.h>
25 #include <linux/msg.h>
26 #include <linux/skbuff.h>
27 #include <linux/netlink.h>
28 #include <linux/syscalls.h>
29 #include <linux/audit.h>
30 #include <linux/signal.h>
31 #include <linux/mutex.h>
32 
33 #include <net/sock.h>
34 #include "util.h"
35 
36 #define MQUEUE_MAGIC	0x19800202
37 #define DIRENT_SIZE	20
38 #define FILENT_SIZE	80
39 
40 #define SEND		0
41 #define RECV		1
42 
43 #define STATE_NONE	0
44 #define STATE_PENDING	1
45 #define STATE_READY	2
46 
47 /* used by sysctl */
48 #define FS_MQUEUE 	1
49 #define CTL_QUEUESMAX 	2
50 #define CTL_MSGMAX 	3
51 #define CTL_MSGSIZEMAX 	4
52 
53 /* default values */
54 #define DFLT_QUEUESMAX	256	/* max number of message queues */
55 #define DFLT_MSGMAX 	10	/* max number of messages in each queue */
56 #define HARD_MSGMAX 	(131072/sizeof(void*))	/* keeps the message pointer table within 128 KiB */
57 #define DFLT_MSGSIZEMAX 8192	/* max message size */
58 
59 
60 struct ext_wait_queue {		/* queue of sleeping tasks */
61 	struct task_struct *task;
62 	struct list_head list;
63 	struct msg_msg *msg;	/* ptr of loaded message */
64 	int state;		/* one of STATE_* values */
65 };
66 
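/*
 * Per-queue state.  It is embedded in (and allocated together with) the
 * queue's inode; MQUEUE_I() converts between the two.  messages[] holds
 * mq_maxmsg message pointers kept sorted by priority.
 */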
67 struct mqueue_inode_info {
68 	spinlock_t lock;
69 	struct inode vfs_inode;
70 	wait_queue_head_t wait_q;
71 
72 	struct msg_msg **messages;
73 	struct mq_attr attr;
74 
75 	struct sigevent notify;
76 	struct pid* notify_owner;
77 	struct user_struct *user;	/* user who created, for accounting */
78 	struct sock *notify_sock;
79 	struct sk_buff *notify_cookie;
80 
81 	/* for tasks waiting for free space and messages, respectively */
82 	struct ext_wait_queue e_wait_q[2];
83 
84 	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
85 };
86 
87 static const struct inode_operations mqueue_dir_inode_operations;
88 static const struct file_operations mqueue_file_operations;
89 static struct super_operations mqueue_super_ops;
90 static void remove_notification(struct mqueue_inode_info *info);
91 
92 static spinlock_t mq_lock;
93 static struct kmem_cache *mqueue_inode_cachep;
94 static struct vfsmount *mqueue_mnt;
95 
96 static unsigned int queues_count;
97 static unsigned int queues_max 	= DFLT_QUEUESMAX;
98 static unsigned int msg_max 	= DFLT_MSGMAX;
99 static unsigned int msgsize_max = DFLT_MSGSIZEMAX;
100 
101 static struct ctl_table_header * mq_sysctl_table;
102 
103 static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
104 {
105 	return container_of(inode, struct mqueue_inode_info, vfs_inode);
106 }
107 
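/*
 * Allocate and initialize an inode.  For regular files (queues) this also
 * charges the worst-case memory use of the queue (the pointer table plus
 * mq_maxmsg * mq_msgsize bytes of payload) against the creator's
 * RLIMIT_MSGQUEUE, and allocates the message pointer table itself.
 */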
108 static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
109 							struct mq_attr *attr)
110 {
111 	struct inode *inode;
112 
113 	inode = new_inode(sb);
114 	if (inode) {
115 		inode->i_mode = mode;
116 		inode->i_uid = current->fsuid;
117 		inode->i_gid = current->fsgid;
118 		inode->i_blocks = 0;
119 		inode->i_mtime = inode->i_ctime = inode->i_atime =
120 				CURRENT_TIME;
121 
122 		if (S_ISREG(mode)) {
123 			struct mqueue_inode_info *info;
124 			struct task_struct *p = current;
125 			struct user_struct *u = p->user;
126 			unsigned long mq_bytes, mq_msg_tblsz;
127 
128 			inode->i_fop = &mqueue_file_operations;
129 			inode->i_size = FILENT_SIZE;
130 			/* mqueue specific info */
131 			info = MQUEUE_I(inode);
132 			spin_lock_init(&info->lock);
133 			init_waitqueue_head(&info->wait_q);
134 			INIT_LIST_HEAD(&info->e_wait_q[0].list);
135 			INIT_LIST_HEAD(&info->e_wait_q[1].list);
136 			info->messages = NULL;
137 			info->notify_owner = NULL;
138 			info->qsize = 0;
139 			info->user = NULL;	/* set when all is ok */
140 			memset(&info->attr, 0, sizeof(info->attr));
141 			info->attr.mq_maxmsg = DFLT_MSGMAX;
142 			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
143 			if (attr) {
144 				info->attr.mq_maxmsg = attr->mq_maxmsg;
145 				info->attr.mq_msgsize = attr->mq_msgsize;
146 			}
147 			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
148 			mq_bytes = (mq_msg_tblsz +
149 				(info->attr.mq_maxmsg * info->attr.mq_msgsize));
150 
151 			spin_lock(&mq_lock);
152 			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
153 		 	    u->mq_bytes + mq_bytes >
154 			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
155 				spin_unlock(&mq_lock);
156 				goto out_inode;
157 			}
158 			u->mq_bytes += mq_bytes;
159 			spin_unlock(&mq_lock);
160 
161 			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
162 			if (!info->messages) {
163 				spin_lock(&mq_lock);
164 				u->mq_bytes -= mq_bytes;
165 				spin_unlock(&mq_lock);
166 				goto out_inode;
167 			}
168 			/* all is ok */
169 			info->user = get_uid(u);
170 		} else if (S_ISDIR(mode)) {
171 			inc_nlink(inode);
172 			/* Some things misbehave if size == 0 on a directory */
173 			inode->i_size = 2 * DIRENT_SIZE;
174 			inode->i_op = &mqueue_dir_inode_operations;
175 			inode->i_fop = &simple_dir_operations;
176 		}
177 	}
178 	return inode;
179 out_inode:
180 	make_bad_inode(inode);
181 	iput(inode);
182 	return NULL;
183 }
184 
185 static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
186 {
187 	struct inode *inode;
188 
189 	sb->s_blocksize = PAGE_CACHE_SIZE;
190 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
191 	sb->s_magic = MQUEUE_MAGIC;
192 	sb->s_op = &mqueue_super_ops;
193 
194 	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
195 	if (!inode)
196 		return -ENOMEM;
197 
198 	sb->s_root = d_alloc_root(inode);
199 	if (!sb->s_root) {
200 		iput(inode);
201 		return -ENOMEM;
202 	}
203 
204 	return 0;
205 }
206 
207 static int mqueue_get_sb(struct file_system_type *fs_type,
208 			 int flags, const char *dev_name,
209 			 void *data, struct vfsmount *mnt)
210 {
211 	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
212 }
213 
214 static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
215 {
216 	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
217 
218 	if (flags & SLAB_CTOR_CONSTRUCTOR)
219 		inode_init_once(&p->vfs_inode);
220 }
221 
222 static struct inode *mqueue_alloc_inode(struct super_block *sb)
223 {
224 	struct mqueue_inode_info *ei;
225 
226 	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
227 	if (!ei)
228 		return NULL;
229 	return &ei->vfs_inode;
230 }
231 
232 static void mqueue_destroy_inode(struct inode *inode)
233 {
234 	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
235 }
236 
237 static void mqueue_delete_inode(struct inode *inode)
238 {
239 	struct mqueue_inode_info *info;
240 	struct user_struct *user;
241 	unsigned long mq_bytes;
242 	int i;
243 
244 	if (S_ISDIR(inode->i_mode)) {
245 		clear_inode(inode);
246 		return;
247 	}
248 	info = MQUEUE_I(inode);
249 	spin_lock(&info->lock);
250 	for (i = 0; i < info->attr.mq_curmsgs; i++)
251 		free_msg(info->messages[i]);
252 	kfree(info->messages);
253 	spin_unlock(&info->lock);
254 
255 	clear_inode(inode);
256 
257 	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
258 		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
259 	user = info->user;
260 	if (user) {
261 		spin_lock(&mq_lock);
262 		user->mq_bytes -= mq_bytes;
263 		queues_count--;
264 		spin_unlock(&mq_lock);
265 		free_uid(user);
266 	}
267 }
268 
269 static int mqueue_create(struct inode *dir, struct dentry *dentry,
270 				int mode, struct nameidata *nd)
271 {
272 	struct inode *inode;
273 	struct mq_attr *attr = dentry->d_fsdata;
274 	int error;
275 
276 	spin_lock(&mq_lock);
277 	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
278 		error = -ENOSPC;
279 		goto out_lock;
280 	}
281 	queues_count++;
282 	spin_unlock(&mq_lock);
283 
284 	inode = mqueue_get_inode(dir->i_sb, mode, attr);
285 	if (!inode) {
286 		error = -ENOMEM;
287 		spin_lock(&mq_lock);
288 		queues_count--;
289 		goto out_lock;
290 	}
291 
292 	dir->i_size += DIRENT_SIZE;
293 	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
294 
295 	d_instantiate(dentry, inode);
296 	dget(dentry);
297 	return 0;
298 out_lock:
299 	spin_unlock(&mq_lock);
300 	return error;
301 }
302 
303 static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
304 {
305 	struct inode *inode = dentry->d_inode;
306 
307 	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
308 	dir->i_size -= DIRENT_SIZE;
309 	drop_nlink(inode);
310 	dput(dentry);
311 	return 0;
312 }
313 
314 /*
315 *	This routine implements read(2) on a queue file.
316 *	To avoid turning it into a second mq_receive we expose only the
317 *	queue size & notification info (the only values that are
318 *	interesting from the user's point of view and aren't accessible
319 *	through the standard mq_* routines)
320 */
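/*
 * For example (purely illustrative values), reading the queue file could
 * return a line like:
 *
 *	QSIZE:129          NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * as produced by the snprintf() format below.
 */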
321 static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
322 				size_t count, loff_t * off)
323 {
324 	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
325 	char buffer[FILENT_SIZE];
326 	size_t slen;
327 	loff_t o;
328 
329 	if (!count)
330 		return 0;
331 
332 	spin_lock(&info->lock);
333 	snprintf(buffer, sizeof(buffer),
334 			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
335 			info->qsize,
336 			info->notify_owner ? info->notify.sigev_notify : 0,
337 			(info->notify_owner &&
338 			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
339 				info->notify.sigev_signo : 0,
340 			pid_nr(info->notify_owner));
341 	spin_unlock(&info->lock);
342 	buffer[sizeof(buffer)-1] = '\0';
343 	slen = strlen(buffer)+1;
344 
345 	o = *off;
346 	if (o > slen)
347 		return 0;
348 
349 	if (o + count > slen)
350 		count = slen - o;
351 
352 	if (copy_to_user(u_data, buffer + o, count))
353 		return -EFAULT;
354 
355 	*off = o + count;
356 	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
357 	return count;
358 }
359 
360 static int mqueue_flush_file(struct file *filp, fl_owner_t id)
361 {
362 	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
363 
364 	spin_lock(&info->lock);
365 	if (task_tgid(current) == info->notify_owner)
366 		remove_notification(info);
367 
368 	spin_unlock(&info->lock);
369 	return 0;
370 }
371 
372 static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
373 {
374 	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
375 	int retval = 0;
376 
377 	poll_wait(filp, &info->wait_q, poll_tab);
378 
379 	spin_lock(&info->lock);
380 	if (info->attr.mq_curmsgs)
381 		retval = POLLIN | POLLRDNORM;
382 
383 	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
384 		retval |= POLLOUT | POLLWRNORM;
385 	spin_unlock(&info->lock);
386 
387 	return retval;
388 }
389 
390 /* Adds current to info->e_wait_q[sr] before element with smaller prio */
391 static void wq_add(struct mqueue_inode_info *info, int sr,
392 			struct ext_wait_queue *ewp)
393 {
394 	struct ext_wait_queue *walk;
395 
396 	ewp->task = current;
397 
398 	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
399 		if (walk->task->static_prio <= current->static_prio) {
400 			list_add_tail(&ewp->list, &walk->list);
401 			return;
402 		}
403 	}
404 	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
405 }
406 
407 /*
408  * Puts the current task to sleep. Caller must hold the queue lock; on
409  * return the lock is no longer held.
410  * sr: SEND or RECV
411  */
412 static int wq_sleep(struct mqueue_inode_info *info, int sr,
413 			long timeout, struct ext_wait_queue *ewp)
414 {
415 	int retval;
416 	signed long time;
417 
418 	wq_add(info, sr, ewp);
419 
420 	for (;;) {
421 		set_current_state(TASK_INTERRUPTIBLE);
422 
423 		spin_unlock(&info->lock);
424 		time = schedule_timeout(timeout);
425 
426 		while (ewp->state == STATE_PENDING)
427 			cpu_relax();
428 
429 		if (ewp->state == STATE_READY) {
430 			retval = 0;
431 			goto out;
432 		}
433 		spin_lock(&info->lock);
434 		if (ewp->state == STATE_READY) {
435 			retval = 0;
436 			goto out_unlock;
437 		}
438 		if (signal_pending(current)) {
439 			retval = -ERESTARTSYS;
440 			break;
441 		}
442 		if (time == 0) {
443 			retval = -ETIMEDOUT;
444 			break;
445 		}
446 	}
447 	list_del(&ewp->list);
448 out_unlock:
449 	spin_unlock(&info->lock);
450 out:
451 	return retval;
452 }
453 
454 /*
455  * Returns waiting task that should be serviced first or NULL if none exists
456  */
457 static struct ext_wait_queue *wq_get_first_waiter(
458 		struct mqueue_inode_info *info, int sr)
459 {
460 	struct list_head *ptr;
461 
462 	ptr = info->e_wait_q[sr].list.prev;
463 	if (ptr == &info->e_wait_q[sr].list)
464 		return NULL;
465 	return list_entry(ptr, struct ext_wait_queue, list);
466 }
467 
468 /* Auxiliary functions for the message array, which is kept sorted by priority so that msg_get() returns the highest-priority message first */
469 static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
470 {
471 	int k;
472 
473 	k = info->attr.mq_curmsgs - 1;
474 	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
475 		info->messages[k + 1] = info->messages[k];
476 		k--;
477 	}
478 	info->attr.mq_curmsgs++;
479 	info->qsize += ptr->m_ts;
480 	info->messages[k + 1] = ptr;
481 }
482 
483 static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
484 {
485 	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
486 	return info->messages[info->attr.mq_curmsgs];
487 }
488 
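/*
 * The notify cookie is a small user-supplied buffer carried in an skb on a
 * netlink socket.  set_cookie() stamps its last byte so that the user-space
 * side (typically the C library's notification helper thread) can tell
 * whether it was woken for a real notification (NOTIFY_WOKENUP) or because
 * the registration was removed (NOTIFY_REMOVED).
 */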
489 static inline void set_cookie(struct sk_buff *skb, char code)
490 {
491 	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
492 }
493 
494 /*
495  * The next function exists only to keep sys_mq_timedsend from getting too long
496  */
497 static void __do_notify(struct mqueue_inode_info *info)
498 {
499 	/* notification
500 	 * is delivered when a process has registered for it, no process is
501 	 * waiting synchronously for a message, AND the queue has just gone
502 	 * from empty to non-empty. At this point we know that no one is
503 	 * waiting synchronously. */
504 	if (info->notify_owner &&
505 	    info->attr.mq_curmsgs == 1) {
506 		struct siginfo sig_i;
507 		switch (info->notify.sigev_notify) {
508 		case SIGEV_NONE:
509 			break;
510 		case SIGEV_SIGNAL:
511 			/* sends signal */
512 
513 			sig_i.si_signo = info->notify.sigev_signo;
514 			sig_i.si_errno = 0;
515 			sig_i.si_code = SI_MESGQ;
516 			sig_i.si_value = info->notify.sigev_value;
517 			sig_i.si_pid = current->tgid;
518 			sig_i.si_uid = current->uid;
519 
520 			kill_pid_info(info->notify.sigev_signo,
521 				      &sig_i, info->notify_owner);
522 			break;
523 		case SIGEV_THREAD:
524 			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
525 			netlink_sendskb(info->notify_sock,
526 					info->notify_cookie, 0);
527 			break;
528 		}
529 		/* notifications are one-shot: unregister the process after delivery */
530 		put_pid(info->notify_owner);
531 		info->notify_owner = NULL;
532 	}
533 	wake_up(&info->wait_q);
534 }
535 
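/*
 * Convert an absolute CLOCK_REALTIME timeout supplied by user space into a
 * relative timeout in jiffies.  Returns MAX_SCHEDULE_TIMEOUT when no timeout
 * was given, 0 when the deadline has already passed, or a negative error
 * code (-EFAULT/-EINVAL) for a bad timespec.
 */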
536 static long prepare_timeout(const struct timespec __user *u_arg)
537 {
538 	struct timespec ts, nowts;
539 	long timeout;
540 
541 	if (u_arg) {
542 		if (unlikely(copy_from_user(&ts, u_arg,
543 					sizeof(struct timespec))))
544 			return -EFAULT;
545 
546 		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
547 			|| ts.tv_nsec >= NSEC_PER_SEC))
548 			return -EINVAL;
549 		nowts = CURRENT_TIME;
550 		/* compute the relative time first: an absolute time may not fit in jiffies */
551 		ts.tv_sec -= nowts.tv_sec;
552 		if (ts.tv_nsec < nowts.tv_nsec) {
553 			ts.tv_nsec += NSEC_PER_SEC;
554 			ts.tv_sec--;
555 		}
556 		ts.tv_nsec -= nowts.tv_nsec;
557 		if (ts.tv_sec < 0)
558 			return 0;
559 
560 		timeout = timespec_to_jiffies(&ts) + 1;
561 	} else
562 		return MAX_SCHEDULE_TIMEOUT;
563 
564 	return timeout;
565 }
566 
567 static void remove_notification(struct mqueue_inode_info *info)
568 {
569 	if (info->notify_owner != NULL &&
570 	    info->notify.sigev_notify == SIGEV_THREAD) {
571 		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
572 		netlink_sendskb(info->notify_sock, info->notify_cookie, 0);
573 	}
574 	put_pid(info->notify_owner);
575 	info->notify_owner = NULL;
576 }
577 
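/*
 * Sanity-check the attributes requested for a new queue: both values must be
 * positive, within the soft limits (or the hard limit for CAP_SYS_RESOURCE
 * callers), and the total memory charge must not overflow an unsigned long.
 */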
578 static int mq_attr_ok(struct mq_attr *attr)
579 {
580 	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
581 		return 0;
582 	if (capable(CAP_SYS_RESOURCE)) {
583 		if (attr->mq_maxmsg > HARD_MSGMAX)
584 			return 0;
585 	} else {
586 		if (attr->mq_maxmsg > msg_max ||
587 				attr->mq_msgsize > msgsize_max)
588 			return 0;
589 	}
590 	/* check for overflow */
591 	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
592 		return 0;
593 	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
594 	    (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
595 	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
596 		return 0;
597 	return 1;
598 }
599 
600 /*
601  * Invoked when creating a new queue via sys_mq_open
602  */
603 static struct file *do_create(struct dentry *dir, struct dentry *dentry,
604 			int oflag, mode_t mode, struct mq_attr __user *u_attr)
605 {
606 	struct mq_attr attr;
607 	int ret;
608 
609 	if (u_attr) {
610 		ret = -EFAULT;
611 		if (copy_from_user(&attr, u_attr, sizeof(attr)))
612 			goto out;
613 		ret = -EINVAL;
614 		if (!mq_attr_ok(&attr))
615 			goto out;
616 		/* store for use during create */
617 		dentry->d_fsdata = &attr;
618 	}
619 
620 	mode &= ~current->fs->umask;
621 	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
622 	dentry->d_fsdata = NULL;
623 	if (ret)
624 		goto out;
625 
626 	return dentry_open(dentry, mqueue_mnt, oflag);
627 
628 out:
629 	dput(dentry);
630 	mntput(mqueue_mnt);
631 	return ERR_PTR(ret);
632 }
633 
634 /* Opens existing queue */
635 static struct file *do_open(struct dentry *dentry, int oflag)
636 {
637 	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
638 						MAY_READ | MAY_WRITE };
639 
640 	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
641 		dput(dentry);
642 		mntput(mqueue_mnt);
643 		return ERR_PTR(-EINVAL);
644 	}
645 
646 	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
647 		dput(dentry);
648 		mntput(mqueue_mnt);
649 		return ERR_PTR(-EACCES);
650 	}
651 
652 	return dentry_open(dentry, mqueue_mnt, oflag);
653 }
654 
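/*
 * sys_mq_open() - open, and possibly create, a POSIX message queue.  Queue
 * names are looked up as dentries directly under the root of the internal
 * mqueue mount, and the result is returned as a close-on-exec file
 * descriptor.
 */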
655 asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
656 				struct mq_attr __user *u_attr)
657 {
658 	struct dentry *dentry;
659 	struct file *filp;
660 	char *name;
661 	int fd, error;
662 
663 	error = audit_mq_open(oflag, mode, u_attr);
664 	if (error != 0)
665 		return error;
666 
667 	if (IS_ERR(name = getname(u_name)))
668 		return PTR_ERR(name);
669 
670 	fd = get_unused_fd();
671 	if (fd < 0)
672 		goto out_putname;
673 
674 	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
675 	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
676 	if (IS_ERR(dentry)) {
677 		error = PTR_ERR(dentry);
678 		goto out_err;
679 	}
680 	mntget(mqueue_mnt);
681 
682 	if (oflag & O_CREAT) {
683 		if (dentry->d_inode) {	/* entry already exists */
684 			audit_inode(name, dentry->d_inode);
685 			error = -EEXIST;
686 			if (oflag & O_EXCL)
687 				goto out;
688 			filp = do_open(dentry, oflag);
689 		} else {
690 			filp = do_create(mqueue_mnt->mnt_root, dentry,
691 						oflag, mode, u_attr);
692 		}
693 	} else {
694 		error = -ENOENT;
695 		if (!dentry->d_inode)
696 			goto out;
697 		audit_inode(name, dentry->d_inode);
698 		filp = do_open(dentry, oflag);
699 	}
700 
701 	if (IS_ERR(filp)) {
702 		error = PTR_ERR(filp);
703 		goto out_putfd;
704 	}
705 
706 	set_close_on_exec(fd, 1);
707 	fd_install(fd, filp);
708 	goto out_upsem;
709 
710 out:
711 	dput(dentry);
712 	mntput(mqueue_mnt);
713 out_putfd:
714 	put_unused_fd(fd);
715 out_err:
716 	fd = error;
717 out_upsem:
718 	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
719 out_putname:
720 	putname(name);
721 	return fd;
722 }
723 
724 asmlinkage long sys_mq_unlink(const char __user *u_name)
725 {
726 	int err;
727 	char *name;
728 	struct dentry *dentry;
729 	struct inode *inode = NULL;
730 
731 	name = getname(u_name);
732 	if (IS_ERR(name))
733 		return PTR_ERR(name);
734 
735 	mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
736 			I_MUTEX_PARENT);
737 	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
738 	if (IS_ERR(dentry)) {
739 		err = PTR_ERR(dentry);
740 		goto out_unlock;
741 	}
742 
743 	if (!dentry->d_inode) {
744 		err = -ENOENT;
745 		goto out_err;
746 	}
747 
748 	inode = dentry->d_inode;
749 	if (inode)
750 		atomic_inc(&inode->i_count);
751 
752 	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
753 out_err:
754 	dput(dentry);
755 
756 out_unlock:
757 	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
758 	putname(name);
759 	if (inode)
760 		iput(inode);
761 
762 	return err;
763 }
764 
765 /* Pipelined send and receive functions.
766  *
767  * If a receiver finds no waiting message, then it registers itself in the
768  * list of waiting receivers. A sender checks that list before adding the new
769  * message into the message array. If there is a waiting receiver, then it
770  * bypasses the message array and directly hands the message over to the
771  * receiver.
772  * The receiver accepts the message and returns without grabbing the queue
773  * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
774  * are necessary. The same algorithm is used for sysv semaphores, see
775  * ipc/sem.c for more details.
776  *
777  * The same algorithm is used for senders.
778  */
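/*
 * The handshake seen by a waiter in wq_sleep() is therefore:
 *
 *	STATE_NONE    - queued on e_wait_q[], nothing has happened yet
 *	STATE_PENDING - the partner is handing over / accepting the message;
 *	                spin (cpu_relax()) until it finishes
 *	STATE_READY   - the transfer is complete, return success
 */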
779 
780 /* pipelined_send() - send a message directly to the task waiting in
781  * sys_mq_timedreceive() (without inserting the message into the queue).
782  */
783 static inline void pipelined_send(struct mqueue_inode_info *info,
784 				  struct msg_msg *message,
785 				  struct ext_wait_queue *receiver)
786 {
787 	receiver->msg = message;
788 	list_del(&receiver->list);
789 	receiver->state = STATE_PENDING;
790 	wake_up_process(receiver->task);
791 	smp_wmb();
792 	receiver->state = STATE_READY;
793 }
794 
795 /* pipelined_receive() - if a task is waiting in sys_mq_timedsend(),
796  * take its message and put it into the queue (a free slot is guaranteed). */
797 static inline void pipelined_receive(struct mqueue_inode_info *info)
798 {
799 	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
800 
801 	if (!sender) {
802 		/* for poll */
803 		wake_up_interruptible(&info->wait_q);
804 		return;
805 	}
806 	msg_insert(sender->msg, info);
807 	list_del(&sender->list);
808 	sender->state = STATE_PENDING;
809 	wake_up_process(sender->task);
810 	smp_wmb();
811 	sender->state = STATE_READY;
812 }
813 
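/*
 * Illustrative user-space usage of the two syscalls below, via the librt
 * wrappers (a sketch for orientation only, not part of this file):
 *
 *	mqd_t mq = mq_open("/myq", O_CREAT | O_RDWR, 0600, NULL);
 *	mq_send(mq, "hello", 5, 1);		// sys_mq_timedsend(), no timeout
 *	char buf[8192];				// must be >= mq_msgsize
 *	unsigned int prio;
 *	mq_receive(mq, buf, sizeof(buf), &prio);// sys_mq_timedreceive()
 *	mq_close(mq);
 *	mq_unlink("/myq");
 */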
814 asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
815 	size_t msg_len, unsigned int msg_prio,
816 	const struct timespec __user *u_abs_timeout)
817 {
818 	struct file *filp;
819 	struct inode *inode;
820 	struct ext_wait_queue wait;
821 	struct ext_wait_queue *receiver;
822 	struct msg_msg *msg_ptr;
823 	struct mqueue_inode_info *info;
824 	long timeout;
825 	int ret;
826 
827 	ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
828 	if (ret != 0)
829 		return ret;
830 
831 	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
832 		return -EINVAL;
833 
834 	timeout = prepare_timeout(u_abs_timeout);
835 
836 	ret = -EBADF;
837 	filp = fget(mqdes);
838 	if (unlikely(!filp))
839 		goto out;
840 
841 	inode = filp->f_path.dentry->d_inode;
842 	if (unlikely(filp->f_op != &mqueue_file_operations))
843 		goto out_fput;
844 	info = MQUEUE_I(inode);
845 	audit_inode(NULL, inode);
846 
847 	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
848 		goto out_fput;
849 
850 	if (unlikely(msg_len > info->attr.mq_msgsize)) {
851 		ret = -EMSGSIZE;
852 		goto out_fput;
853 	}
854 
855 	/* First try to allocate memory, before doing anything with
856 	 * existing queues. */
857 	msg_ptr = load_msg(u_msg_ptr, msg_len);
858 	if (IS_ERR(msg_ptr)) {
859 		ret = PTR_ERR(msg_ptr);
860 		goto out_fput;
861 	}
862 	msg_ptr->m_ts = msg_len;
863 	msg_ptr->m_type = msg_prio;
864 
865 	spin_lock(&info->lock);
866 
867 	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
868 		if (filp->f_flags & O_NONBLOCK) {
869 			spin_unlock(&info->lock);
870 			ret = -EAGAIN;
871 		} else if (unlikely(timeout < 0)) {
872 			spin_unlock(&info->lock);
873 			ret = timeout;
874 		} else {
875 			wait.task = current;
876 			wait.msg = (void *) msg_ptr;
877 			wait.state = STATE_NONE;
878 			ret = wq_sleep(info, SEND, timeout, &wait);
879 		}
880 		if (ret < 0)
881 			free_msg(msg_ptr);
882 	} else {
883 		receiver = wq_get_first_waiter(info, RECV);
884 		if (receiver) {
885 			pipelined_send(info, msg_ptr, receiver);
886 		} else {
887 			/* adds message to the queue */
888 			msg_insert(msg_ptr, info);
889 			__do_notify(info);
890 		}
891 		inode->i_atime = inode->i_mtime = inode->i_ctime =
892 				CURRENT_TIME;
893 		spin_unlock(&info->lock);
894 		ret = 0;
895 	}
896 out_fput:
897 	fput(filp);
898 out:
899 	return ret;
900 }
901 
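/*
 * sys_mq_timedreceive() - receive the highest-priority message, blocking (up
 * to the absolute timeout) if the queue is empty and O_NONBLOCK is not set.
 * On success the return value is the length of the received message.
 */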
902 asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
903 	size_t msg_len, unsigned int __user *u_msg_prio,
904 	const struct timespec __user *u_abs_timeout)
905 {
906 	long timeout;
907 	ssize_t ret;
908 	struct msg_msg *msg_ptr;
909 	struct file *filp;
910 	struct inode *inode;
911 	struct mqueue_inode_info *info;
912 	struct ext_wait_queue wait;
913 
914 	ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
915 	if (ret != 0)
916 		return ret;
917 
918 	timeout = prepare_timeout(u_abs_timeout);
919 
920 	ret = -EBADF;
921 	filp = fget(mqdes);
922 	if (unlikely(!filp))
923 		goto out;
924 
925 	inode = filp->f_path.dentry->d_inode;
926 	if (unlikely(filp->f_op != &mqueue_file_operations))
927 		goto out_fput;
928 	info = MQUEUE_I(inode);
929 	audit_inode(NULL, inode);
930 
931 	if (unlikely(!(filp->f_mode & FMODE_READ)))
932 		goto out_fput;
933 
934 	/* checks if buffer is big enough */
935 	if (unlikely(msg_len < info->attr.mq_msgsize)) {
936 		ret = -EMSGSIZE;
937 		goto out_fput;
938 	}
939 
940 	spin_lock(&info->lock);
941 	if (info->attr.mq_curmsgs == 0) {
942 		if (filp->f_flags & O_NONBLOCK) {
943 			spin_unlock(&info->lock);
944 			ret = -EAGAIN;
945 			msg_ptr = NULL;
946 		} else if (unlikely(timeout < 0)) {
947 			spin_unlock(&info->lock);
948 			ret = timeout;
949 			msg_ptr = NULL;
950 		} else {
951 			wait.task = current;
952 			wait.state = STATE_NONE;
953 			ret = wq_sleep(info, RECV, timeout, &wait);
954 			msg_ptr = wait.msg;
955 		}
956 	} else {
957 		msg_ptr = msg_get(info);
958 
959 		inode->i_atime = inode->i_mtime = inode->i_ctime =
960 				CURRENT_TIME;
961 
962 		/* There is now free space in queue. */
963 		pipelined_receive(info);
964 		spin_unlock(&info->lock);
965 		ret = 0;
966 	}
967 	if (ret == 0) {
968 		ret = msg_ptr->m_ts;
969 
970 		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
971 			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
972 			ret = -EFAULT;
973 		}
974 		free_msg(msg_ptr);
975 	}
976 out_fput:
977 	fput(filp);
978 out:
979 	return ret;
980 }
981 
982 /*
983  * Notes: if the caller asks to deregister (by passing a NULL pointer) but
984  * is not the current owner of the notification, the request is silently
985  * ignored. POSIX does not explicitly define this case.
986  */
987 asmlinkage long sys_mq_notify(mqd_t mqdes,
988 				const struct sigevent __user *u_notification)
989 {
990 	int ret;
991 	struct file *filp;
992 	struct sock *sock;
993 	struct inode *inode;
994 	struct sigevent notification;
995 	struct mqueue_inode_info *info;
996 	struct sk_buff *nc;
997 
998 	ret = audit_mq_notify(mqdes, u_notification);
999 	if (ret != 0)
1000 		return ret;
1001 
1002 	nc = NULL;
1003 	sock = NULL;
1004 	if (u_notification != NULL) {
1005 		if (copy_from_user(&notification, u_notification,
1006 					sizeof(struct sigevent)))
1007 			return -EFAULT;
1008 
1009 		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
1010 			     notification.sigev_notify != SIGEV_SIGNAL &&
1011 			     notification.sigev_notify != SIGEV_THREAD))
1012 			return -EINVAL;
1013 		if (notification.sigev_notify == SIGEV_SIGNAL &&
1014 			!valid_signal(notification.sigev_signo)) {
1015 			return -EINVAL;
1016 		}
1017 		if (notification.sigev_notify == SIGEV_THREAD) {
1018 			/* create the notify skb */
1019 			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1020 			ret = -ENOMEM;
1021 			if (!nc)
1022 				goto out;
1023 			ret = -EFAULT;
1024 			if (copy_from_user(nc->data,
1025 					notification.sigev_value.sival_ptr,
1026 					NOTIFY_COOKIE_LEN)) {
1027 				goto out;
1028 			}
1029 
1030 			/* TODO: add a header? */
1031 			skb_put(nc, NOTIFY_COOKIE_LEN);
1032 			/* and attach it to the socket */
1033 retry:
1034 			filp = fget(notification.sigev_signo);
1035 			ret = -EBADF;
1036 			if (!filp)
1037 				goto out;
1038 			sock = netlink_getsockbyfilp(filp);
1039 			fput(filp);
1040 			if (IS_ERR(sock)) {
1041 				ret = PTR_ERR(sock);
1042 				sock = NULL;
1043 				goto out;
1044 			}
1045 
1046 			ret = netlink_attachskb(sock, nc, 0,
1047 					MAX_SCHEDULE_TIMEOUT, NULL);
1048 			if (ret == 1)
1049 				goto retry;
1050 			if (ret) {
1051 				sock = NULL;
1052 				nc = NULL;
1053 				goto out;
1054 			}
1055 		}
1056 	}
1057 
1058 	ret = -EBADF;
1059 	filp = fget(mqdes);
1060 	if (!filp)
1061 		goto out;
1062 
1063 	inode = filp->f_path.dentry->d_inode;
1064 	if (unlikely(filp->f_op != &mqueue_file_operations))
1065 		goto out_fput;
1066 	info = MQUEUE_I(inode);
1067 
1068 	ret = 0;
1069 	spin_lock(&info->lock);
1070 	if (u_notification == NULL) {
1071 		if (info->notify_owner == task_tgid(current)) {
1072 			remove_notification(info);
1073 			inode->i_atime = inode->i_ctime = CURRENT_TIME;
1074 		}
1075 	} else if (info->notify_owner != NULL) {
1076 		ret = -EBUSY;
1077 	} else {
1078 		switch (notification.sigev_notify) {
1079 		case SIGEV_NONE:
1080 			info->notify.sigev_notify = SIGEV_NONE;
1081 			break;
1082 		case SIGEV_THREAD:
1083 			info->notify_sock = sock;
1084 			info->notify_cookie = nc;
1085 			sock = NULL;
1086 			nc = NULL;
1087 			info->notify.sigev_notify = SIGEV_THREAD;
1088 			break;
1089 		case SIGEV_SIGNAL:
1090 			info->notify.sigev_signo = notification.sigev_signo;
1091 			info->notify.sigev_value = notification.sigev_value;
1092 			info->notify.sigev_notify = SIGEV_SIGNAL;
1093 			break;
1094 		}
1095 
1096 		info->notify_owner = get_pid(task_tgid(current));
1097 		inode->i_atime = inode->i_ctime = CURRENT_TIME;
1098 	}
1099 	spin_unlock(&info->lock);
1100 out_fput:
1101 	fput(filp);
1102 out:
1103 	if (sock) {
1104 		netlink_detachskb(sock, nc);
1105 	} else if (nc) {
1106 		dev_kfree_skb(nc);
1107 	}
1108 	return ret;
1109 }
1110 
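/*
 * sys_mq_getsetattr() - backs both mq_getattr() and mq_setattr().  The
 * previous attributes are reported through u_omqstat; the only attribute
 * that can be changed on an open queue is the O_NONBLOCK flag.
 */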
1111 asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
1112 			const struct mq_attr __user *u_mqstat,
1113 			struct mq_attr __user *u_omqstat)
1114 {
1115 	int ret;
1116 	struct mq_attr mqstat, omqstat;
1117 	struct file *filp;
1118 	struct inode *inode;
1119 	struct mqueue_inode_info *info;
1120 
1121 	if (u_mqstat != NULL) {
1122 		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
1123 			return -EFAULT;
1124 		if (mqstat.mq_flags & (~O_NONBLOCK))
1125 			return -EINVAL;
1126 	}
1127 
1128 	ret = -EBADF;
1129 	filp = fget(mqdes);
1130 	if (!filp)
1131 		goto out;
1132 
1133 	inode = filp->f_path.dentry->d_inode;
1134 	if (unlikely(filp->f_op != &mqueue_file_operations))
1135 		goto out_fput;
1136 	info = MQUEUE_I(inode);
1137 
1138 	spin_lock(&info->lock);
1139 
1140 	omqstat = info->attr;
1141 	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
1142 	if (u_mqstat) {
1143 		ret = audit_mq_getsetattr(mqdes, &mqstat);
1144 		if (ret != 0) {
1145 			/* don't leak info->lock or the file reference */
			spin_unlock(&info->lock);
			goto out_fput;
		}
1146 		if (mqstat.mq_flags & O_NONBLOCK)
1147 			filp->f_flags |= O_NONBLOCK;
1148 		else
1149 			filp->f_flags &= ~O_NONBLOCK;
1150 
1151 		inode->i_atime = inode->i_ctime = CURRENT_TIME;
1152 	}
1153 
1154 	spin_unlock(&info->lock);
1155 
1156 	ret = 0;
1157 	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
1158 						sizeof(struct mq_attr)))
1159 		ret = -EFAULT;
1160 
1161 out_fput:
1162 	fput(filp);
1163 out:
1164 	return ret;
1165 }
1166 
1167 static const struct inode_operations mqueue_dir_inode_operations = {
1168 	.lookup = simple_lookup,
1169 	.create = mqueue_create,
1170 	.unlink = mqueue_unlink,
1171 };
1172 
1173 static const struct file_operations mqueue_file_operations = {
1174 	.flush = mqueue_flush_file,
1175 	.poll = mqueue_poll_file,
1176 	.read = mqueue_read_file,
1177 };
1178 
1179 static struct super_operations mqueue_super_ops = {
1180 	.alloc_inode = mqueue_alloc_inode,
1181 	.destroy_inode = mqueue_destroy_inode,
1182 	.statfs = simple_statfs,
1183 	.delete_inode = mqueue_delete_inode,
1184 	.drop_inode = generic_delete_inode,
1185 };
1186 
1187 static struct file_system_type mqueue_fs_type = {
1188 	.name = "mqueue",
1189 	.get_sb = mqueue_get_sb,
1190 	.kill_sb = kill_litter_super,
1191 };
1192 
1193 static int msg_max_limit_min = DFLT_MSGMAX;
1194 static int msg_max_limit_max = HARD_MSGMAX;
1195 
1196 static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
1197 static int msg_maxsize_limit_max = INT_MAX;
1198 
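/*
 * These limits are exposed under /proc/sys/fs/mqueue/ as queues_max,
 * msg_max and msgsize_max (see the directory tables below).
 */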
1199 static ctl_table mq_sysctls[] = {
1200 	{
1201 		.ctl_name	= CTL_QUEUESMAX,
1202 		.procname	= "queues_max",
1203 		.data		= &queues_max,
1204 		.maxlen		= sizeof(int),
1205 		.mode		= 0644,
1206 		.proc_handler	= &proc_dointvec,
1207 	},
1208 	{
1209 		.ctl_name	= CTL_MSGMAX,
1210 		.procname	= "msg_max",
1211 		.data		= &msg_max,
1212 		.maxlen		= sizeof(int),
1213 		.mode		= 0644,
1214 		.proc_handler	= &proc_dointvec_minmax,
1215 		.extra1		= &msg_max_limit_min,
1216 		.extra2		= &msg_max_limit_max,
1217 	},
1218 	{
1219 		.ctl_name	= CTL_MSGSIZEMAX,
1220 		.procname	= "msgsize_max",
1221 		.data		= &msgsize_max,
1222 		.maxlen		= sizeof(int),
1223 		.mode		= 0644,
1224 		.proc_handler	= &proc_dointvec_minmax,
1225 		.extra1		= &msg_maxsize_limit_min,
1226 		.extra2		= &msg_maxsize_limit_max,
1227 	},
1228 	{ .ctl_name = 0 }
1229 };
1230 
1231 static ctl_table mq_sysctl_dir[] = {
1232 	{
1233 		.ctl_name	= FS_MQUEUE,
1234 		.procname	= "mqueue",
1235 		.mode		= 0555,
1236 		.child		= mq_sysctls,
1237 	},
1238 	{ .ctl_name = 0 }
1239 };
1240 
1241 static ctl_table mq_sysctl_root[] = {
1242 	{
1243 		.ctl_name	= CTL_FS,
1244 		.procname	= "fs",
1245 		.mode		= 0555,
1246 		.child		= mq_sysctl_dir,
1247 	},
1248 	{ .ctl_name = 0 }
1249 };
1250 
1251 static int __init init_mqueue_fs(void)
1252 {
1253 	int error;
1254 
1255 	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1256 				sizeof(struct mqueue_inode_info), 0,
1257 				SLAB_HWCACHE_ALIGN, init_once, NULL);
1258 	if (mqueue_inode_cachep == NULL)
1259 		return -ENOMEM;
1260 
1261 	/* ignore failures - they are not fatal */
1262 	mq_sysctl_table = register_sysctl_table(mq_sysctl_root);
1263 
1264 	error = register_filesystem(&mqueue_fs_type);
1265 	if (error)
1266 		goto out_sysctl;
1267 
1268 	if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
1269 		error = PTR_ERR(mqueue_mnt);
1270 		goto out_filesystem;
1271 	}
1272 
1273 	/* internal initialization - not part of the generic VFS setup */
1274 	queues_count = 0;
1275 	spin_lock_init(&mq_lock);
1276 
1277 	return 0;
1278 
1279 out_filesystem:
1280 	unregister_filesystem(&mqueue_fs_type);
1281 out_sysctl:
1282 	if (mq_sysctl_table)
1283 		unregister_sysctl_table(mq_sysctl_table);
1284 	kmem_cache_destroy(mqueue_inode_cachep);
1285 	return error;
1286 }
1287 
1288 __initcall(init_mqueue_fs);
1289