xref: /openbmc/linux/ipc/mqueue.c (revision d3efbdd6)
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

/* used by sysctl */
#define FS_MQUEUE	1
#define CTL_QUEUESMAX	2
#define CTL_MSGMAX	3
#define CTL_MSGSIZEMAX	4

/* default values */
#define DFLT_QUEUESMAX	256	/* max number of message queues */
#define DFLT_MSGMAX	10	/* max number of messages in each queue */
#define HARD_MSGMAX	(131072/sizeof(void *))
#define DFLT_MSGSIZEMAX	8192	/* max message size */
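/*
 * Note on HARD_MSGMAX: it caps the message pointer table at 128KB, i.e.
 * 131072/8 = 16384 entries on a 64-bit kernel (32768 with 4-byte pointers).
 */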


struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static spinlock_t mq_lock;
static struct kmem_cache *mqueue_inode_cachep;
static struct vfsmount *mqueue_mnt;

static unsigned int queues_count;
static unsigned int queues_max	= DFLT_QUEUESMAX;
static unsigned int msg_max	= DFLT_MSGMAX;
static unsigned int msgsize_max	= DFLT_MSGSIZEMAX;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
				      struct mq_attr *attr)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			struct user_struct *u = p->user;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->messages = NULL;
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = DFLT_MSGMAX;
			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));
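			/*
			 * Worked example: with the defaults on a 64-bit
			 * kernel, 10 messages of 8192 bytes charge
			 * 10*8 + 10*8192 = 82000 bytes against the user's
			 * RLIMIT_MSGQUEUE below.
			 */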

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages) {
				spin_lock(&mq_lock);
				u->mq_bytes -= mq_bytes;
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (!inode)
		return -ENOMEM;

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		return -ENOMEM;
	}

	return 0;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;

	spin_lock(&mq_lock);
	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_lock;
	}
	queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		queues_count--;
		goto out_lock;
	}

	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_lock:
	spin_unlock(&mq_lock);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
/*
 * This is the routine backing read() on a queue file. Rather than
 * implementing some form of mq_receive() here, we only expose the
 * queue size and the notification info (the only values that are
 * interesting from the user's point of view and aren't accessible
 * through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	size_t slen;
	loff_t o;

	if (!count)
		return 0;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_nr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';
	slen = strlen(buffer)+1;

	o = *off;
	if (o > slen)
		return 0;

	if (o + count > slen)
		count = slen - o;

	if (copy_to_user(u_data, buffer + o, count))
		return -EFAULT;

	*off = o + count;
	filp->f_path.dentry->d_inode->i_atime =
		filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return count;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp,
				     struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
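
/*
 * The list thus stays sorted by descending static_prio from the head:
 * e.g. waiters with static_prio 120, 125 and 122 end up ordered
 * 125 -> 122 -> 120, so wq_get_first_waiter() below, which takes
 * list.prev, always returns the waiter with the lowest static_prio,
 * i.e. the highest-priority task.
 */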

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * it is dropped before returning.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate the sorted message array */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
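
/*
 * Invariant: messages[] is kept sorted by ascending priority (m_type),
 * so msg_get() pops the highest-priority message in O(1). A new message
 * is shifted in before existing messages of equal priority, which makes
 * delivery FIFO within a priority level. Example: inserting priorities
 * 1, 5, 5', 3 (in that order) yields [1, 3, 5', 5], and msg_get()
 * returns 5, then 5', then 3, then 1.
 */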

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is split out of sys_mq_timedsend() only to keep
 * that function to a manageable length.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * The notification is sent only when a process has registered
	 * for it, no process is waiting synchronously for a message,
	 * AND the queue has just gone from empty to non-empty. At this
	 * point we are sure that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = current->tgid;
			sig_i.si_uid = current->uid;

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock,
					info->notify_cookie, 0);
			break;
		}
		/* after the notification the registration is removed */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}
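
/*
 * Userspace view (illustrative sketch, not part of this file): a
 * process typically arms the notification via mq_notify(3), e.g.
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(mqdes, &sev);
 *
 * after which __do_notify() above raises SIGUSR1 in the registering
 * process the next time the queue goes from empty to non-empty.
 */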

static long prepare_timeout(const struct timespec __user *u_arg)
{
	struct timespec ts, nowts;
	long timeout;

	if (u_arg) {
		if (unlikely(copy_from_user(&ts, u_arg,
					sizeof(struct timespec))))
			return -EFAULT;

		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
			|| ts.tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		ts.tv_sec -= nowts.tv_sec;
		if (ts.tv_nsec < nowts.tv_nsec) {
			ts.tv_nsec += NSEC_PER_SEC;
			ts.tv_sec--;
		}
		ts.tv_nsec -= nowts.tv_nsec;
		if (ts.tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(&ts) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}
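
/*
 * Worked example: with HZ=1000 and an absolute timeout 2.5s in the
 * future, the relative timespec is {2, 500000000}, which
 * timespec_to_jiffies() turns into 2500 jiffies; the +1 above ensures
 * we sleep at least the requested time despite the jiffy currently in
 * progress. A NULL u_arg means "block forever", and an absolute time
 * already in the past yields a timeout of 0, so wq_sleep() returns
 * -ETIMEDOUT immediately.
 */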

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie, 0);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > msg_max ||
				attr->mq_msgsize > msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
	    (attr->mq_maxmsg * sizeof(struct msg_msg *)) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}
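
/*
 * Overflow example: on a 32-bit kernel, mq_maxmsg = mq_msgsize = 65536
 * would make maxmsg * msgsize wrap to 0; the divide test above catches
 * it, since 65536 > ULONG_MAX/65536 = 65535. The second test likewise
 * rejects attributes where adding the pointer-table size would wrap
 * the total byte count.
 */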

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
			int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
	struct mq_attr attr;
	int ret;

	if (u_attr) {
		ret = -EFAULT;
		if (copy_from_user(&attr, u_attr, sizeof(attr)))
			goto out;
		ret = -EINVAL;
		if (!mq_attr_ok(&attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = &attr;
	}

	mode &= ~current->fs->umask;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out;

	return dentry_open(dentry, mqueue_mnt, oflag);

out:
	dput(dentry);
	mntput(mqueue_mnt);
	return ERR_PTR(ret);
}

/* Opens an existing queue */
static struct file *do_open(struct dentry *dentry, int oflag)
{
	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
					    MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, mqueue_mnt, oflag);
}

asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
				struct mq_attr __user *u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	int fd, error;

	error = audit_mq_open(oflag, mode, u_attr);
	if (error != 0)
		return error;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	fd = get_unused_fd();
	if (fd < 0)
		goto out_putname;

	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_err;
	}
	mntget(mqueue_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry->d_inode);
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(dentry, oflag);
		} else {
			filp = do_create(mqueue_mnt->mnt_root, dentry,
						oflag, mode, u_attr);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		audit_inode(name, dentry->d_inode);
		filp = do_open(dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	set_close_on_exec(fd, 1);
	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(mqueue_mnt);
out_putfd:
	put_unused_fd(fd);
out_err:
	fd = error;
out_upsem:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}
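
/*
 * Illustrative userspace usage (not part of this file): the librt
 * wrapper mq_open(3) resolves names like "/myqueue" against the
 * kernel-internal mqueue mount and lands here, e.g.
 *
 *	mqd_t q = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, NULL);
 *
 * A NULL attr means the DFLT_MSGMAX/DFLT_MSGSIZEMAX defaults set up in
 * mqueue_get_inode() apply.
 */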

asmlinkage long sys_mq_unlink(const char __user *u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);

	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
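
/*
 * Sketch of the handshake on the receive side (sender runs
 * pipelined_send(), receiver spins in wq_sleep()):
 *
 *	sender                                  receiver
 *	------                                  --------
 *	receiver->msg = message;
 *	receiver->state = STATE_PENDING;        while (state == STATE_PENDING)
 *	wake_up_process(receiver->task);                cpu_relax();
 *	smp_wmb();
 *	receiver->state = STATE_READY;          sees STATE_READY, takes msg
 *
 * The busy-wait on STATE_PENDING is what lets the receiver return
 * without ever taking the queue spinlock.
 */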

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (a free slot is
 * guaranteed to exist). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
	size_t msg_len, unsigned int msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	long timeout;
	int ret;

	ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, inode);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}
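
/*
 * Illustrative userspace call (not part of this file), via the librt
 * wrapper mq_timedsend(3):
 *
 *	struct timespec abs = { .tv_sec = time(NULL) + 2 };
 *	mq_timedsend(q, buf, len, prio, &abs);
 *
 * With the queue full and O_NONBLOCK clear, the caller blocks in
 * wq_sleep() until a receiver frees a slot or the 2s absolute deadline
 * expires (-ETIMEDOUT).
 */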

asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
	size_t msg_len, unsigned int __user *u_msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;

	ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, inode);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
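
/*
 * Note for callers: unlike read(), the receive buffer must be at least
 * mq_msgsize bytes even when the incoming message is smaller (see the
 * -EMSGSIZE check above), so a correct (illustrative) userspace
 * pattern is:
 *
 *	struct mq_attr a;
 *	mq_getattr(q, &a);
 *	char *buf = malloc(a.mq_msgsize);
 *	ssize_t n = mq_receive(q, buf, a.mq_msgsize, &prio);
 */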

/*
 * Note: a request to deregister (u_notification == NULL) from a caller
 * that does not currently own the notification is silently ignored.
 * POSIX does not explicitly define this case.
 */
asmlinkage long sys_mq_notify(mqd_t mqdes,
				const struct sigevent __user *u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	ret = audit_mq_notify(mqdes, u_notification);
	if (ret != 0)
		return ret;

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;

		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			ret = netlink_attachskb(sock, nc, 0,
					MAX_SCHEDULE_TIMEOUT, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}
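
/*
 * Note on SIGEV_THREAD above: the kernel does not create the thread
 * itself. By convention (a userspace C library detail, not enforced
 * here), the caller passes a netlink socket fd in sigev_signo and a
 * NOTIFY_COOKIE_LEN cookie in sigev_value.sival_ptr; the library reads
 * the cookie back from that socket (see __do_notify() and set_cookie())
 * and starts the notification thread when it sees NOTIFY_WOKENUP.
 */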

asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
			const struct mq_attr __user *u_mqstat,
			struct mq_attr __user *u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		ret = audit_mq_getsetattr(mqdes, &mqstat);
		if (ret != 0) {
			/* don't leak the spinlock or the file reference */
			spin_unlock(&info->lock);
			goto out_fput;
		}
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

static int msg_max_limit_min = DFLT_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;

static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
static int msg_maxsize_limit_max = INT_MAX;

static ctl_table mq_sysctls[] = {
	{
		.ctl_name	= CTL_QUEUESMAX,
		.procname	= "queues_max",
		.data		= &queues_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_MSGMAX,
		.procname	= "msg_max",
		.data		= &msg_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.ctl_name	= CTL_MSGSIZEMAX,
		.procname	= "msgsize_max",
		.data		= &msgsize_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_dir[] = {
	{
		.ctl_name	= FS_MQUEUE,
		.procname	= "mqueue",
		.mode		= 0555,
		.child		= mq_sysctls,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_root[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= mq_sysctl_dir,
	},
	{ .ctl_name = 0 }
};
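
/*
 * These knobs appear under /proc/sys/fs/mqueue/, e.g.
 *
 *	echo 64 > /proc/sys/fs/mqueue/msg_max
 *
 * raises the per-queue message limit for unprivileged callers (clamped
 * to [DFLT_MSGMAX, HARD_MSGMAX] by proc_dointvec_minmax).
 */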

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once, NULL);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = register_sysctl_table(mq_sysctl_root);

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	mqueue_mnt = kern_mount(&mqueue_fs_type);
	if (IS_ERR(mqueue_mnt)) {
		error = PTR_ERR(mqueue_mnt);
		goto out_filesystem;
	}

	/* internal initialization - not common for vfs */
	queues_count = 0;
	spin_lock_init(&mq_lock);

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);