// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. The hard limit is unset by
 * default; the soft limit matches the default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
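
/*
 * Illustrative note (not part of the original file): all three limits
 * above are runtime-tunable through sysctl, so an administrator might
 * adjust them with, e.g.:
 *
 *	# sysctl -w fs.pipe-max-size=4194304
 *	# sysctl -w fs.pipe-user-pages-soft=65536
 *	# sysctl -w fs.pipe-user-pages-hard=131072
 */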

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
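
/*
 * Illustrative sketch (not from this file): callers that need to hold
 * two pipe locks at once, such as the splice/tee paths that link two
 * pipes, take them in address order via the helper above to avoid
 * ABBA deadlocks:
 *
 *	pipe_double_lock(ipipe, opipe);
 *	... move or duplicate buffers between the two pipes ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 */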

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	they wish; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference count of one is golden: it means that the owner of
	 * this page is the only one holding a reference to it. Lock the
	 * page and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

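/*
 * Identical callbacks to anon_pipe_buf_ops; the distinct ops pointer is
 * what marks a buffer as unmergeable (see pipe_buf_can_merge() below).
 */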
static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

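/*
 * Used for buffers written to an O_DIRECT (packetized) pipe; such
 * buffers also carry PIPE_BUF_FLAG_PACKET (see pipe_write() below).
 */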
static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/**
 * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
 * @buf:	the buffer to mark
 *
 * Description:
 *	This function ensures that no future writes will be merged into the
 *	given &struct pipe_buffer. This is necessary when multiple pipe buffers
 *	share the same backing page.
 */
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}
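
/*
 * Illustrative userspace sketch (not part of this file): tee(2) is a
 * typical producer of page-sharing buffers, since it duplicates buffer
 * references from one pipe into another without copying the data:
 *
 *	ssize_t n = tee(pipe_a[0], pipe_b[1], 4096, 0);
 *
 * Afterwards both pipes reference the same pages, so further writes
 * must not be merged into those buffers.
 */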

static bool pipe_buf_can_merge(struct pipe_buffer *buf)
{
	return buf->ops == &anon_pipe_buf_ops;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
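
/*
 * Illustrative userspace sketch (not part of this file): a packetized
 * pipe is created with O_DIRECT; each write of up to PIPE_BUF bytes
 * then reaches the reader as a single packet:
 *
 *	char buf[PIPE_BUF];
 *	int fds[2];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);
 *	write(fds[1], "cd", 2);
 *	read(fds[0], buf, sizeof(buf));
 *
 * The read() returns 2 ("ab"), not 4, because each write is one packet.
 */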

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}
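
/*
 * Illustrative userspace sketch (not part of this file): FIONREAD
 * reports how many bytes are currently buffered in the pipe:
 *
 *	int avail;
 *
 *	if (ioctl(fds[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes ready\n", avail);
 */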

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore.  */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
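
/*
 * Illustrative userspace sketch (not part of this file): the mask
 * computed above is what poll(2) reports for a pipe end:
 *
 *	struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP))
 *		handle_writer_gone();
 *
 * POLLHUP on the read end means every writer has closed its end;
 * handle_writer_gone() is a hypothetical callback, shown only for
 * illustration.
 */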

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
                                 unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
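
/*
 * Illustrative userspace sketch (not part of this file):
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		perror("pipe2");
 *
 * fds[0] is the read end and fds[1] the write end; plain pipe(fds) is
 * equivalent to pipe2(fds, 0).
 */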

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
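
/*
 * Illustrative examples (assuming 4 KiB pages): round_pipe_size(1) and
 * round_pipe_size(4096) both return 4096; round_pipe_size(70000)
 * returns 131072, the next power of two; any size above 1U << 31
 * returns 0.
 */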

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
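
/*
 * Illustrative userspace sketch (not part of this file): pipe capacity
 * is queried and resized through fcntl(2):
 *
 *	long cap = fcntl(fds[1], F_GETPIPE_SZ);
 *
 *	if (fcntl(fds[1], F_SETPIPE_SZ, 1 << 20) < 0)
 *		perror("F_SETPIPE_SZ");
 *
 * Unprivileged callers may see EPERM when asking for more than
 * /proc/sys/fs/pipe-max-size, and EBUSY when shrinking below the
 * amount of data currently queued.
 */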

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);