// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
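
/*
 * Editor's note, a worked default (assuming PIPE_DEF_BUFFERS == 16 and
 * INR_OPEN_CUR == 1024, as in this tree): the soft limit starts out at
 * 16 * 1024 = 16384 pages, i.e. 64 MiB of pipe buffers per user with
 * 4 KiB pages, while a hard limit of zero means "no hard limit".
 */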

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally.  This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
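
/*
 * Editor's sketch of the unmasked-index scheme above, with hypothetical
 * values: if ring_size == 8 (mask 7) and the indices have wrapped to
 * head == 0x80000002 and tail == 0x7ffffffe, unsigned arithmetic still
 * gives an occupancy of head - tail == 4; the ring is neither empty
 * (head != tail) nor full (occupancy < ring_size), and the next slot to
 * read is bufs[tail & 7] == bufs[6]. Masking only at dereference time is
 * what avoids the classic one-slot dead spot of masked ring indices.
 */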

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(rdwait);
	DEFINE_WAIT(wrwait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
	prepare_to_wait(&pipe->wr_wait, &wrwait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->rd_wait, &rdwait);
	finish_wait(&pipe->wr_wait, &wrwait);
	pipe_lock(pipe);
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns %true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	it wishes; the typical use is insertion into a different file's
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		unsigned int head = pipe->head;
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full)) {
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full) {
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
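
/*
 * Userspace sketch of packet mode (editor's addition, hypothetical code,
 * not part of this file): O_DIRECT at pipe2() time makes every buffer
 * PIPE_BUF_FLAG_PACKET, so one write() is one packet and a short read()
 * discards the rest of the packet, as in pipe_read() above:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[4];
 *	if (pipe2(fds, O_DIRECT) == 0) {
 *		write(fds[1], "abcdef", 6);	// queued as one 6-byte packet
 *		read(fds[0], buf, 4);		// returns 4; "ef" is discarded
 *	}
 */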

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/*
	 * Only wake up if the pipe started out empty, since
	 * otherwise there should be no readers waiting.
	 *
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer.  If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty) {
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 */
	if (was_empty) {
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
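
/*
 * Editor's note on an observable consequence of the merge path above
 * (hypothetical values): two back-to-back write(fd, "a", 1) calls on an
 * ordinary pipe land in the same page via PIPE_BUF_FLAG_CAN_MERGE, so
 * FIONREAD reports 2 bytes while only a single ring slot is occupied.
 * With O_DIRECT the buffers carry PIPE_BUF_FLAG_PACKET instead, merging
 * never happens, and each one-byte write consumes a slot of its own.
 */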

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			head = pipe->head;
			tail = pipe->tail;
			mask = pipe->ring_size - 1;

			while (tail != head) {
				count += pipe->bufs[tail & mask].len;
				tail++;
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}
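
/*
 * Userspace sketch of the FIONREAD path above (editor's addition,
 * hypothetical code, not part of this file): query the number of queued
 * bytes, i.e. the sum of buf->len over the occupied ring slots, without
 * consuming them:
 *
 *	#include <sys/ioctl.h>
 *
 *	int queued;
 *	if (ioctl(fds[0], FIONREAD, &queued) == 0)
 *		;	// queued now holds the unread byte count
 */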

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
                                 unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->ring_size, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
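
/*
 * Userspace sketch (editor's addition, hypothetical code, not part of
 * this file): pipe2() accepts exactly the flags __do_pipe_flags()
 * allows, so both ends can be created close-on-exec and non-blocking
 * atomically, with no fcntl() window in between:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		return -1;	// any other flag yields EINVAL above
 */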

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
	wake_up_interruptible_all(&pipe->wr_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
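
/*
 * Worked examples for round_pipe_size() (editor's note, assuming 4 KiB
 * pages): an argument of 100 is clamped up to PAGE_SIZE, i.e. 4096; an
 * argument of 70000 rounds up to the next power of two, 131072 (32
 * pages); anything above 2^31 yields 0, which pipe_set_size() below
 * rejects as -EINVAL.
 */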

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_slots, head, tail, mask, n;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->ring_size &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots);

	if (nr_slots > pipe->ring_size &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg is greater than the ring occupancy.
	 * Since we don't expect a lot of shrink+grow operations, just free and
	 * allocate again like we would do for growing.  If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	n = pipe_occupancy(pipe->head, pipe->tail);
	if (nr_slots < n) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
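
/*
 * Userspace sketch of the fcntl interface above (editor's addition,
 * hypothetical code, not part of this file): resize a pipe and read back
 * the actual capacity, which round_pipe_size() may have made larger than
 * requested:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	if (fcntl(fds[1], F_SETPIPE_SZ, 100000) < 0)
 *		return -1;			// -EPERM, -EBUSY or -ENOMEM above
 *	long cap = fcntl(fds[1], F_GETPIPE_SZ);	// 131072 for this request
 */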

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we don't
 * need any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);