/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H


#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
#include <uapi/linux/eventpoll.h>

extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#ifdef __clang__
#define MAX_STACK_ALLOC 768
#else
#define MAX_STACK_ALLOC 832
#endif
#define FRONTEND_STACK_ALLOC	256
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

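/*
 * Worked example (illustrative; the exact figures depend on the
 * architecture): with the default budget of 832 bytes,
 * WQUEUES_STACK_ALLOC is 832 - 256 = 576 bytes.  Assuming a 64-bit
 * build where sizeof(struct poll_table_entry) works out to 64 bytes,
 * that gives 576 / 64 = 9 inline entries (512 / 64 = 8 with the clang
 * budget) before the select/poll core has to fall back to allocating
 * additional struct poll_table_page memory.
 */
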
#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)

struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

/*
 * Do not touch the structure directly; use the access functions
 * poll_does_not_wait() and poll_requested_events() instead.
 */
typedef struct poll_table_struct {
	poll_queue_proc _qproc;
	__poll_t _key;
} poll_table;

static inline void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && p->_qproc && wait_address)
		p->_qproc(filp, wait_address, p);
}

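/*
 * Illustrative sketch (not part of this header): a typical f_op->poll
 * implementation registers its wait queue through poll_wait() and then
 * returns the events that are currently ready.  The my_dev structure
 * and the data_ready()/space_free() helpers below are hypothetical.
 *
 *	static __poll_t my_driver_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		__poll_t mask = 0;
 *
 *		poll_wait(file, &dev->waitq, wait);
 *		if (data_ready(dev))
 *			mask |= EPOLLIN | EPOLLRDNORM;
 *		if (space_free(dev))
 *			mask |= EPOLLOUT | EPOLLWRNORM;
 *		return mask;
 *	}
 */
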
/*
 * Return true if it is guaranteed that poll will not wait. This is the case
 * if the poll() of another file descriptor in the set got an event, so there
 * is no need for waiting.
 */
static inline bool poll_does_not_wait(const poll_table *p)
{
	return p == NULL || p->_qproc == NULL;
}

/*
 * Return the set of events that the application wants to poll for.
 * This is useful for drivers that need to know whether a DMA transfer has
 * to be started implicitly on poll(). You typically only want to do that
 * if the application is actually polling for POLLIN and/or POLLOUT.
 */
static inline __poll_t poll_requested_events(const poll_table *p)
{
	return p ? p->_key : ~(__poll_t)0;
}

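/*
 * Illustrative sketch (hypothetical driver code): following the comment
 * above, a driver that only wants to kick off an expensive receive/DMA
 * path when userspace actually asked for read events can consult the
 * accessors first.  my_cap_dev, start_rx_dma() and ready_mask() are
 * made-up names.
 *
 *	static __poll_t my_capture_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_cap_dev *dev = file->private_data;
 *
 *		if (!poll_does_not_wait(wait) &&
 *		    (poll_requested_events(wait) & (EPOLLIN | EPOLLRDNORM)))
 *			start_rx_dma(dev);
 *
 *		poll_wait(file, &dev->rx_waitq, wait);
 *		return ready_mask(dev);
 *	}
 */
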
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->_qproc = qproc;
	pt->_key   = ~(__poll_t)0; /* all events enabled */
}

static inline bool file_can_poll(struct file *file)
{
	return file->f_op->poll;
}

static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
	if (unlikely(!file->f_op->poll))
		return DEFAULT_POLLMASK;
	return file->f_op->poll(file, pt);
}

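/*
 * Illustrative sketch (not taken from an in-tree caller): code that only
 * wants a non-blocking "is it ready?" answer can hand vfs_poll() a
 * poll_table whose queueing callback is NULL; poll_wait() then does
 * nothing and no wait queue entry is registered.  The early return
 * mirrors the DEFAULT_POLLMASK behaviour for files without a ->poll
 * method.  fd_is_readable() is a hypothetical wrapper.
 *
 *	static bool fd_is_readable(struct file *file)
 *	{
 *		poll_table pt;
 *		__poll_t mask;
 *
 *		if (!file_can_poll(file))
 *			return true;
 *		init_poll_funcptr(&pt, NULL);
 *		mask = vfs_poll(file, &pt);
 *		return (mask & (EPOLLIN | EPOLLRDNORM)) != 0;
 *	}
 */
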
struct poll_table_entry {
	struct file *filp;
	__poll_t key;
	wait_queue_entry_t wait;
	wait_queue_head_t *wait_address;
};

/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;
	struct task_struct *polling_task;
	int triggered;
	int error;
	int inline_index;
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern u64 select_estimate_accuracy(struct timespec64 *tv);

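/*
 * Simplified sketch of the lifecycle these helpers implement (loosely
 * modelled on fs/select.c; timeout handling, signal checks and the
 * overflow into heap-allocated poll_table_page entries are omitted,
 * and the real code also stops queueing after the first pass so wait
 * queue entries are only registered once):
 *
 *	struct poll_wqueues table;
 *	__poll_t mask;
 *
 *	poll_initwait(&table);
 *	mask = vfs_poll(file, &table.pt);
 *	while (!(mask & wanted_events)) {
 *		...sleep until one of the queued wait queues wakes us...
 *		mask = vfs_poll(file, &table.pt);
 *	}
 *	poll_freewait(&table);
 *
 * Here wanted_events stands in for whatever event set the caller asked
 * about.
 */
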
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time);

extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
				   long nsec);

#define __MAP(v, from, to) \
	(from < to ? (v & from) * (to/from) : (v & from) / (from/to))

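/*
 * Worked example (with made-up values): __MAP(v, from, to) moves the
 * single event bit selected by "from" to the bit position used by "to",
 * which is what the M() users below rely on.  If, say, an architecture
 * defined POLLWRBAND as 0x0100 while EPOLLWRBAND is 0x0200, then
 * mangle_poll() would evaluate __MAP(v, 0x0200, 0x0100) as
 * (v & 0x0200) / (0x0200 / 0x0100), shifting the bit down one position.
 * When both constants are equal, the expression degenerates to
 * (v & from) and the bit passes through unchanged.
 */
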
static inline __u16 mangle_poll(__poll_t val)
{
	__u16 v = (__force __u16)val;
#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}

static inline __poll_t demangle_poll(u16 val)
{
#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}
#undef __MAP


#endif /* _LINUX_POLL_H */