xref: /openbmc/linux/include/net/request_sock.h (revision fd589a8f)
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * 		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	int		obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
};
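
/*
 * Each protocol supplies one request_sock_ops instance describing how its
 * connection requests are retransmitted, acked, reset and destroyed.  A
 * minimal sketch with hypothetical names (the real TCP table lives in
 * net/ipv4/tcp_ipv4.c):
 *
 *	static struct request_sock_ops my_request_sock_ops = {
 *		.family		= PF_INET,
 *		.obj_size	= sizeof(struct my_request_sock),
 *		.rtx_syn_ack	= my_rtx_synack,
 *		.send_ack	= my_send_ack,
 *		.send_reset	= my_send_reset,
 *		.destructor	= my_reqsk_destructor,
 *	};
 *
 * The @slab cache is normally created from @obj_size and @slab_name when the
 * protocol registers, and is what reqsk_alloc()/__reqsk_free() below use.
 */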

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct request_sock		*dl_next; /* Must be first member! */
	u16				mss;
	u8				retrans;
	u8				cookie_ts; /* syncookie: encode tcpopts in timestamp */
	/* The following two fields can be easily recomputed I think -AK */
	u32				window_clamp; /* window clamp at creation time */
	u32				rcv_wnd;	  /* rcv_wnd offered first time */
	u32				ts_recent;
	unsigned long			expires;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	u32				secid;
	u32				peer_secid;
};

static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
{
	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);

	if (req != NULL)
		req->rsk_ops = ops;

	return req;
}
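
/*
 * reqsk_alloc() runs in response to an incoming connection request,
 * typically from softirq context, hence the GFP_ATOMIC allocation.  The
 * caller fills in the protocol-specific fields before hashing the request
 * with reqsk_queue_hash_req() below.
 */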

static inline void __reqsk_free(struct request_sock *req)
{
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	__reqsk_free(req);
}
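
/*
 * reqsk_free() is the normal release path: it runs the protocol's
 * ->destructor() (e.g. to drop stored options) before returning the object
 * to the slab.  __reqsk_free() skips the destructor and is meant for
 * callers that know no protocol-private cleanup is needed here, such as
 * reqsk_queue_get_child() below.
 */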

extern int sysctl_max_syn_backlog;

/** struct listen_sock - listen state
 *
 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
 */
struct listen_sock {
	u8			max_qlen_log;
	/* 3 bytes hole, try to use */
	int			qlen;
	int			qlen_young;
	int			clock_hand;
	u32			hash_rnd;
	u32			nr_table_entries;
	struct request_sock	*syn_table[0];
};
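
/*
 * struct listen_sock is the per-listener SYN backlog: @syn_table is a hash
 * table with @nr_table_entries buckets (the zero-length array is sized and
 * allocated together with the structure by reqsk_queue_alloc()).
 * @max_qlen_log stores log2 of the backlog limit so that the "is the queue
 * full?" test reduces to a single shift, see reqsk_queue_is_full() below.
 */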

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 * @syn_wait_lock - serializer
 *
 * %syn_wait_lock is necessary only so that the proc interface does not have
 * to grab the main sock lock while browsing the listening hash (otherwise
 * that would be deadlock prone).
 *
 * This lock is acquired in read mode only from listening_get_next() seq_file
 * op and it's acquired in write mode _only_ from code that is actively
 * changing rskq_accept_head.  Readers that already hold the master sock lock
 * do not need to take this lock in read mode as well, because writes to
 * rskq_accept_head are always protected by the main sock lock.
 */
struct request_sock_queue {
	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	rwlock_t		syn_wait_lock;
	u8			rskq_defer_accept;
	/* 3 bytes hole, try to pack */
	struct listen_sock	*listen_opt;
};
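
/*
 * Sketch of a request's life as seen through this structure (illustrative
 * only; the exact call sites live in the protocols, e.g. the
 * inet_connection_sock code):
 *
 *	req = reqsk_alloc(ops);                          SYN received
 *	reqsk_queue_hash_req(queue, hash, req, timeout);
 *	reqsk_queue_added(queue);
 *		...
 *	reqsk_queue_unlink(queue, req, prev);            final ACK, child created
 *	reqsk_queue_removed(queue, req);
 *	reqsk_queue_add(queue, req, parent, child);
 *		...
 *	child = reqsk_queue_get_child(queue, parent);    accept()
 */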

extern int reqsk_queue_alloc(struct request_sock_queue *queue,
			     unsigned int nr_table_entries);

extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
extern void reqsk_queue_destroy(struct request_sock_queue *queue);

static inline struct request_sock *
	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	queue->rskq_accept_head = NULL;
	return req;
}
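
/*
 * reqsk_queue_yank_acceptq() detaches the whole accept FIFO and hands it to
 * the caller, e.g. so a listener that is being torn down can walk and free
 * the not-yet-accepted children on its own.
 */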

static inline int reqsk_queue_empty(struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}

static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
				      struct request_sock *req,
				      struct request_sock **prev_req)
{
	write_lock(&queue->syn_wait_lock);
	*prev_req = req->dl_next;
	write_unlock(&queue->syn_wait_lock);
}
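
/*
 * @prev_req points at the slot currently holding @req: either the
 * predecessor's dl_next or the syn_table[] bucket head.  Overwriting that
 * slot with req->dl_next drops @req out of its hash chain; the write lock
 * is only there so the /proc readers mentioned above never see a torn
 * chain.
 */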

static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
}
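
/*
 * reqsk_queue_add() appends a request whose child socket is now fully
 * established to the tail of the accept FIFO.  Callers hold the parent's
 * socket lock, which is what protects rskq_accept_head/rskq_accept_tail
 * (see the syn_wait_lock comment above).
 */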

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	WARN_ON(req == NULL);

	queue->rskq_accept_head = req->dl_next;
	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_tail = NULL;

	return req;
}

static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
						 struct sock *parent)
{
	struct request_sock *req = reqsk_queue_remove(queue);
	struct sock *child = req->sk;

	WARN_ON(child == NULL);

	sk_acceptq_removed(parent);
	__reqsk_free(req);
	return child;
}
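
/*
 * reqsk_queue_get_child() is the accept() side: it pops the oldest
 * established request, adjusts the parent's accept-queue accounting and
 * releases the request_sock with __reqsk_free(), handing the child socket
 * back to the caller.
 */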

static inline int reqsk_queue_removed(struct request_sock_queue *queue,
				      struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;

	if (req->retrans == 0)
		--lopt->qlen_young;

	return --lopt->qlen;
}

static inline int reqsk_queue_added(struct request_sock_queue *queue)
{
	struct listen_sock *lopt = queue->listen_opt;
	const int prev_qlen = lopt->qlen;

	lopt->qlen_young++;
	lopt->qlen++;
	return prev_qlen;
}
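
/*
 * lopt->qlen counts every pending request in the SYN table, while
 * lopt->qlen_young counts only those that have not yet had a SYN-ACK
 * retransmitted (req->retrans == 0).  Overload handling in the protocols
 * uses the young count to judge how fast fresh requests are piling up.
 */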

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen_young;
}

static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
}
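
/*
 * The shift keeps the "full" test branch-free: qlen >> max_qlen_log is
 * non-zero exactly when qlen >= 2^max_qlen_log.  For example, with
 * max_qlen_log == 8 the queue reports full once 256 requests are pending.
 */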

static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
					u32 hash, struct request_sock *req,
					unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->expires = jiffies + timeout;
	req->retrans = 0;
	req->sk = NULL;
	req->dl_next = lopt->syn_table[hash];

	write_lock(&queue->syn_wait_lock);
	lopt->syn_table[hash] = req;
	write_unlock(&queue->syn_wait_lock);
}
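
/*
 * New requests are inserted at the head of their hash chain.  dl_next is
 * set up before taking syn_wait_lock; only the store that publishes the
 * request into syn_table[] needs the write lock, again purely for the
 * benefit of the /proc readers described above.
 */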

#endif /* _REQUEST_SOCK_H */