/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	int		obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(struct sock *sk,
					   struct request_sock *req);
};

int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans;	/* number of retransmits */
	u8				cookie_ts:1;	/* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7;	/* number of timeouts */
	/* The following two fields can be easily recomputed I think -AK */
	u32				window_clamp;	/* window clamp at creation time */
	u32				rcv_wnd;	/* rcv_wnd offered first time */
	u32				ts_recent;
	unsigned long			expires;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	u32				secid;
	u32				peer_secid;
};

static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
{
	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);

	if (req != NULL)
		req->rsk_ops = ops;

	return req;
}

static inline void __reqsk_free(struct request_sock *req)
{
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	__reqsk_free(req);
}

extern int sysctl_max_syn_backlog;

/** struct listen_sock - listen state
 *
 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
 */
struct listen_sock {
	u8			max_qlen_log;
	u8			synflood_warned;
	/* 2 bytes hole, try to use */
	int			qlen;
	int			qlen_young;
	int			clock_hand;
	u32			hash_rnd;
	u32			nr_table_entries;
	struct request_sock	*syn_table[0];
};
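
/*
 * Illustrative helper (an assumption for exposition; not part of the
 * original header): the pending-request limit implied by @max_qlen_log.
 * For example, with max_qlen_log == 8 the backlog holds at most 256
 * requests; reqsk_queue_is_full() below reports full once qlen reaches
 * 256, since 256 >> 8 becomes non-zero.
 */
static inline u32 listen_sock_max_qlen(const struct listen_sock *lopt)
{
	return 1U << lopt->max_qlen_log;
}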

/*
 * For a TCP Fast Open listener -
 *	lock - protects access to all the reqsks, which are co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields could be made part of the
 *	"listen_sock" structure above, but there is an implementation
 *	difficulty: listen_sock is part of request_sock_queue and is
 *	therefore freed when a listener is stopped, whereas TFO-related
 *	fields may continue to be accessed even after a listener is closed,
 *	until its sk_refcnt drops to 0, implying no more outstanding TFO
 *	requests. One solution is to keep listen_opt around until sk_refcnt
 *	drops to 0, but there is some other complexity that needs to be
 *	resolved, e.g. a listener can be disabled temporarily through
 *	shutdown()->tcp_disconnect() and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attacks.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */
};

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 * @syn_wait_lock - serializer
 *
 * %syn_wait_lock is necessary only to avoid the proc interface having to
 * grab the main sock lock while browsing the listening hash (otherwise
 * it's deadlock prone).
 *
 * This lock is acquired in read mode only from listening_get_next() seq_file
 * op and it's acquired in write mode _only_ from code that is actively
 * changing rskq_accept_head. Readers that hold the master sock lock don't
 * need to grab this lock in read mode as well, since rskq_accept_head
 * writes are always protected by the main sock lock.
 */
struct request_sock_queue {
	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	rwlock_t		syn_wait_lock;
	u8			rskq_defer_accept;
	/* 3 bytes hole, try to pack */
	struct listen_sock	*listen_opt;
	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
					     * enabled on this listener. Check
					     * max_qlen != 0 in fastopen_queue
					     * to determine if TFO is enabled
					     * right at this moment.
					     */
};
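
/*
 * Illustrative sketch (an assumption for exposition; neither this helper
 * nor the match() callback exists in the original header): a reader that
 * does not hold the listener's main sock lock, such as the seq_file code
 * mentioned above, walks a SYN-table chain under syn_wait_lock in read
 * mode, while writers updating the table take it in write mode.
 */
static inline struct request_sock *
reqsk_queue_find_in_chain(struct request_sock_queue *queue, u32 hash,
			  bool (*match)(const struct request_sock *req))
{
	struct listen_sock *lopt = queue->listen_opt;
	struct request_sock *req;

	read_lock(&queue->syn_wait_lock);
	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next)
		if (match(req))
			break;
	read_unlock(&queue->syn_wait_lock);
	return req;
}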

int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries);

void __reqsk_queue_destroy(struct request_sock_queue *queue);
void reqsk_queue_destroy(struct request_sock_queue *queue);
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

/* Detach the entire accept queue; the caller takes ownership of the list. */
static inline struct request_sock *
reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	queue->rskq_accept_head = NULL;
	return req;
}

static inline int reqsk_queue_empty(struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}

/* Remove req from its SYN-table chain by pointing its predecessor's link
 * at its successor; taking syn_wait_lock in write mode keeps seq_file
 * readers consistent.
 */
static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
				      struct request_sock *req,
				      struct request_sock **prev_req)
{
	write_lock(&queue->syn_wait_lock);
	*prev_req = req->dl_next;
	write_unlock(&queue->syn_wait_lock);
}

/* Append an established child to the tail of the accept-queue FIFO. */
static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
}

/* Pop the oldest established child off the accept queue. */
static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	WARN_ON(req == NULL);

	queue->rskq_accept_head = req->dl_next;
	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_tail = NULL;

	return req;
}

/* Account for a request leaving the SYN backlog; returns the new length. */
static inline int reqsk_queue_removed(struct request_sock_queue *queue,
				      struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;

	if (req->num_timeout == 0)
		--lopt->qlen_young;

	return --lopt->qlen;
}

/* Account for a request entering the SYN backlog; returns the previous length. */
static inline int reqsk_queue_added(struct request_sock_queue *queue)
{
	struct listen_sock *lopt = queue->listen_opt;
	const int prev_qlen = lopt->qlen;

	lopt->qlen_young++;
	lopt->qlen++;
	return prev_qlen;
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen_young;
}

/* Non-zero once qlen reaches 2^max_qlen_log. */
static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
}

/* Initialize a fresh request and insert it at the head of its SYN-table chain. */
static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
					u32 hash, struct request_sock *req,
					unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->expires = jiffies + timeout;
	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;
	req->dl_next = lopt->syn_table[hash];

	write_lock(&queue->syn_wait_lock);
	lopt->syn_table[hash] = req;
	write_unlock(&queue->syn_wait_lock);
}

#endif /* _REQUEST_SOCK_H */
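
/*
 * Illustrative usage sketch (an assumption for exposition; not part of
 * the original header): a listener's accept() path pairs the queue
 * helpers above roughly as follows, under the parent's sock lock:
 *
 *	lock_sock(parent);
 *	if (!reqsk_queue_empty(queue)) {
 *		struct request_sock *req = reqsk_queue_remove(queue);
 *		struct sock *child = req->sk;
 *
 *		sk_acceptq_removed(parent);
 *		__reqsk_free(req);
 *		... hand child back to the caller ...
 *	}
 *	release_sock(parent);
 */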