/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
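 *
 *		In short: a unix socket becomes a collection candidate when
 *		every reference to its file is an in-flight one, i.e. when
 *		file_count() equals its inflight count.  unix_gc() then
 *		subtracts, for each candidate, the child references held by
 *		SCM_RIGHTS skbs queued on the candidates themselves; any
 *		candidate left with a positive count is still reachable from
 *		user space, so the counts in its subtree are restored, and
 *		the skbs pinning the remaining, purely cyclic sockets are
 *		unlinked and freed.  (A user-space sketch of how such a
 *		cycle is created is given in the comment at the end of this
 *		file.)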
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates,
	 * which have remaining references.  Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}
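
/* Illustration only -- not part of the kernel build.  A minimal user-space
 * sketch of the cycle this collector exists for, assuming the standard
 * <sys/socket.h> CMSG_* macros; the helper name send_fd() is made up for
 * the example.  Each end of a socketpair is placed in flight on the other
 * end's receive queue and both descriptors are then closed, so the only
 * remaining references are the in-flight ones and only unix_gc() above
 * can reclaim the pair:
 *
 *	static int send_fd(int via, int fd)
 *	{
 *		char dummy = '*';
 *		char buf[CMSG_SPACE(sizeof(int))] = { 0 };
 *		struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = buf, .msg_controllen = sizeof(buf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *		return sendmsg(via, &msg, 0);
 *	}
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	send_fd(sv[1], sv[0]);		// sv[0] is now in flight on sv[1]
 *	send_fd(sv[0], sv[1]);		// ...and vice versa: a cycle
 *	close(sv[0]);
 *	close(sv[1]);
 */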