// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

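/* List of in-flight sockets currently under consideration by the collector,
 * and the wait queue on which wait_for_unix_gc() callers sleep until a
 * collection in progress completes.
 */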
static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

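/* Walk the receive queue of @x and run @func on the unix socket behind every
 * in-flight file descriptor that refers to a GC candidate. If @hitlist is
 * non-NULL, skbs carrying at least one such descriptor are unlinked from the
 * queue and collected on @hitlist for later purging.
 */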
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

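/* Run scan_inflight() on @x or, for a listening socket, on each of its
 * not-yet-accepted (embryo) children instead of its own queue.
 */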
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

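/* Callbacks for scan_children(): adjust a candidate's in-flight count while
 * unix_gc_lock is held, so the counts can be used to detect cycles.
 */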
static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

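/* Set for the duration of a collection; senders observing more than
 * UNIX_INFLIGHT_TRIGGER_GC in-flight fds kick the collector themselves.
 */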
static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

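/* Throttle senders: force a collection when far too many fds are in flight,
 * and wait for any collection in progress to complete.
 */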
void wait_for_unix_gc(void)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight() and gc_in_progress().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();
	wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct sk_buff *next_skb, *skb;
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, true);

	/* First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference. Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues. Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 *
	 * Embryos, though never candidates themselves, affect which
	 * candidates are reachable by the garbage collector. Before
	 * being added to a listener's queue, an embryo may already
	 * receive data carrying SCM_RIGHTS, potentially making the
	 * passed socket a candidate that is not yet reachable by the
	 * collector. It becomes reachable once the embryo is
	 * enqueued. Therefore, we must ensure that no SCM-laden
	 * embryo appears in a (candidate) listener's queue between
	 * consecutive scan_children() calls.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		struct sock *sk = &u->sk;
		long total_refs;

		total_refs = file_count(sk->sk_socket->file);

		BUG_ON(!u->inflight);
		BUG_ON(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);

			if (sk->sk_state == TCP_LISTEN) {
				unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
				unix_state_unlock(sk);
			}
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates,
	 * which have remaining references. Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage. Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
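		/* Free any queued out-of-band skb the candidate still
		 * references, so it is not leaked when the socket dies.
		 */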
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* We need io_uring to clean its registered files, ignore all io_uring
	 * originated skbs. It's fine as io_uring doesn't keep references to
	 * other io_uring instances and so killing all other files in the cycle
	 * will put all io_uring references forcing it to go through normal
	 * release path eventually putting registered files.
	 */
	skb_queue_walk_safe(&hitlist, skb, next_skb) {
		if (skb->destructor == io_uring_destruct_scm) {
			__skb_unlink(skb, &hitlist);
			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
		}
	}

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* There could be io_uring registered files, just push them back to
	 * the inflight list
	 */
	list_for_each_entry_safe(u, next, &gc_candidates, link)
		list_move_tail(&u->link, &gc_inflight_list);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}