// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recursion on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file
 *		in question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
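
/*
 * Why a collector is needed at all: with SCM_RIGHTS a socket's file can
 * sit in another socket's receive queue, so reference cycles can form
 * that ordinary fput() accounting never breaks.  An illustrative
 * userspace sketch (not part of this file; error handling omitted):
 *
 *	int sv[2];
 *	char c = 0, cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = &c, .iov_len = 1 };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type = SCM_RIGHTS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &sv[0], sizeof(int));
 *	sendmsg(sv[1], &mh, 0);
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * After the sendmsg(), the only reference to sv[0]'s struct file lives
 * in an skb queued on sv[0]'s own receive queue: unreachable, yet never
 * freed by refcounting alone.
 */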

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

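/* Sockets suspected of being garbage, and the waitqueue that
 * wait_for_unix_gc() sleeps on; gc_candidates is protected by
 * unix_gc_lock.
 */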
static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

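/* Walk the receive queue of @x and invoke @func on every unix socket
 * candidate found among the SCM_RIGHTS descriptors of the queued skbs.
 * If @hitlist is non-NULL, unlink every skb that references at least
 * one candidate and collect it there.
 */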
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket this fd refers to, if any */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
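/* Apply @func to the candidates reachable from @x.  For a non-listening
 * socket that is just its own receive queue; for a listening socket the
 * queued embryo sockets are scanned as well, since their queues may
 * already hold SCM_RIGHTS skbs.
 */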
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}
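/* scan_children() callbacks.  They temporarily bias u->inflight while
 * unix_gc() hunts for cycles; every decrement is matched by an
 * increment before unix_gc_lock is dropped again.
 */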
static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
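/* Number of in-flight fds above which senders force a collection. */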
#define UNIX_INFLIGHT_TRIGGER_GC 16000

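/* Called on the sendmsg paths: kick the collector when too many fds
 * are already in flight, and do not return while a collection is in
 * progress.
 */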
void wait_for_unix_gc(void)
{
	/* If the number of inflight sockets is insane, force a
	 * garbage collect right now.
	 *
	 * Paired with the WRITE_ONCE() of unix_tot_inflight in
	 * unix_inflight()/unix_notinflight() and of gc_in_progress
	 * in unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();
	wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
}

/* The external entry point: unix_gc() */
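/* The algorithm, in outline:
 *
 *  1) Candidate selection: every in-flight socket whose file refcount
 *     equals its inflight count has no external references and might
 *     be garbage.
 *  2) For each candidate, decrement the inflight count of every
 *     candidate reachable from its queues.
 *  3) Candidates whose count stayed non-zero are reachable from
 *     outside; restore them, and recursively everything they
 *     reference, onto not_cycle_list.
 *  4) Whatever remains on gc_candidates is kept alive only by cycles.
 *     Collect the skbs pinning it into hitlist and purge them once
 *     unix_gc_lock is dropped.
 */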
void unix_gc(void)
{
	struct sk_buff *next_skb, *skb;
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, true);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to the queues, so we must take care to touch only
	 * candidates.
	 *
	 * Embryos, though never candidates themselves, affect which
	 * candidates are reachable by the garbage collector.  Before
	 * being added to a listener's queue, an embryo may already
	 * receive data carrying SCM_RIGHTS, potentially making the
	 * passed socket a candidate that is not yet reachable by the
	 * collector.  It becomes reachable once the embryo is
	 * enqueued.  Therefore, we must ensure that no SCM-laden
	 * embryo appears in a (candidate) listener's queue between
	 * consecutive scan_children() calls.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		struct sock *sk = &u->sk;
		long total_refs;

		total_refs = file_count(sk->sk_socket->file);

		BUG_ON(!u->inflight);
		BUG_ON(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);

			if (sk->sk_state == TCP_LISTEN) {
				unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
				unix_state_unlock(sk);
			}
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the counts decremented above for the children of
	 * every candidate that itself still has remaining references.
	 * Do this recursively, so that only the sockets which form
	 * cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore the original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

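		/* u->oob_skb holds an extra reference on an skb that may
		 * still sit in this queue; drop it here, or purging the
		 * receive queues alone would leave the cycle pinned.
		 */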
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* We need io_uring to clean up its registered files itself, so
	 * ignore all io_uring originated skbs.  That is fine because
	 * io_uring doesn't keep references to other io_uring instances,
	 * so killing all the other files in the cycle will put the
	 * io_uring references, forcing it through the normal release
	 * path and eventually putting the registered files.
	 */
	skb_queue_walk_safe(&hitlist, skb, next_skb) {
		if (skb->destructor == io_uring_destruct_scm) {
			__skb_unlink(skb, &hitlist);
			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
		}
	}

	/* Here we are.  Hitlist is filled.  Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* There could be io_uring registered files; just push them back
	 * to the inflight list.
	 */
	list_for_each_entry_safe(u, next, &gc_candidates, link)
		list_move_tail(&u->link, &gc_inflight_list);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	wake_up(&unix_gc_wait);

out:
	spin_unlock(&unix_gc_lock);
}