/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object with a mark bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recursion on the first born instead of an immediate push/pop
 *  - we gather everything that should not be killed into a tree,
 *    and the stack is just the path from the root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push the entire root set; process it in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		The graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. The current code chokes on that.
 *		Fix: move the SCM_RIGHTS ones into a separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may create
 *		a new unix_socket while we are in the middle of the sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything at
 *		the beginning and unmark the non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! The new code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. The old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason for random oopses in filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack. Now we keep the
 *		tree with its root in a dummy node, plus a pointer (gc_current)
 *		to one of the nodes. The stack is represented as the path from
 *		gc_current to the dummy. Unmark now means "add to tree".
 *		Push == "make it a son of gc_current". Pop == "move gc_current
 *		to its parent". We keep only pointers to parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added a missing check for ->dead in listen-queue
 *		scanning.
 *
 */
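/*
 * Overview: unix_gc() is a mark-and-sweep pass over all AF_UNIX sockets.
 * Every socket starts out marked (gc_tree == GC_ORPHAN). Sockets whose
 * struct file is held more times than it is in flight form the root set;
 * everything reachable from them through SCM_RIGHTS descriptors queued in
 * receive buffers is unmarked. Whatever remains marked is unreachable, and
 * the skbs carrying its descriptors are collected on a hitlist and purged.
 */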

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

#define GC_HEAD		((struct sock *)(-1))
#define GC_ORPHAN	((struct sock *)(-3))
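/*
 * Sentinels stored in unix_sk(s)->gc_tree: GC_ORPHAN means the socket is
 * still marked (not yet found reachable), while GC_HEAD terminates the
 * implicit mark stack threaded through the ->gc_tree pointers.
 */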

static struct sock *gc_current = GC_HEAD; /* stack of objects to mark */

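/* Total number of AF_UNIX socket files currently in flight, system-wide. */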
atomic_t unix_tot_inflight = ATOMIC_INIT(0);


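/*
 * Map a struct file to the AF_UNIX socket behind it, or return NULL if the
 * file does not refer to a PF_UNIX socket.
 */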
static struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/*
	 *	Is it a socket?
	 */
	if (S_ISSOCK(inode->i_mode)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 *	Is it a PF_UNIX socket?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 *	Keep a count of how many times each file descriptor is in flight,
 *	if it refers to an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		atomic_inc(&unix_sk(s)->inflight);
		atomic_inc(&unix_tot_inflight);
	}
}

void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		atomic_dec(&unix_sk(s)->inflight);
		atomic_dec(&unix_tot_inflight);
	}
}
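/*
 * These counters are driven from the SCM_RIGHTS handling in af_unix.c. A
 * minimal sketch of the send-side caller (modelled on unix_attach_fds();
 * details vary by kernel version) looks roughly like this:
 *
 *	static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 *	{
 *		int i;
 *
 *		for (i = scm->fp->count - 1; i >= 0; i--)
 *			unix_inflight(scm->fp->fp[i]);
 *		UNIXCB(skb).fp = scm->fp;
 *		scm->fp = NULL;
 *	}
 *
 * The receive path and the skb destructor call unix_notinflight() for each
 * descriptor when it is detached again.
 */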


/*
 *	Garbage Collector Support Functions
 */

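/*
 * The mark stack needs no separate storage: each pushed socket's ->gc_tree
 * points at the socket pushed before it, gc_current is the top of the
 * stack, and GC_HEAD marks the bottom.
 */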
static inline struct sock *pop_stack(void)
{
	struct sock *p = gc_current;
	gc_current = unix_sk(p)->gc_tree;
	return p;
}

static inline int empty_stack(void)
{
	return gc_current == GC_HEAD;
}

static void maybe_unmark_and_push(struct sock *x)
{
	struct unix_sock *u = unix_sk(x);

	if (u->gc_tree != GC_ORPHAN)
		return;
	sock_hold(x);
	u->gc_tree = gc_current;
	gc_current = x;
}
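/*
 * maybe_unmark_and_push() takes a reference with sock_hold(), so a socket
 * cannot be freed while it sits on the stack; the matching sock_put() is
 * done in the mark loop of unix_gc() once the socket has been processed.
 */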


/* The external entry point: unix_gc() */

void unix_gc(void)
{
	static DEFINE_MUTEX(unix_gc_sem);
	int i;
	struct sock *s;
	struct sk_buff_head hitlist;
	struct sk_buff *skb;

	/*
	 *	Avoid a recursive GC.
	 */

	if (!mutex_trylock(&unix_gc_sem))
		return;

	spin_lock(&unix_table_lock);

	forall_unix_sockets(i, s)
	{
		unix_sk(s)->gc_tree = GC_ORPHAN;
	}
	/*
	 *	Everything is now marked
	 */

	/* Invariant to be maintained:
		- everything unmarked is either:
		-- (a) on the stack, or
		-- (b) has all of its children unmarked
		- everything on the stack is always unmarked
		- nothing is ever pushed onto the stack twice, because:
		-- nothing previously unmarked is ever pushed on the stack
	 */

	/*
	 *	Push the root set
	 */

	forall_unix_sockets(i, s)
	{
		int open_count = 0;

		/*
		 *	If not all instances of the descriptor are in
		 *	flight, then the socket is in use.
		 *
		 *	Special case: when socket s is embryonic, it may be
		 *	hashed but not yet in the queue of a listening socket.
		 *	In this case (see unix_create1()) we set an artificial
		 *	negative inflight counter to close the race window.
		 *	It is a trick, of course, and a dirty one.
		 */
		if (s->sk_socket && s->sk_socket->file)
			open_count = file_count(s->sk_socket->file);
		if (open_count > atomic_read(&unix_sk(s)->inflight))
			maybe_unmark_and_push(s);
	}
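	/*
	 * Example: a socket whose file has file_count() == 2 while only one
	 * copy is in flight inside an SCM_RIGHTS message is still held by a
	 * user process, so open_count (2) > inflight (1) and the socket
	 * enters the root set above.
	 */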

	/*
	 *	Mark phase
	 */

	while (!empty_stack())
	{
		struct sock *x = pop_stack();
		struct sock *sk;

		spin_lock(&x->sk_receive_queue.lock);
		skb = skb_peek(&x->sk_receive_queue);

		/*
		 *	Loop through all but the first born
		 */

		while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) {
			/*
			 *	Do we have file descriptors?
			 */
			if (UNIXCB(skb).fp) {
				/*
				 *	Process the descriptors of this socket
				 */
				int nfd = UNIXCB(skb).fp->count;
				struct file **fp = UNIXCB(skb).fp->fp;

				while (nfd--) {
					/*
					 *	Get the socket the fd matches,
					 *	if it indeed does so
					 */
					sk = unix_get_socket(*fp++);
					if (sk)
						maybe_unmark_and_push(sk);
				}
			}
			/* We have to scan not-yet-accepted ones too */
			if (x->sk_state == TCP_LISTEN)
				maybe_unmark_and_push(skb->sk);
			skb = skb->next;
		}
		spin_unlock(&x->sk_receive_queue.lock);
		sock_put(x);
	}
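	/*
	 * The stack is now empty: everything reachable from the root set has
	 * had its ->gc_tree moved off GC_ORPHAN, so only garbage candidates
	 * remain marked for the sweep below.
	 */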

	skb_queue_head_init(&hitlist);

	forall_unix_sockets(i, s)
	{
		struct unix_sock *u = unix_sk(s);

		if (u->gc_tree == GC_ORPHAN) {
			struct sk_buff *nextsk;

			spin_lock(&s->sk_receive_queue.lock);
			skb = skb_peek(&s->sk_receive_queue);
			while (skb &&
			       skb != (struct sk_buff *)&s->sk_receive_queue) {
				nextsk = skb->next;
				/*
				 *	Do we have file descriptors?
				 */
				if (UNIXCB(skb).fp) {
					__skb_unlink(skb,
						     &s->sk_receive_queue);
					__skb_queue_tail(&hitlist, skb);
				}
				skb = nextsk;
			}
			spin_unlock(&s->sk_receive_queue.lock);
		}
		u->gc_tree = GC_ORPHAN;
	}
	spin_unlock(&unix_table_lock);

	/*
	 *	Here we are. Hitlist is filled. Die.
	 */

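	/*
	 * Purge outside unix_table_lock: freeing these skbs drops the final
	 * references to the queued files, and fput() may block (see the
	 * changelog above), which must not happen under a spinlock.
	 */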
	__skb_queue_purge(&hitlist);
	mutex_unlock(&unix_gc_sem);
}