v4.6
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
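/*
 * [Editor's illustration -- not part of the kernel file]  The cycle
 * described in Al Viro's changelog entry above is easy to reproduce from
 * userspace: pass each end of a socketpair over the other via SCM_RIGHTS,
 * then close both descriptors.  A minimal sketch, assuming Linux and the
 * standard cmsg(3) idiom (error handling omitted):
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

static void send_fd(int over, int fd)
{
	union {			/* properly aligned cmsg buffer, as in cmsg(3) */
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	sendmsg(over, &msg, 0);
}

int main(void)
{
	int sv[2];

	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
	send_fd(sv[0], sv[0]);	/* a ref to sv[0] lands in sv[1]'s queue */
	send_fd(sv[1], sv[1]);	/* a ref to sv[1] lands in sv[0]'s queue */
	close(sv[0]);		/* no descriptors remain, yet each file  */
	close(sv[1]);		/* keeps the other alive until unix_gc() */
	return 0;
}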

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/* PF_UNIX ? */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/* Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */

void unix_inflight(struct user_struct *user, struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	spin_lock(&unix_gc_lock);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
	}
	user->unix_inflight++;
	spin_unlock(&unix_gc_lock);
}

void unix_notinflight(struct user_struct *user, struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	spin_lock(&unix_gc_lock);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		BUG_ON(list_empty(&u->link));

		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
	}
	user->unix_inflight--;
	spin_unlock(&unix_gc_lock);
}
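
/*
 * [Editor's note]  These two helpers are driven from the SCM_RIGHTS
 * attach/detach paths in net/unix/af_unix.c: roughly, unix_attach_fds()
 * calls unix_inflight() once per passed descriptor when a message is
 * queued, and unix_detach_fds() calls unix_notinflight() when it is
 * received or the skb is destroyed.  u->inflight therefore counts how
 * many queued, not-yet-received copies of the descriptor exist.
 */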

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates,
	 * which have remaining references.  Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);
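	/*
	 * [Editor's note]  A worked micro-example of the two passes above,
	 * with hypothetical sockets A, B, D, E:
	 *
	 *   A and B hold each other in flight and both fds are closed:
	 *     file_count == inflight == 1 for each -> both become candidates;
	 *     the dec_inflight pass drops both counts to 0;
	 *     the cursor loop never rescues them   -> they stay on gc_candidates.
	 *
	 *   D's fd is closed, but D sits in the queue of E, whose fd is open:
	 *     file_count == inflight == 1 for D    -> D becomes a candidate;
	 *     E is not a candidate, so its queue is never scanned and D's
	 *     inflight stays at 1                  -> D joins not_cycle_list.
	 */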

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}
v6.9.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX ? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}

DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);
static LIST_HEAD(gc_inflight_list);

/* Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		if (!u->inflight) {
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			WARN_ON_ONCE(list_empty(&u->link));
		}
		u->inflight++;

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);

	spin_unlock(&unix_gc_lock);
}

void unix_notinflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(list_empty(&u->link));

		u->inflight--;
		if (!u->inflight)
			list_del_init(&u->link);

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);

	spin_unlock(&unix_gc_lock);
}

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct unix_sock *u = unix_get_socket(*fp++);

				/* Ignore non-candidates, they could have been added
				 * to the queues after starting the garbage collection
				 */
				if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
					hit = true;

					func(u);
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;

static void __unix_gc(struct work_struct *work)
{
	struct sk_buff_head hitlist;
	struct unix_sock *u, *next;
	LIST_HEAD(not_cycle_list);
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 *
	 * Embryos, though never candidates themselves, affect which
	 * candidates are reachable by the garbage collector.  Before
	 * being added to a listener's queue, an embryo may already
	 * receive data carrying SCM_RIGHTS, potentially making the
	 * passed socket a candidate that is not yet reachable by the
	 * collector.  It becomes reachable once the embryo is
	 * enqueued.  Therefore, we must ensure that no SCM-laden
	 * embryo appears in a (candidate) listener's queue between
	 * consecutive scan_children() calls.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		struct sock *sk = &u->sk;
		long total_refs;

		total_refs = file_count(sk->sk_socket->file);

		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);

			if (sk->sk_state == TCP_LISTEN) {
				unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
				unix_state_unlock(sk);
			}
		}
	}

	/* Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates,
	 * which have remaining references.  Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	WARN_ON_ONCE(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	spin_unlock(&unix_gc_lock);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
}
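
/*
 * [Editor's note]  SCM_MAX_FD is 253 (include/net/scm.h) in these
 * kernels, so UNIX_INFLIGHT_SANE_USER works out to 253 * 8 = 2024:
 * once a user has that many sent-but-unreceived AF_UNIX sockets in
 * flight, a sender passing more of them blocks in flush_work() until
 * any in-progress collection completes.
 */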