v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

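/* In this kernel era wait_for_unix_gc() is called from the AF_UNIX
 * sendmsg paths, so once the number of in-flight sockets crosses
 * UNIX_INFLIGHT_TRIGGER_GC, senders themselves trigger a collection
 * rather than letting the in-flight count grow without bound.
 */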
void wait_for_unix_gc(void)
{
	/* If the number of inflight sockets is insane,
	 * force a garbage collect right now.
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight() and unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct sk_buff *next_skb, *skb;
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, true);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to a queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
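		/* total_refs == inflight_refs means that every reference to
		 * this socket's file is held by SCM_RIGHTS messages sitting
		 * in receive queues: no user-space fd refers to it, so only
		 * other in-flight sockets can be keeping it alive.
		 */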
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates
	 * which have remaining references.  Do this recursively, so
	 * only those remain which form cyclic references.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore the original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* We need io_uring to clean up its registered files; ignore all
	 * io_uring originated skbs.  This is fine, as io_uring doesn't keep
	 * references to other io_uring instances, so killing all other files
	 * in the cycle will put all io_uring references, forcing it to go
	 * through the normal release path and eventually put the registered
	 * files.
	 */
	skb_queue_walk_safe(&hitlist, skb, next_skb) {
		if (skb->destructor == io_uring_destruct_scm) {
			__skb_unlink(skb, &hitlist);
			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
		}
	}

	/* Here we are. Hitlist is filled. Die. */
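	/* Freeing the queued skbs runs their destructors, which drop the
	 * file references attached via SCM_RIGHTS; that releases the last
	 * references to the sockets forming the cycle(s).
	 */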
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* There could be io_uring registered files, just push them back to
	 * the inflight list
	 */
	list_for_each_entry_safe(u, next, &gc_candidates, link)
		list_move_tail(&u->link, &gc_inflight_list);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}
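
The cycle described in the header comment ("we can send the descriptor of foo to bar and vice versa") is easy to produce from user space. The sketch below is illustrative only and is not part of either kernel version shown here; the helper name send_fd is invented for the example. It sends each end of a socketpair over the other end via SCM_RIGHTS and then closes both descriptors, leaving two struct files that reference each other only through in-flight skbs.

/* Hypothetical user-space demo: build an SCM_RIGHTS reference cycle. */
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_fd(int via, int fd)
{
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	char ctrl[CMSG_SPACE(sizeof(int))];
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	memset(ctrl, 0, sizeof(ctrl));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	sendmsg(via, &msg, 0);	/* error handling omitted for brevity */
}

int main(void)
{
	int sv[2];

	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
	send_fd(sv[0], sv[1]);	/* sv[1]'s file is now in flight on sv[0]'s queue */
	send_fd(sv[1], sv[0]);	/* sv[0]'s file is now in flight on sv[1]'s queue */
	close(sv[0]);		/* each file keeps one reference: the in-flight one */
	close(sv[1]);
	return 0;
}

After both close() calls, neither file's reference count can reach zero, so the socket release path never runs for either socket and their receive queues are never flushed. For each socket, file_count() then equals its inflight count, which is exactly the candidate condition tested in unix_gc() above. Nothing in this program triggers the collection itself; the cycle is reclaimed the next time unix_gc() runs, for example when another AF_UNIX socket is released or when a sender crosses UNIX_INFLIGHT_TRIGGER_GC.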
v3.5.6
 
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

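/* Return the AF_UNIX struct sock behind @filp, or NULL if @filp does not
 * refer to a usable AF_UNIX socket.  Note that O_PATH descriptors
 * (FMODE_PATH) are skipped: they cannot be used to send data over the
 * socket, so the GC does not track them as in-flight references.
 */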
struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/*
	 *	Socket ?
	 */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 *	PF_UNIX ?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 *	Keep track of the number of times a file descriptor for an
 *	AF_UNIX socket is currently in flight.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
		spin_unlock(&unix_gc_lock);
	}
}

void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}
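/* In this kernel version both helpers are driven from the SCM_RIGHTS
 * paths in af_unix.c: unix_attach_fds() calls unix_inflight() for every
 * descriptor attached to an skb, and unix_detach_fds() calls
 * unix_notinflight() once the descriptors reach the receiving process.
 */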

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/*
		 *	Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 *	Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 *	Get the socket the fd matches
				 *	if it indeed does so
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (u->gc_candidate) {
						hit = true;
						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/*
	 * If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (u->gc_maybe_cycle)
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress = false;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/*
	 * If the number of inflight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to a queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			u->gc_candidate = 1;
			u->gc_maybe_cycle = 1;
		}
	}

	/*
	 * Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates
	 * which have remaining references.  Do this recursively, so
	 * only those remain which form cyclic references.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			u->gc_maybe_cycle = 0;
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		u->gc_candidate = 0;
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/*
	 * Now gc_candidates contains only garbage.  Restore the original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}