// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
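
/* Illustrative sketch (not verbatim kernel source): scan_inflight() reaches
 * the passed descriptors through the skb control block.  The relevant
 * definitions, roughly as in include/net/scm.h and include/net/af_unix.h of
 * this era, with unrelated fields elided:
 */
#if 0 /* reference sketch only */
struct scm_fp_list {
	short			count;
	short			max;
	struct user_struct	*user;
	struct file		*fp[SCM_MAX_FD];
};

struct unix_skb_parms {
	struct pid		*pid;	/* skb credentials */
	kuid_t			uid;
	kgid_t			gid;
	struct scm_fp_list	*fp;	/* passed fds, or NULL */
};

#define UNIXCB(skb)	(*(struct unix_skb_parms *)&((skb)->cb))
#endif
/* So UNIXCB(skb).fp is non-NULL exactly when a queued datagram carries
 * SCM_RIGHTS descriptors, and fp->fp[0..count-1] are the files in flight.
 */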

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If the number of in-flight sockets is insane,
	 * force a garbage collection right now.
	 * Paired with the WRITE_ONCE() of unix_tot_inflight in
	 * unix_inflight()/unix_notinflight() and of gc_in_progress
	 * in unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}
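
/* What "in-flight" means from user space: a descriptor is in flight while it
 * sits, unreceived, in some AF_UNIX receive queue.  An illustrative
 * user-space sketch (not kernel code; error handling elided):
 */
#if 0 /* user-space example */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void send_fd(int via, int fd)
{
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	char ctrl[CMSG_SPACE(sizeof(int))];
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	memset(ctrl, 0, sizeof(ctrl));
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	sendmsg(via, &msg, 0);		/* fd is now "in flight" */
}
#endif
/* Until the peer recvmsg()s the message, the passed fd counts towards
 * unix_tot_inflight; once that exceeds UNIX_INFLIGHT_TRIGGER_GC, the next
 * sender through here kicks unix_gc().
 */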

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct sk_buff *next_skb, *skb;
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, true);

	/* First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference. Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues. Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates,
	 * which have remaining references. Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage. Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* We need io_uring to clean its registered files, ignore all io_uring
	 * originated skbs. It's fine as io_uring doesn't keep references to
	 * other io_uring instances and so killing all other files in the cycle
	 * will put all io_uring references, forcing it to go through the normal
	 * release path eventually putting registered files.
	 */
	skb_queue_walk_safe(&hitlist, skb, next_skb) {
		if (skb->destructor == io_uring_destruct_scm) {
			__skb_unlink(skb, &hitlist);
			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
		}
	}

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* There could be io_uring registered files, just push them back to
	 * the inflight list
	 */
	list_for_each_entry_safe(u, next, &gc_candidates, link)
		list_move_tail(&u->link, &gc_inflight_list);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}
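
/* Everything from here down is the later rewrite of this garbage collector
 * (the Linux 6.9-era work by Kuniyuki Iwashima).  Instead of the candidate
 * scan above, it maintains an explicit graph: each inflight AF_UNIX socket
 * is a vertex, each SCM_RIGHTS message adds edges from the socket whose fd
 * is in flight to its receiver, and Tarjan's algorithm partitions the graph
 * into strongly connected components (SCCs).  An SCC is garbage exactly when
 * no descriptor in it is reachable from user space any more.
 */
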
struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX ? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}
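
/* A note on the FMODE_PATH test above (our reading, not original wording):
 * an on-disk socket inode is S_ISSOCK but cannot be open()ed normally (that
 * fails with ENXIO); it can only be opened with O_PATH.  Such a file is not
 * backed by a sockfs inode, so SOCKET_I() on it would be bogus.  Skipping
 * FMODE_PATH files means the GC never dereferences one.
 */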

static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
	/* If an embryo socket has an fd,
	 * the listener indirectly holds the fd's refcnt.
	 */
	if (edge->successor->listener)
		return unix_sk(edge->successor->listener)->vertex;

	return edge->successor->vertex;
}

static bool unix_graph_maybe_cyclic;
static bool unix_graph_grouped;

static void unix_update_graph(struct unix_vertex *vertex)
{
	/* If the receiver socket is not inflight, no cyclic
	 * reference could be formed.
	 */
	if (!vertex)
		return;

	unix_graph_maybe_cyclic = true;
	unix_graph_grouped = false;
}

static LIST_HEAD(unix_unvisited_vertices);

enum unix_vertex_index {
	UNIX_VERTEX_INDEX_MARK1,
	UNIX_VERTEX_INDEX_MARK2,
	UNIX_VERTEX_INDEX_START,
};

static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;

static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
	struct unix_vertex *vertex = edge->predecessor->vertex;

	if (!vertex) {
		vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
		vertex->index = unix_vertex_unvisited_index;
		vertex->out_degree = 0;
		INIT_LIST_HEAD(&vertex->edges);
		INIT_LIST_HEAD(&vertex->scc_entry);

		list_move_tail(&vertex->entry, &unix_unvisited_vertices);
		edge->predecessor->vertex = vertex;
	}

	vertex->out_degree++;
	list_add_tail(&edge->vertex_entry, &vertex->edges);

	unix_update_graph(unix_edge_successor(edge));
}

static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
	struct unix_vertex *vertex = edge->predecessor->vertex;

	if (!fpl->dead)
		unix_update_graph(unix_edge_successor(edge));

	list_del(&edge->vertex_entry);
	vertex->out_degree--;

	if (!vertex->out_degree) {
		edge->predecessor->vertex = NULL;
		list_move_tail(&vertex->entry, &fpl->vertices);
	}
}
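
/* Edge orientation cheat-sheet: when the fd of socket A is sent over socket
 * B, the new edge has predecessor == A (the socket whose fd is in flight)
 * and successor == the receiving socket.  An SCM_RIGHTS message carrying N
 * AF_UNIX fds contributes N such edges, all hanging off their predecessors'
 * vertices; a vertex is allocated lazily from fpl->vertices the first time
 * its socket goes in flight.
 */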

static void unix_free_vertices(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex, *next_vertex;

	list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
		list_del(&vertex->entry);
		kfree(vertex);
	}
}

static DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;

void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
	int i = 0, j = 0;

	spin_lock(&unix_gc_lock);

	if (!fpl->count_unix)
		goto out;

	do {
		struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
		struct unix_edge *edge;

		if (!inflight)
			continue;

		edge = fpl->edges + i++;
		edge->predecessor = inflight;
		edge->successor = receiver;

		unix_add_edge(fpl, edge);
	} while (i < fpl->count_unix);

	receiver->scm_stat.nr_unix_fds += fpl->count_unix;
	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
out:
	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);

	spin_unlock(&unix_gc_lock);

	fpl->inflight = true;

	unix_free_vertices(fpl);
}

void unix_del_edges(struct scm_fp_list *fpl)
{
	struct unix_sock *receiver;
	int i = 0;

	spin_lock(&unix_gc_lock);

	if (!fpl->count_unix)
		goto out;

	do {
		struct unix_edge *edge = fpl->edges + i++;

		unix_del_edge(fpl, edge);
	} while (i < fpl->count_unix);

	if (!fpl->dead) {
		receiver = fpl->edges[0].successor;
		receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
	}
	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
out:
	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);

	spin_unlock(&unix_gc_lock);

	fpl->inflight = false;
}

void unix_update_edges(struct unix_sock *receiver)
{
	/* nr_unix_fds is only updated under unix_state_lock().
	 * If it's 0 here, the embryo socket is not part of the
	 * inflight graph, and GC will not see it, so no lock needed.
	 */
	if (!receiver->scm_stat.nr_unix_fds) {
		receiver->listener = NULL;
	} else {
		spin_lock(&unix_gc_lock);
		unix_update_graph(unix_sk(receiver->listener)->vertex);
		receiver->listener = NULL;
		spin_unlock(&unix_gc_lock);
	}
}

int unix_prepare_fpl(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex;
	int i;

	if (!fpl->count_unix)
		return 0;

	for (i = 0; i < fpl->count_unix; i++) {
		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
		if (!vertex)
			goto err;

		list_add(&vertex->entry, &fpl->vertices);
	}

	fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
				    GFP_KERNEL_ACCOUNT);
	if (!fpl->edges)
		goto err;

	return 0;

err:
	unix_free_vertices(fpl);
	return -ENOMEM;
}

void unix_destroy_fpl(struct scm_fp_list *fpl)
{
	if (fpl->inflight)
		unix_del_edges(fpl);

	kvfree(fpl->edges);
	unix_free_vertices(fpl);
}
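
/* Lifecycle of an scm_fp_list under this scheme, as a hedged sketch of the
 * call order (caller side simplified, not verbatim from af_unix.c):
 */
#if 0 /* illustrative sketch only */
	/* sender: sendmsg() with SCM_RIGHTS */
	unix_prepare_fpl(fpl);			/* preallocate vertices + edge array */
	unix_add_edges(fpl, receiver);		/* splice into the global graph */

	/* receiver: recvmsg() consumes the message */
	unix_del_edges(fpl);			/* unlink the edges again */

	/* last reference to the message goes away */
	unix_destroy_fpl(fpl);			/* del_edges() if still inflight,
						 * then free edges and vertices */
#endif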

static bool unix_vertex_dead(struct unix_vertex *vertex)
{
	struct unix_edge *edge;
	struct unix_sock *u;
	long total_ref;

	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		struct unix_vertex *next_vertex = unix_edge_successor(edge);

		/* The vertex's fd can be received by a non-inflight socket. */
		if (!next_vertex)
			return false;

		/* The vertex's fd can be received by an inflight socket in
		 * another SCC.
		 */
		if (next_vertex->scc_index != vertex->scc_index)
			return false;
	}

	/* No receiver exists out of the same SCC. */

	edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
	u = edge->predecessor;
	total_ref = file_count(u->sk.sk_socket->file);

	/* If not close()d, total_ref > out_degree. */
	if (total_ref != vertex->out_degree)
		return false;

	return true;
}
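
/* Worked example of the test above: socket A's only remaining file reference
 * is an SCM_RIGHTS message sitting in A's own receive queue.  The SCC is
 * {A}, every edge stays inside it, and out_degree == 1 == file_count(), so
 * the vertex is dead.  Had user space still held A's fd, file_count() would
 * be 2 != out_degree and the SCC would survive.
 */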

static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
{
	struct unix_vertex *vertex;

	list_for_each_entry_reverse(vertex, scc, scc_entry) {
		struct sk_buff_head *queue;
		struct unix_edge *edge;
		struct unix_sock *u;

		edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
		u = edge->predecessor;
		queue = &u->sk.sk_receive_queue;

		spin_lock(&queue->lock);

		if (u->sk.sk_state == TCP_LISTEN) {
			struct sk_buff *skb;

			skb_queue_walk(queue, skb) {
				struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;

				spin_lock(&embryo_queue->lock);
				skb_queue_splice_init(embryo_queue, hitlist);
				spin_unlock(&embryo_queue->lock);
			}
		} else {
			skb_queue_splice_init(queue, hitlist);
		}

		spin_unlock(&queue->lock);
	}
}

static bool unix_scc_cyclic(struct list_head *scc)
{
	struct unix_vertex *vertex;
	struct unix_edge *edge;

	/* SCC containing multiple vertices ? */
	if (!list_is_singular(scc))
		return true;

	vertex = list_first_entry(scc, typeof(*vertex), scc_entry);

	/* Self-reference or an embryo-listener circle ? */
	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		if (unix_edge_successor(edge) == vertex)
			return true;
	}

	return false;
}
392
393static LIST_HEAD(unix_visited_vertices);
394static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
395
396static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
397 struct sk_buff_head *hitlist)
398{
399 LIST_HEAD(vertex_stack);
400 struct unix_edge *edge;
401 LIST_HEAD(edge_stack);
402
403next_vertex:
404 /* Push vertex to vertex_stack and mark it as on-stack
405 * (index >= UNIX_VERTEX_INDEX_START).
406 * The vertex will be popped when finalising SCC later.
407 */
408 list_add(&vertex->scc_entry, &vertex_stack);
409
410 vertex->index = *last_index;
411 vertex->scc_index = *last_index;
412 (*last_index)++;
413
414 /* Explore neighbour vertices (receivers of the current vertex's fd). */
415 list_for_each_entry(edge, &vertex->edges, vertex_entry) {
416 struct unix_vertex *next_vertex = unix_edge_successor(edge);
417
418 if (!next_vertex)
419 continue;
420
421 if (next_vertex->index == unix_vertex_unvisited_index) {
422 /* Iterative deepening depth first search
423 *
424 * 1. Push a forward edge to edge_stack and set
425 * the successor to vertex for the next iteration.
426 */
427 list_add(&edge->stack_entry, &edge_stack);
428
429 vertex = next_vertex;
430 goto next_vertex;
431
432 /* 2. Pop the edge directed to the current vertex
433 * and restore the ancestor for backtracking.
434 */
435prev_vertex:
436 edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
437 list_del_init(&edge->stack_entry);
438
439 next_vertex = vertex;
440 vertex = edge->predecessor->vertex;
441
442 /* If the successor has a smaller scc_index, two vertices
443 * are in the same SCC, so propagate the smaller scc_index
444 * to skip SCC finalisation.
445 */
446 vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
447 } else if (next_vertex->index != unix_vertex_grouped_index) {
448 /* Loop detected by a back/cross edge.
449 *
450 * The successor is on vertex_stack, so two vertices are in
451 * the same SCC. If the successor has a smaller *scc_index*,
452 * propagate it to skip SCC finalisation.
453 */
454 vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
455 } else {
456 /* The successor was already grouped as another SCC */
457 }
458 }
459
460 if (vertex->index == vertex->scc_index) {
461 struct unix_vertex *v;
462 struct list_head scc;
463 bool scc_dead = true;
464
465 /* SCC finalised.
466 *
467 * If the scc_index was not updated, all the vertices above on
468 * vertex_stack are in the same SCC. Group them using scc_entry.
469 */
470 __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
471
472 list_for_each_entry_reverse(v, &scc, scc_entry) {
473 /* Don't restart DFS from this vertex in unix_walk_scc(). */
474 list_move_tail(&v->entry, &unix_visited_vertices);
475
476 /* Mark vertex as off-stack. */
477 v->index = unix_vertex_grouped_index;
478
479 if (scc_dead)
480 scc_dead = unix_vertex_dead(v);
481 }
482
483 if (scc_dead)
484 unix_collect_skb(&scc, hitlist);
485 else if (!unix_graph_maybe_cyclic)
486 unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
487
488 list_del(&scc);
489 }
490
491 /* Need backtracking ? */
492 if (!list_empty(&edge_stack))
493 goto prev_vertex;
494}
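
/* The walk above is Tarjan's SCC algorithm made non-recursive with an
 * explicit edge stack; vertex->index doubles as the visited/on-stack mark
 * and scc_index plays the classic "lowlink" role.  For contrast, a minimal
 * recursive user-space rendition over an adjacency list (hypothetical types,
 * not kernel code):
 */
#if 0 /* user-space example */
#define MAXV 1024			/* assumed bound on vertex count */

struct v {
	int index, lowlink;		/* index < 0 means "unvisited" */
	int on_stack;
	int nadj;
	struct v **adj;
};

static struct v *stack[MAXV];
static int top, next_index;

static void tarjan(struct v *u)
{
	u->index = u->lowlink = next_index++;
	stack[top++] = u;
	u->on_stack = 1;

	for (int i = 0; i < u->nadj; i++) {
		struct v *w = u->adj[i];

		if (w->index < 0) {		/* tree edge: recurse */
			tarjan(w);
			if (w->lowlink < u->lowlink)
				u->lowlink = w->lowlink;
		} else if (w->on_stack) {	/* back/cross edge */
			if (w->index < u->lowlink)
				u->lowlink = w->index;
		}
	}

	if (u->lowlink == u->index) {		/* u roots an SCC */
		struct v *w;

		do {
			w = stack[--top];
			w->on_stack = 0;
			/* w is a member of u's SCC */
		} while (w != u);
	}
}
#endif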

static void unix_walk_scc(struct sk_buff_head *hitlist)
{
	unsigned long last_index = UNIX_VERTEX_INDEX_START;

	unix_graph_maybe_cyclic = false;

	/* Visit every vertex exactly once.
	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
	 */
	while (!list_empty(&unix_unvisited_vertices)) {
		struct unix_vertex *vertex;

		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
		__unix_walk_scc(vertex, &last_index, hitlist);
	}

	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);

	unix_graph_grouped = true;
}

static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
{
	unix_graph_maybe_cyclic = false;

	while (!list_empty(&unix_unvisited_vertices)) {
		struct unix_vertex *vertex;
		struct list_head scc;
		bool scc_dead = true;

		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
		list_add(&scc, &vertex->scc_entry);

		list_for_each_entry_reverse(vertex, &scc, scc_entry) {
			list_move_tail(&vertex->entry, &unix_visited_vertices);

			if (scc_dead)
				scc_dead = unix_vertex_dead(vertex);
		}

		if (scc_dead)
			unix_collect_skb(&scc, hitlist);
		else if (!unix_graph_maybe_cyclic)
			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);

		list_del(&scc);
	}

	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
}
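
/* Why the fast path above is safe: unix_walk_scc() leaves every SCC grouped
 * through scc_entry and sets unix_graph_grouped.  Any edge change in between
 * goes through unix_update_graph(), which clears unix_graph_grouped again,
 * so this function only runs while the previously computed SCC partition is
 * still valid and merely re-tests each group for deadness.
 */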

static bool gc_in_progress;

static void __unix_gc(struct work_struct *work)
{
	struct sk_buff_head hitlist;
	struct sk_buff *skb;

	spin_lock(&unix_gc_lock);

	if (!unix_graph_maybe_cyclic) {
		spin_unlock(&unix_gc_lock);
		goto skip_gc;
	}

	__skb_queue_head_init(&hitlist);

	if (unix_graph_grouped)
		unix_walk_scc_fast(&hitlist);
	else
		unix_walk_scc(&hitlist);

	spin_unlock(&unix_gc_lock);

	skb_queue_walk(&hitlist, skb) {
		if (UNIXCB(skb).fp)
			UNIXCB(skb).fp->dead = true;
	}

	__skb_queue_purge(&hitlist);
skip_gc:
	WRITE_ONCE(gc_in_progress, false);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If the number of in-flight sockets is insane,
	 * force a garbage collection right now.
	 *
	 * Paired with the WRITE_ONCE() of unix_tot_inflight in
	 * unix_add_edges()/unix_del_edges() and of gc_in_progress
	 * in __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
}
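
/* End-to-end illustration of the garbage this file reclaims: a socket kept
 * alive only by its own receive queue.  User-space sketch (not kernel code;
 * reuses the send_fd() helper sketched earlier; error handling elided):
 */
#if 0 /* user-space example */
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2];

	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
	send_fd(sv[0], sv[1]);	/* sv[1]'s fd now sits in sv[1]'s own queue */
	close(sv[0]);
	close(sv[1]);
	/* No user-space reference is left, yet sv[1]'s struct file is held
	 * by the message queued on itself: a reference cycle.  Only this
	 * garbage collector, kicked by a later close() or sendmsg(), can
	 * reclaim it.
	 */
	return 0;
}
#endif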