net/unix/garbage.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * NET3:	Garbage Collector For AF_UNIX sockets
  4 *
  5 * Garbage Collector:
  6 *	Copyright (C) Barak A. Pearlmutter.
  7 *
  8 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
  9 * If it doesn't work blame me, it worked when Barak sent it.
 10 *
 11 * Assumptions:
 12 *
 13 *  - object w/ a bit
 14 *  - free list
 15 *
 16 * Current optimizations:
 17 *
 18 *  - explicit stack instead of recursion
 19 *  - tail recurse on first born instead of immediate push/pop
 20 *  - we gather the stuff that should not be killed into tree
 21 *    and stack is just a path from root to the current pointer.
 22 *
 23 *  Future optimizations:
 24 *
 25 *  - don't just push entire root set; process in place
 26 *
 27 *  Fixes:
 28 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 29 *					Cope with changing max_files.
 30 *	Al Viro		11 Oct 1998
 31 *		Graph may have cycles. That is, we can send the descriptor
 32 *		of foo to bar and vice versa. Current code chokes on that.
 33 *		Fix: move SCM_RIGHTS ones into the separate list and then
 34 *		skb_free() them all instead of doing explicit fput's.
 35 *		Another problem: since fput() may block somebody may
 36 *		create a new unix_socket when we are in the middle of sweep
 37 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 38 *		upon the beginning and unmark non-junk ones.
 39 *
 40 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 41 *		sent to connect()'ed but still not accept()'ed sockets.
 42 *		Fixed. Old code had slightly different problem here:
 43 *		extra fput() in situation when we passed the descriptor via
 44 *		such socket and closed it (descriptor). That would happen on
 45 *		each unix_gc() until the accept(). Since the struct file in
 46 *		question would go to the free list and might be reused...
 47 *		That might be the reason of random oopses on filp_close()
 48 *		in unrelated processes.
 49 *
 50 *	AV		28 Feb 1999
 51 *		Kill the explicit allocation of stack. Now we keep the tree
 52 *		with root in dummy + pointer (gc_current) to one of the nodes.
 53 *		Stack is represented as path from gc_current to dummy. Unmark
 54 *		now means "add to tree". Push == "make it a son of gc_current".
 55 *		Pop == "move gc_current to parent". We keep only pointers to
 56 *		parents (->gc_tree).
 57 *	AV		1 Mar 1999
 58 *		Damn. Added missing check for ->dead in listen queues scanning.
 59 *
 60 *	Miklos Szeredi 25 Jun 2007
 61 *		Reimplement with a cycle collecting algorithm. This should
 62 *		solve several problems with the previous code, like being racy
 63 *		wrt receive and holding up unrelated socket operations.
 64 */
 65
 66#include <linux/kernel.h>
 67#include <linux/string.h>
 68#include <linux/socket.h>
 69#include <linux/un.h>
 70#include <linux/net.h>
 71#include <linux/fs.h>
 72#include <linux/skbuff.h>
 73#include <linux/netdevice.h>
 74#include <linux/file.h>
 75#include <linux/proc_fs.h>
 76#include <linux/mutex.h>
 77#include <linux/wait.h>
 78
 79#include <net/sock.h>
 80#include <net/af_unix.h>
 81#include <net/scm.h>
 82#include <net/tcp_states.h>
 83
 84struct unix_sock *unix_get_socket(struct file *filp)
 85{
 86	struct inode *inode = file_inode(filp);
 87
 88	/* Socket ? */
 89	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
 90		struct socket *sock = SOCKET_I(inode);
 91		const struct proto_ops *ops;
 92		struct sock *sk = sock->sk;
 93
 94		ops = READ_ONCE(sock->ops);
 95
 96		/* PF_UNIX ? */
 97		if (sk && ops && ops->family == PF_UNIX)
 98			return unix_sk(sk);
 99	}
100
101	return NULL;
102}
103
104static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
105{
106	/* If an embryo socket has a fd,
107	 * the listener indirectly holds the fd's refcnt.
108	 */
109	if (edge->successor->listener)
110		return unix_sk(edge->successor->listener)->vertex;
111
112	return edge->successor->vertex;
113}
114
115static bool unix_graph_maybe_cyclic;
116static bool unix_graph_grouped;
117
118static void unix_update_graph(struct unix_vertex *vertex)
119{
120	/* If the receiver socket is not inflight, no cyclic
121	 * reference could be formed.
122	 */
123	if (!vertex)
124		return;
125
126	unix_graph_maybe_cyclic = true;
127	unix_graph_grouped = false;
128}
129
130static LIST_HEAD(unix_unvisited_vertices);
131
132enum unix_vertex_index {
133	UNIX_VERTEX_INDEX_MARK1,
134	UNIX_VERTEX_INDEX_MARK2,
135	UNIX_VERTEX_INDEX_START,
136};
137
138static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;
139
140static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
141{
142	struct unix_vertex *vertex = edge->predecessor->vertex;
143
144	if (!vertex) {
145		vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
146		vertex->index = unix_vertex_unvisited_index;
147		vertex->out_degree = 0;
148		INIT_LIST_HEAD(&vertex->edges);
149		INIT_LIST_HEAD(&vertex->scc_entry);
150
151		list_move_tail(&vertex->entry, &unix_unvisited_vertices);
152		edge->predecessor->vertex = vertex;
153	}
154
155	vertex->out_degree++;
156	list_add_tail(&edge->vertex_entry, &vertex->edges);
157
158	unix_update_graph(unix_edge_successor(edge));
159}
160
161static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
162{
163	struct unix_vertex *vertex = edge->predecessor->vertex;
164
165	if (!fpl->dead)
166		unix_update_graph(unix_edge_successor(edge));
167
168	list_del(&edge->vertex_entry);
169	vertex->out_degree--;
170
171	if (!vertex->out_degree) {
172		edge->predecessor->vertex = NULL;
173		list_move_tail(&vertex->entry, &fpl->vertices);
174	}
175}
176
177static void unix_free_vertices(struct scm_fp_list *fpl)
178{
179	struct unix_vertex *vertex, *next_vertex;
180
181	list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
182		list_del(&vertex->entry);
183		kfree(vertex);
184	}
185}
186
187static DEFINE_SPINLOCK(unix_gc_lock);
188unsigned int unix_tot_inflight;
189
190void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
191{
192	int i = 0, j = 0;
193
194	spin_lock(&unix_gc_lock);
195
196	if (!fpl->count_unix)
197		goto out;
198
199	do {
200		struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
201		struct unix_edge *edge;
202
203		if (!inflight)
204			continue;
205
206		edge = fpl->edges + i++;
207		edge->predecessor = inflight;
208		edge->successor = receiver;
209
210		unix_add_edge(fpl, edge);
211	} while (i < fpl->count_unix);
212
213	receiver->scm_stat.nr_unix_fds += fpl->count_unix;
214	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
215out:
216	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
217
218	spin_unlock(&unix_gc_lock);
219
220	fpl->inflight = true;
221
222	unix_free_vertices(fpl);
223}
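
A concrete (hypothetical) case of the bookkeeping above: if a single sendmsg() queues an SCM_RIGHTS message carrying fds for AF_UNIX sockets A and B onto socket C's receive queue, unix_add_edges() installs the edges A -> C and B -> C, bumps each predecessor's out_degree, and adds 2 to unix_tot_inflight. Any non-AF_UNIX fds in the same message are skipped by unix_get_socket() but are still included in the per-user unix_inflight total, which is raised by fpl->count.
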
224
225void unix_del_edges(struct scm_fp_list *fpl)
226{
227	struct unix_sock *receiver;
228	int i = 0;
229
230	spin_lock(&unix_gc_lock);
231
232	if (!fpl->count_unix)
233		goto out;
234
235	do {
236		struct unix_edge *edge = fpl->edges + i++;
237
238		unix_del_edge(fpl, edge);
239	} while (i < fpl->count_unix);
240
241	if (!fpl->dead) {
242		receiver = fpl->edges[0].successor;
243		receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
244	}
245	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
246out:
247	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
248
249	spin_unlock(&unix_gc_lock);
250
251	fpl->inflight = false;
252}
253
254void unix_update_edges(struct unix_sock *receiver)
255{
256	/* nr_unix_fds is only updated under unix_state_lock().
257	 * If it's 0 here, the embryo socket is not part of the
258	 * inflight graph, and GC will not see it, so no lock needed.
259	 */
260	if (!receiver->scm_stat.nr_unix_fds) {
261		receiver->listener = NULL;
262	} else {
263		spin_lock(&unix_gc_lock);
264		unix_update_graph(unix_sk(receiver->listener)->vertex);
265		receiver->listener = NULL;
266		spin_unlock(&unix_gc_lock);
267	}
268}
269
270int unix_prepare_fpl(struct scm_fp_list *fpl)
271{
272	struct unix_vertex *vertex;
273	int i;
274
275	if (!fpl->count_unix)
276		return 0;
277
278	for (i = 0; i < fpl->count_unix; i++) {
279		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
280		if (!vertex)
281			goto err;
282
283		list_add(&vertex->entry, &fpl->vertices);
284	}
285
286	fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
287				    GFP_KERNEL_ACCOUNT);
288	if (!fpl->edges)
289		goto err;
290
291	return 0;
292
293err:
294	unix_free_vertices(fpl);
295	return -ENOMEM;
296}
297
298void unix_destroy_fpl(struct scm_fp_list *fpl)
299{
300	if (fpl->inflight)
301		unix_del_edges(fpl);
302
303	kvfree(fpl->edges);
304	unix_free_vertices(fpl);
305}
306
307static bool unix_vertex_dead(struct unix_vertex *vertex)
308{
309	struct unix_edge *edge;
310	struct unix_sock *u;
311	long total_ref;
312
313	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
314		struct unix_vertex *next_vertex = unix_edge_successor(edge);
315
316		/* The vertex's fd can be received by a non-inflight socket. */
317		if (!next_vertex)
318			return false;
319
320		/* The vertex's fd can be received by an inflight socket in
321		 * another SCC.
322		 */
323		if (next_vertex->scc_index != vertex->scc_index)
324			return false;
325	}
326
327	/* No receiver exists out of the same SCC. */
328
329	edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
330	u = edge->predecessor;
331	total_ref = file_count(u->sk.sk_socket->file);
332
333	/* If not close()d, total_ref > out_degree. */
334	if (total_ref != vertex->out_degree)
335		return false;
336
337	return true;
338}
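
A worked instance of the liveness test above: suppose sockets A and B each had their fd sent into the other's receive queue and both fds were then close()d by user space. Each vertex has out_degree 1, every edge's successor lies in the same SCC, and file_count() on each struct file is 1 because the only remaining reference is the one held by the in-flight skb. total_ref therefore equals out_degree, no external reference can ever reach the pair again, and the SCC is judged dead.
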
339
340static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
341{
342	struct unix_vertex *vertex;
343
344	list_for_each_entry_reverse(vertex, scc, scc_entry) {
345		struct sk_buff_head *queue;
346		struct unix_edge *edge;
347		struct unix_sock *u;
348
349		edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
350		u = edge->predecessor;
351		queue = &u->sk.sk_receive_queue;
352
353		spin_lock(&queue->lock);
354
355		if (u->sk.sk_state == TCP_LISTEN) {
356			struct sk_buff *skb;
357
358			skb_queue_walk(queue, skb) {
359				struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
360
361				spin_lock(&embryo_queue->lock);
362				skb_queue_splice_init(embryo_queue, hitlist);
363				spin_unlock(&embryo_queue->lock);
364			}
365		} else {
366			skb_queue_splice_init(queue, hitlist);
367		}
368
369		spin_unlock(&queue->lock);
370	}
371}
372
373static bool unix_scc_cyclic(struct list_head *scc)
374{
375	struct unix_vertex *vertex;
376	struct unix_edge *edge;
377
378	/* SCC containing multiple vertices ? */
379	if (!list_is_singular(scc))
380		return true;
381
382	vertex = list_first_entry(scc, typeof(*vertex), scc_entry);
383
384	/* Self-reference or an embryo-listener circle ? */
385	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
386		if (unix_edge_successor(edge) == vertex)
387			return true;
388	}
389
390	return false;
391}
392
393static LIST_HEAD(unix_visited_vertices);
394static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
395
396static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
397			    struct sk_buff_head *hitlist)
398{
399	LIST_HEAD(vertex_stack);
400	struct unix_edge *edge;
401	LIST_HEAD(edge_stack);
402
403next_vertex:
404	/* Push vertex to vertex_stack and mark it as on-stack
405	 * (index >= UNIX_VERTEX_INDEX_START).
406	 * The vertex will be popped when finalising SCC later.
407	 */
408	list_add(&vertex->scc_entry, &vertex_stack);
409
410	vertex->index = *last_index;
411	vertex->scc_index = *last_index;
412	(*last_index)++;
413
414	/* Explore neighbour vertices (receivers of the current vertex's fd). */
415	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
416		struct unix_vertex *next_vertex = unix_edge_successor(edge);
417
418		if (!next_vertex)
419			continue;
420
421		if (next_vertex->index == unix_vertex_unvisited_index) {
422			/* Iterative deepening depth first search
423			 *
424			 *   1. Push a forward edge to edge_stack and set
425			 *      the successor to vertex for the next iteration.
426			 */
427			list_add(&edge->stack_entry, &edge_stack);
428
429			vertex = next_vertex;
430			goto next_vertex;
431
432			/*   2. Pop the edge directed to the current vertex
433			 *      and restore the ancestor for backtracking.
434			 */
435prev_vertex:
436			edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
437			list_del_init(&edge->stack_entry);
438
439			next_vertex = vertex;
440			vertex = edge->predecessor->vertex;
441
442			/* If the successor has a smaller scc_index, two vertices
443			 * are in the same SCC, so propagate the smaller scc_index
444			 * to skip SCC finalisation.
445			 */
446			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
447		} else if (next_vertex->index != unix_vertex_grouped_index) {
448			/* Loop detected by a back/cross edge.
449			 *
450			 * The successor is on vertex_stack, so two vertices are in
451			 * the same SCC.  If the successor has a smaller *scc_index*,
452			 * propagate it to skip SCC finalisation.
453			 */
454			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
455		} else {
456			/* The successor was already grouped as another SCC */
457		}
458	}
459
460	if (vertex->index == vertex->scc_index) {
461		struct unix_vertex *v;
462		struct list_head scc;
463		bool scc_dead = true;
464
465		/* SCC finalised.
466		 *
467		 * If the scc_index was not updated, all the vertices above on
468		 * vertex_stack are in the same SCC.  Group them using scc_entry.
469		 */
470		__list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
471
472		list_for_each_entry_reverse(v, &scc, scc_entry) {
473			/* Don't restart DFS from this vertex in unix_walk_scc(). */
474			list_move_tail(&v->entry, &unix_visited_vertices);
475
476			/* Mark vertex as off-stack. */
477			v->index = unix_vertex_grouped_index;
478
479			if (scc_dead)
480				scc_dead = unix_vertex_dead(v);
481		}
482
483		if (scc_dead)
484			unix_collect_skb(&scc, hitlist);
485		else if (!unix_graph_maybe_cyclic)
486			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
487
488		list_del(&scc);
489	}
490
491	/* Need backtracking ? */
492	if (!list_empty(&edge_stack))
493		goto prev_vertex;
494}
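
__unix_walk_scc() above is an iterative form of Tarjan's strongly connected components algorithm: vertex->index plays the role of the discovery index, vertex->scc_index roughly the role of the lowlink value, and the explicit edge_stack replaces the recursion. For readers unfamiliar with the algorithm, here is a minimal, self-contained sketch of the classic recursive version on a toy adjacency matrix; the graph, vertex numbering and output are illustrative only and have nothing to do with the kernel's data structures.

#include <stdbool.h>
#include <stdio.h>

#define NV 4			/* vertices in the toy graph */
#define UNVISITED -1

/* adj[u][v] != 0 means there is an edge u -> v. */
static const int adj[NV][NV] = {
	{ 0, 1, 0, 0 },		/* 0 -> 1 */
	{ 0, 0, 1, 0 },		/* 1 -> 2 */
	{ 1, 0, 0, 1 },		/* 2 -> 0 and 2 -> 3, so {0,1,2} form a cycle */
	{ 0, 0, 0, 0 },		/* 3 has no outgoing edges */
};

static int idx[NV], low[NV], next_index;
static int stack[NV], sp;
static bool on_stack[NV];

static int min(int a, int b) { return a < b ? a : b; }

static void strongconnect(int v)
{
	idx[v] = low[v] = next_index++;
	stack[sp++] = v;
	on_stack[v] = true;

	for (int w = 0; w < NV; w++) {
		if (!adj[v][w])
			continue;
		if (idx[w] == UNVISITED) {
			strongconnect(w);		/* tree edge */
			low[v] = min(low[v], low[w]);
		} else if (on_stack[w]) {		/* back/cross edge into the stack */
			low[v] = min(low[v], idx[w]);
		}
	}

	if (low[v] == idx[v]) {		/* v is the root of an SCC: pop it */
		int w;

		printf("SCC:");
		do {
			w = stack[--sp];
			on_stack[w] = false;
			printf(" %d", w);
		} while (w != v);
		printf("\n");
	}
}

int main(void)
{
	for (int v = 0; v < NV; v++)
		idx[v] = UNVISITED;

	for (int v = 0; v < NV; v++)
		if (idx[v] == UNVISITED)
			strongconnect(v);	/* prints "SCC: 3" then "SCC: 2 1 0" */

	return 0;
}

The kernel variant also caches the resulting grouping: as long as the graph has not been touched in a way that could form a new cycle (unix_graph_grouped still true), unix_walk_scc_fast() below re-checks the already-formed SCC groups without running the full algorithm again.
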
495
496static void unix_walk_scc(struct sk_buff_head *hitlist)
497{
498	unsigned long last_index = UNIX_VERTEX_INDEX_START;
499
500	unix_graph_maybe_cyclic = false;
501
502	/* Visit every vertex exactly once.
503	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
504	 */
505	while (!list_empty(&unix_unvisited_vertices)) {
506		struct unix_vertex *vertex;
507
508		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
509		__unix_walk_scc(vertex, &last_index, hitlist);
510	}
511
512	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
513	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
514
515	unix_graph_grouped = true;
516}
517
518static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
519{
520	unix_graph_maybe_cyclic = false;
521
522	while (!list_empty(&unix_unvisited_vertices)) {
523		struct unix_vertex *vertex;
524		struct list_head scc;
525		bool scc_dead = true;
526
527		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
528		list_add(&scc, &vertex->scc_entry);
529
530		list_for_each_entry_reverse(vertex, &scc, scc_entry) {
531			list_move_tail(&vertex->entry, &unix_visited_vertices);
532
533			if (scc_dead)
534				scc_dead = unix_vertex_dead(vertex);
535		}
536
537		if (scc_dead)
538			unix_collect_skb(&scc, hitlist);
539		else if (!unix_graph_maybe_cyclic)
540			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
541
542		list_del(&scc);
543	}
544
545	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
546}
547
548static bool gc_in_progress;
549
550static void __unix_gc(struct work_struct *work)
551{
552	struct sk_buff_head hitlist;
553	struct sk_buff *skb;
554
555	spin_lock(&unix_gc_lock);
556
557	if (!unix_graph_maybe_cyclic) {
558		spin_unlock(&unix_gc_lock);
559		goto skip_gc;
560	}
561
562	__skb_queue_head_init(&hitlist);
563
564	if (unix_graph_grouped)
565		unix_walk_scc_fast(&hitlist);
566	else
567		unix_walk_scc(&hitlist);
568
569	spin_unlock(&unix_gc_lock);
570
571	skb_queue_walk(&hitlist, skb) {
572		if (UNIXCB(skb).fp)
573			UNIXCB(skb).fp->dead = true;
574	}
575
576	__skb_queue_purge(&hitlist);
577skip_gc:
578	WRITE_ONCE(gc_in_progress, false);
579}
580
581static DECLARE_WORK(unix_gc_work, __unix_gc);
582
583void unix_gc(void)
584{
585	WRITE_ONCE(gc_in_progress, true);
586	queue_work(system_unbound_wq, &unix_gc_work);
587}
588
589#define UNIX_INFLIGHT_TRIGGER_GC 16000
590#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
591
592void wait_for_unix_gc(struct scm_fp_list *fpl)
593{
594	/* If number of inflight sockets is insane,
595	 * force a garbage collect right now.
596	 *
597	 * Paired with the WRITE_ONCE() in unix_inflight(),
598	 * unix_notinflight(), and __unix_gc().
599	 */
600	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
601	    !READ_ONCE(gc_in_progress))
602		unix_gc();
603
604	/* Penalise users who want to send AF_UNIX sockets
605	 * but whose sockets have not been received yet.
606	 */
607	if (!fpl || !fpl->count_unix ||
608	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
609		return;
610
611	if (READ_ONCE(gc_in_progress))
612		flush_work(&unix_gc_work);
613}
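
To see from user space the problem this collector solves, the sketch below (not part of the kernel tree; error handling mostly omitted) builds the classic two-socket cycle described in the header comment: each socketpair end's fd is sent, via SCM_RIGHTS, into the other end's receive queue and then closed, leaving two struct files pinned only by each other's queued messages. Only the garbage collector in this file can reclaim them.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Pass fd across sock as ancillary SCM_RIGHTS data. */
static void send_fd(int sock, int fd)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	sendmsg(sock, &msg, 0);
}

int main(void)
{
	int sk[2];

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sk) < 0)
		return 1;

	/* sk[0]'s fd lands, unread, in sk[1]'s receive queue and vice versa. */
	send_fd(sk[0], sk[0]);
	send_fd(sk[1], sk[1]);

	/* Drop the only user-space references.  The queued SCM_RIGHTS
	 * messages keep both struct files pinned: an unreachable cycle
	 * that only the AF_UNIX garbage collector can break.
	 */
	close(sk[0]);
	close(sk[1]);

	return 0;
}
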
net/unix/garbage.c (v4.17)
 
  1/*
  2 * NET3:	Garbage Collector For AF_UNIX sockets
  3 *
  4 * Garbage Collector:
  5 *	Copyright (C) Barak A. Pearlmutter.
  6 *	Released under the GPL version 2 or later.
  7 *
  8 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
  9 * If it doesn't work blame me, it worked when Barak sent it.
 10 *
 11 * Assumptions:
 12 *
 13 *  - object w/ a bit
 14 *  - free list
 15 *
 16 * Current optimizations:
 17 *
 18 *  - explicit stack instead of recursion
 19 *  - tail recurse on first born instead of immediate push/pop
 20 *  - we gather the stuff that should not be killed into tree
 21 *    and stack is just a path from root to the current pointer.
 22 *
 23 *  Future optimizations:
 24 *
 25 *  - don't just push entire root set; process in place
 26 *
 27 *	This program is free software; you can redistribute it and/or
 28 *	modify it under the terms of the GNU General Public License
 29 *	as published by the Free Software Foundation; either version
 30 *	2 of the License, or (at your option) any later version.
 31 *
 32 *  Fixes:
 33 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 34 *					Cope with changing max_files.
 35 *	Al Viro		11 Oct 1998
 36 *		Graph may have cycles. That is, we can send the descriptor
 37 *		of foo to bar and vice versa. Current code chokes on that.
 38 *		Fix: move SCM_RIGHTS ones into the separate list and then
 39 *		skb_free() them all instead of doing explicit fput's.
 40 *		Another problem: since fput() may block somebody may
 41 *		create a new unix_socket when we are in the middle of sweep
 42 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 43 *		upon the beginning and unmark non-junk ones.
 44 *
 45 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 46 *		sent to connect()'ed but still not accept()'ed sockets.
 47 *		Fixed. Old code had slightly different problem here:
 48 *		extra fput() in situation when we passed the descriptor via
 49 *		such socket and closed it (descriptor). That would happen on
 50 *		each unix_gc() until the accept(). Since the struct file in
 51 *		question would go to the free list and might be reused...
 52 *		That might be the reason of random oopses on filp_close()
 53 *		in unrelated processes.
 54 *
 55 *	AV		28 Feb 1999
 56 *		Kill the explicit allocation of stack. Now we keep the tree
 57 *		with root in dummy + pointer (gc_current) to one of the nodes.
 58 *		Stack is represented as path from gc_current to dummy. Unmark
 59 *		now means "add to tree". Push == "make it a son of gc_current".
 60 *		Pop == "move gc_current to parent". We keep only pointers to
 61 *		parents (->gc_tree).
 62 *	AV		1 Mar 1999
 63 *		Damn. Added missing check for ->dead in listen queues scanning.
 64 *
 65 *	Miklos Szeredi 25 Jun 2007
 66 *		Reimplement with a cycle collecting algorithm. This should
 67 *		solve several problems with the previous code, like being racy
 68 *		wrt receive and holding up unrelated socket operations.
 69 */
 70
 71#include <linux/kernel.h>
 72#include <linux/string.h>
 73#include <linux/socket.h>
 74#include <linux/un.h>
 75#include <linux/net.h>
 76#include <linux/fs.h>
 77#include <linux/skbuff.h>
 78#include <linux/netdevice.h>
 79#include <linux/file.h>
 80#include <linux/proc_fs.h>
 81#include <linux/mutex.h>
 82#include <linux/wait.h>
 83
 84#include <net/sock.h>
 85#include <net/af_unix.h>
 86#include <net/scm.h>
 87#include <net/tcp_states.h>
 88
 89/* Internal data structures and random procedures: */
 90
 91static LIST_HEAD(gc_inflight_list);
 92static LIST_HEAD(gc_candidates);
 93static DEFINE_SPINLOCK(unix_gc_lock);
 94static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
 95
 96unsigned int unix_tot_inflight;
 97
 98struct sock *unix_get_socket(struct file *filp)
 99{
100	struct sock *u_sock = NULL;
101	struct inode *inode = file_inode(filp);
102
103	/* Socket ? */
104	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
105		struct socket *sock = SOCKET_I(inode);
106		struct sock *s = sock->sk;
107
108		/* PF_UNIX ? */
109		if (s && sock->ops && sock->ops->family == PF_UNIX)
110			u_sock = s;
111	}
112	return u_sock;
113}
114
115/* Keep the number of times in flight count for the file
116 * descriptor if it is for an AF_UNIX socket.
117 */
118
119void unix_inflight(struct user_struct *user, struct file *fp)
120{
121	struct sock *s = unix_get_socket(fp);
122
123	spin_lock(&unix_gc_lock);
124
125	if (s) {
126		struct unix_sock *u = unix_sk(s);
127
128		if (atomic_long_inc_return(&u->inflight) == 1) {
129			BUG_ON(!list_empty(&u->link));
130			list_add_tail(&u->link, &gc_inflight_list);
131		} else {
132			BUG_ON(list_empty(&u->link));
133		}
134		unix_tot_inflight++;
135	}
136	user->unix_inflight++;
137	spin_unlock(&unix_gc_lock);
138}
139
140void unix_notinflight(struct user_struct *user, struct file *fp)
141{
142	struct sock *s = unix_get_socket(fp);
143
144	spin_lock(&unix_gc_lock);
145
146	if (s) {
147		struct unix_sock *u = unix_sk(s);
148
149		BUG_ON(!atomic_long_read(&u->inflight));
150		BUG_ON(list_empty(&u->link));
151
152		if (atomic_long_dec_and_test(&u->inflight))
153			list_del_init(&u->link);
154		unix_tot_inflight--;
155	}
156	user->unix_inflight--;
157	spin_unlock(&unix_gc_lock);
158}
159
160static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
161			  struct sk_buff_head *hitlist)
162{
163	struct sk_buff *skb;
164	struct sk_buff *next;
165
166	spin_lock(&x->sk_receive_queue.lock);
167	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
168		/* Do we have file descriptors ? */
169		if (UNIXCB(skb).fp) {
170			bool hit = false;
171			/* Process the descriptors of this socket */
172			int nfd = UNIXCB(skb).fp->count;
173			struct file **fp = UNIXCB(skb).fp->fp;
174
175			while (nfd--) {
176				/* Get the socket the fd matches if it indeed does so */
177				struct sock *sk = unix_get_socket(*fp++);
178
179				if (sk) {
180					struct unix_sock *u = unix_sk(sk);
181
182					/* Ignore non-candidates, they could
183					 * have been added to the queues after
184					 * starting the garbage collection
185					 */
186					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
187						hit = true;
188
189						func(u);
190					}
191				}
192			}
193			if (hit && hitlist != NULL) {
194				__skb_unlink(skb, &x->sk_receive_queue);
195				__skb_queue_tail(hitlist, skb);
196			}
197		}
198	}
199	spin_unlock(&x->sk_receive_queue.lock);
200}
201
202static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
203			  struct sk_buff_head *hitlist)
204{
205	if (x->sk_state != TCP_LISTEN) {
206		scan_inflight(x, func, hitlist);
207	} else {
208		struct sk_buff *skb;
209		struct sk_buff *next;
210		struct unix_sock *u;
211		LIST_HEAD(embryos);
212
213		/* For a listening socket collect the queued embryos
214		 * and perform a scan on them as well.
215		 */
216		spin_lock(&x->sk_receive_queue.lock);
217		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
218			u = unix_sk(skb->sk);
219
220			/* An embryo cannot be in-flight, so it's safe
221			 * to use the list link.
222			 */
223			BUG_ON(!list_empty(&u->link));
224			list_add_tail(&u->link, &embryos);
225		}
226		spin_unlock(&x->sk_receive_queue.lock);
227
228		while (!list_empty(&embryos)) {
229			u = list_entry(embryos.next, struct unix_sock, link);
230			scan_inflight(&u->sk, func, hitlist);
231			list_del_init(&u->link);
232		}
233	}
234}
235
236static void dec_inflight(struct unix_sock *usk)
237{
238	atomic_long_dec(&usk->inflight);
239}
240
241static void inc_inflight(struct unix_sock *usk)
242{
243	atomic_long_inc(&usk->inflight);
244}
245
246static void inc_inflight_move_tail(struct unix_sock *u)
247{
248	atomic_long_inc(&u->inflight);
249	/* If this still might be part of a cycle, move it to the end
250	 * of the list, so that it's checked even if it was already
251	 * passed over
252	 */
253	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
254		list_move_tail(&u->link, &gc_candidates);
255}
256
257static bool gc_in_progress;
258#define UNIX_INFLIGHT_TRIGGER_GC 16000
259
260void wait_for_unix_gc(void)
261{
262	/* If number of inflight sockets is insane,
263	 * force a garbage collect right now.
264	 */
265	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
266		unix_gc();
267	wait_event(unix_gc_wait, gc_in_progress == false);
268}
269
270/* The external entry point: unix_gc() */
271void unix_gc(void)
272{
273	struct unix_sock *u;
274	struct unix_sock *next;
275	struct sk_buff_head hitlist;
276	struct list_head cursor;
277	LIST_HEAD(not_cycle_list);
278
279	spin_lock(&unix_gc_lock);
280
281	/* Avoid a recursive GC. */
282	if (gc_in_progress)
283		goto out;
284
285	gc_in_progress = true;
286	/* First, select candidates for garbage collection.  Only
287	 * in-flight sockets are considered, and from those only ones
288	 * which don't have any external reference.
289	 *
290	 * Holding unix_gc_lock will protect these candidates from
291	 * being detached, and hence from gaining an external
292	 * reference.  Since there are no possible receivers, all
293	 * buffers currently on the candidates' queues stay there
294	 * during the garbage collection.
295	 *
296	 * We also know that no new candidate can be added onto the
297	 * receive queues.  Other, non candidate sockets _can_ be
298	 * added to queue, so we must make sure only to touch
299	 * candidates.
300	 */
301	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
302		long total_refs;
303		long inflight_refs;
304
305		total_refs = file_count(u->sk.sk_socket->file);
306		inflight_refs = atomic_long_read(&u->inflight);
307
308		BUG_ON(inflight_refs < 1);
309		BUG_ON(total_refs < inflight_refs);
310		if (total_refs == inflight_refs) {
311			list_move_tail(&u->link, &gc_candidates);
312			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
313			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
314		}
315	}
316
317	/* Now remove all internal in-flight reference to children of
318	 * the candidates.
319	 */
320	list_for_each_entry(u, &gc_candidates, link)
321		scan_children(&u->sk, dec_inflight, NULL);
322
323	/* Restore the references for children of all candidates,
324	 * which have remaining references.  Do this recursively, so
325	 * only those remain, which form cyclic references.
326	 *
327	 * Use a "cursor" link, to make the list traversal safe, even
328	 * though elements might be moved about.
329	 */
330	list_add(&cursor, &gc_candidates);
331	while (cursor.next != &gc_candidates) {
332		u = list_entry(cursor.next, struct unix_sock, link);
333
334		/* Move cursor to after the current position. */
335		list_move(&cursor, &u->link);
336
337		if (atomic_long_read(&u->inflight) > 0) {
338			list_move_tail(&u->link, &not_cycle_list);
339			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
340			scan_children(&u->sk, inc_inflight_move_tail, NULL);
341		}
342	}
343	list_del(&cursor);
344
345	/* Now gc_candidates contains only garbage.  Restore original
346	 * inflight counters for these as well, and remove the skbuffs
347	 * which are creating the cycle(s).
348	 */
349	skb_queue_head_init(&hitlist);
350	list_for_each_entry(u, &gc_candidates, link)
351		scan_children(&u->sk, inc_inflight, &hitlist);
352
353	/* not_cycle_list contains those sockets which do not make up a
354	 * cycle.  Restore these to the inflight list.
355	 */
356	while (!list_empty(&not_cycle_list)) {
357		u = list_entry(not_cycle_list.next, struct unix_sock, link);
358		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
359		list_move_tail(&u->link, &gc_inflight_list);
360	}
361
362	spin_unlock(&unix_gc_lock);
363
364	/* Here we are. Hitlist is filled. Die. */
365	__skb_queue_purge(&hitlist);
366
367	spin_lock(&unix_gc_lock);
368
369	/* All candidates should have been detached by now. */
370	BUG_ON(!list_empty(&gc_candidates));
371	gc_in_progress = false;
372	wake_up(&unix_gc_wait);
373
374 out:
375	spin_unlock(&unix_gc_lock);
376}
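
For contrast with the v6.13.7 code above: this older collector keeps a per-socket inflight counter instead of an explicit edge graph. A worked (hypothetical) instance of the candidate test in unix_gc(): a socket whose file has been sent twice via SCM_RIGHTS while one process still holds its fd open has inflight_refs == 2 but total_refs == 3, so it is skipped; once that last fd is close()d, total_refs drops to 2 == inflight_refs and the socket becomes a candidate. The dec_inflight pass then subtracts the references that candidates hold on each other, candidates whose count is still positive are referenced from outside the candidate set and are restored via not_cycle_list, and whatever remains is kept alive only by other candidates, i.e. by cycles, so its queued skbs are moved to the hitlist and purged.
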