v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * NET4:	Implementation of BSD Unix domain sockets.
   4 *
   5 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
   6 *
   7 * Fixes:
   8 *		Linus Torvalds	:	Assorted bug cures.
   9 *		Niibe Yutaka	:	async I/O support.
  10 *		Carsten Paeth	:	PF_UNIX check, address fixes.
  11 *		Alan Cox	:	Limit size of allocated blocks.
  12 *		Alan Cox	:	Fixed the stupid socketpair bug.
  13 *		Alan Cox	:	BSD compatibility fine tuning.
  14 *		Alan Cox	:	Fixed a bug in connect when interrupted.
  15 *		Alan Cox	:	Sorted out a proper draft version of
  16 *					file descriptor passing hacked up from
  17 *					Mike Shaver's work.
  18 *		Marty Leisner	:	Fixes to fd passing
  19 *		Nick Nevin	:	recvmsg bugfix.
  20 *		Alan Cox	:	Started proper garbage collector
  21 *		Heiko EiBfeldt	:	Missing verify_area check
  22 *		Alan Cox	:	Started POSIXisms
  23 *		Andreas Schwab	:	Replace inode by dentry for proper
  24 *					reference counting
  25 *		Kirk Petersen	:	Made this a module
  26 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
  27 *					Lots of bug fixes.
  28 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
  29 *					by the above two patches.
  30 *	     Andrea Arcangeli	:	If possible we block in connect(2)
  31 *					if the max backlog of the listen socket
  32 *					has been reached. This won't break
  33 *					old apps and it will avoid a huge number
  34 *					of socks being hashed (this is for unix_gc()
  35 *					performance reasons).
  36 *					Security fix that limits the max
  37 *					number of socks to 2*max_files and
  38 *					the number of skbs queueable in the
  39 *					dgram receiver.
  40 *		Artur Skawina   :	Hash function optimizations
  41 *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
  42 *	      Malcolm Beattie   :	Set peercred for socketpair
  43 *	     Michal Ostrowski   :       Module initialization cleanup.
  44 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
  45 *	     				the core infrastructure is doing that
  46 *	     				for all net proto families now (2.5.69+)
  47 *
  48 * Known differences from reference BSD that was tested:
  49 *
  50 *	[TO FIX]
  51 *	ECONNREFUSED is not returned from one end of a connected() socket to the
  52 *		other the moment one end closes.
  53 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
  54 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
  55 *	[NOT TO FIX]
  56 *	accept() returns a path name even if the connecting socket has closed
  57 *		in the meantime (BSD loses the path and gives up).
  58 *	accept() returns 0 length path for an unbound connector. BSD returns 16
  59 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  60 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
  61 *	BSD af_unix apparently has connect forgetting to block properly.
  62 *		(need to check this with the POSIX spec in detail)
  63 *
  64 * Differences from 2.0.0-11-... (ANK)
  65 *	Bug fixes and improvements.
  66 *		- client shutdown killed server socket.
  67 *		- removed all useless cli/sti pairs.
  68 *
  69 *	Semantic changes/extensions.
  70 *		- generic control message passing.
  71 *		- SCM_CREDENTIALS control message.
  72 *		- "Abstract" (not FS based) socket bindings.
  73 *		  Abstract names are sequences of bytes (not zero terminated)
  74 *		  starting with a 0 byte, so that this name space does not intersect
  75 *		  with BSD names.
  76 */
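/*
 * Illustrative userspace sketch (not from this file): binding an AF_UNIX
 * socket either to a filesystem path or to an abstract name.  The abstract
 * form described above is signalled by sun_path[0] == 0; the name is the raw
 * bytes that follow and is not NUL-terminated, so addrlen must count exactly
 * the bytes used.
 */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int bind_example(int fd, int use_abstract)
{
	struct sockaddr_un sa;
	socklen_t len;

	memset(&sa, 0, sizeof(sa));
	sa.sun_family = AF_UNIX;
	if (use_abstract) {
		/* abstract: leading 0 byte, then 7 name bytes, no NUL */
		memcpy(sa.sun_path + 1, "example", 7);
		len = offsetof(struct sockaddr_un, sun_path) + 1 + 7;
	} else {
		/* filesystem: ordinary NUL-terminated path */
		strcpy(sa.sun_path, "/tmp/example.sock");
		len = sizeof(sa);
	}
	return bind(fd, (struct sockaddr *)&sa, len);
}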
  77
  78#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  79
  80#include <linux/module.h>
  81#include <linux/kernel.h>
  82#include <linux/signal.h>
  83#include <linux/sched/signal.h>
  84#include <linux/errno.h>
  85#include <linux/string.h>
  86#include <linux/stat.h>
  87#include <linux/dcache.h>
  88#include <linux/namei.h>
  89#include <linux/socket.h>
  90#include <linux/un.h>
  91#include <linux/fcntl.h>
  92#include <linux/termios.h>
  93#include <linux/sockios.h>
  94#include <linux/net.h>
  95#include <linux/in.h>
  96#include <linux/fs.h>
  97#include <linux/slab.h>
  98#include <linux/uaccess.h>
  99#include <linux/skbuff.h>
 100#include <linux/netdevice.h>
 101#include <net/net_namespace.h>
 102#include <net/sock.h>
 103#include <net/tcp_states.h>
 104#include <net/af_unix.h>
 105#include <linux/proc_fs.h>
 106#include <linux/seq_file.h>
 107#include <net/scm.h>
 108#include <linux/init.h>
 109#include <linux/poll.h>
 110#include <linux/rtnetlink.h>
 111#include <linux/mount.h>
 112#include <net/checksum.h>
 113#include <linux/security.h>
 114#include <linux/freezer.h>
 115#include <linux/file.h>
 116
 117#include "scm.h"
 118
 119struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 120EXPORT_SYMBOL_GPL(unix_socket_table);
 121DEFINE_SPINLOCK(unix_table_lock);
 122EXPORT_SYMBOL_GPL(unix_table_lock);
 123static atomic_long_t unix_nr_socks;
 124
 125
 126static struct hlist_head *unix_sockets_unbound(void *addr)
 127{
 128	unsigned long hash = (unsigned long)addr;
 129
 130	hash ^= hash >> 16;
 131	hash ^= hash >> 8;
 132	hash %= UNIX_HASH_SIZE;
 133	return &unix_socket_table[UNIX_HASH_SIZE + hash];
 134}
 135
 136#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
 137
 138#ifdef CONFIG_SECURITY_NETWORK
 139static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 140{
 141	UNIXCB(skb).secid = scm->secid;
 142}
 143
 144static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 145{
 146	scm->secid = UNIXCB(skb).secid;
 147}
 148
 149static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 150{
 151	return (scm->secid == UNIXCB(skb).secid);
 152}
 153#else
 154static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 155{ }
 156
 157static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 158{ }
 159
 160static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 161{
 162	return true;
 163}
 164#endif /* CONFIG_SECURITY_NETWORK */
 165
 166/*
 167 *  SMP locking strategy:
 168 *    hash table is protected with spinlock unix_table_lock
 169 *    each socket state is protected by separate spin lock.
 170 */
 171
 172static inline unsigned int unix_hash_fold(__wsum n)
 173{
 174	unsigned int hash = (__force unsigned int)csum_fold(n);
 175
 176	hash ^= hash>>8;
 177	return hash&(UNIX_HASH_SIZE-1);
 178}
 179
 180#define unix_peer(sk) (unix_sk(sk)->peer)
 181
 182static inline int unix_our_peer(struct sock *sk, struct sock *osk)
 183{
 184	return unix_peer(osk) == sk;
 185}
 186
 187static inline int unix_may_send(struct sock *sk, struct sock *osk)
 188{
 189	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 190}
 191
 192static inline int unix_recvq_full(const struct sock *sk)
 193{
 194	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 195}
 196
 197static inline int unix_recvq_full_lockless(const struct sock *sk)
 198{
 199	return skb_queue_len_lockless(&sk->sk_receive_queue) >
 200		READ_ONCE(sk->sk_max_ack_backlog);
 201}
 202
 203struct sock *unix_peer_get(struct sock *s)
 204{
 205	struct sock *peer;
 206
 207	unix_state_lock(s);
 208	peer = unix_peer(s);
 209	if (peer)
 210		sock_hold(peer);
 211	unix_state_unlock(s);
 212	return peer;
 213}
 214EXPORT_SYMBOL_GPL(unix_peer_get);
 215
 216static inline void unix_release_addr(struct unix_address *addr)
 217{
 218	if (refcount_dec_and_test(&addr->refcnt))
 219		kfree(addr);
 220}
 221
 222/*
 223 *	Check unix socket name:
 224 *		- must not be zero length.
 225 *	        - if it does not start with a zero byte, it should be NUL terminated (FS object)
 226 *		- if it starts with a zero byte, it is an abstract name.
 227 */
 228
 229static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
 230{
 231	*hashp = 0;
 232
 233	if (len <= sizeof(short) || len > sizeof(*sunaddr))
 234		return -EINVAL;
 235	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
 236		return -EINVAL;
 237	if (sunaddr->sun_path[0]) {
 238		/*
 239		 * This may look like an off by one error but it is a bit more
 240		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
 241		 * sun_path[108] doesn't as such exist.  However in kernel space
 242		 * we are guaranteed that it is a valid memory location in our
 243		 * kernel address buffer.
 244		 */
 245		((char *)sunaddr)[len] = 0;
 246		len = strlen(sunaddr->sun_path)+1+sizeof(short);
 247		return len;
 248	}
 249
 250	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
 251	return len;
 252}
 253
 254static void __unix_remove_socket(struct sock *sk)
 255{
 256	sk_del_node_init(sk);
 257}
 258
 259static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
 260{
 261	WARN_ON(!sk_unhashed(sk));
 262	sk_add_node(sk, list);
 263}
 264
 265static void __unix_set_addr(struct sock *sk, struct unix_address *addr,
 266			    unsigned hash)
 267{
 268	__unix_remove_socket(sk);
 269	smp_store_release(&unix_sk(sk)->addr, addr);
 270	__unix_insert_socket(&unix_socket_table[hash], sk);
 271}
 272
 273static inline void unix_remove_socket(struct sock *sk)
 274{
 275	spin_lock(&unix_table_lock);
 276	__unix_remove_socket(sk);
 277	spin_unlock(&unix_table_lock);
 278}
 279
 280static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
 281{
 282	spin_lock(&unix_table_lock);
 283	__unix_insert_socket(list, sk);
 284	spin_unlock(&unix_table_lock);
 285}
 286
 287static struct sock *__unix_find_socket_byname(struct net *net,
 288					      struct sockaddr_un *sunname,
 289					      int len, unsigned int hash)
 290{
 291	struct sock *s;
 292
 293	sk_for_each(s, &unix_socket_table[hash]) {
 294		struct unix_sock *u = unix_sk(s);
 295
 296		if (!net_eq(sock_net(s), net))
 297			continue;
 298
 299		if (u->addr->len == len &&
 300		    !memcmp(u->addr->name, sunname, len))
 301			return s;
 302	}
 303	return NULL;
 304}
 305
 306static inline struct sock *unix_find_socket_byname(struct net *net,
 307						   struct sockaddr_un *sunname,
 308						   int len, unsigned int hash)
 309{
 310	struct sock *s;
 311
 312	spin_lock(&unix_table_lock);
 313	s = __unix_find_socket_byname(net, sunname, len, hash);
 314	if (s)
 315		sock_hold(s);
 316	spin_unlock(&unix_table_lock);
 317	return s;
 318}
 319
 320static struct sock *unix_find_socket_byinode(struct inode *i)
 321{
 322	struct sock *s;
 323
 324	spin_lock(&unix_table_lock);
 325	sk_for_each(s,
 326		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 327		struct dentry *dentry = unix_sk(s)->path.dentry;
 328
 329		if (dentry && d_backing_inode(dentry) == i) {
 330			sock_hold(s);
 331			goto found;
 332		}
 333	}
 334	s = NULL;
 335found:
 336	spin_unlock(&unix_table_lock);
 337	return s;
 338}
 339
 340/* Support code for asymmetrically connected dgram sockets
 341 *
 342 * If a datagram socket is connected to a socket not itself connected
 343 * to the first socket (eg, /dev/log), clients may only enqueue more
 344 * messages if the present receive queue of the server socket is not
 345 * "too large". This means there's a second writeability condition
 346 * poll and sendmsg need to test. The dgram recv code will do a wake
 347 * up on the peer_wait wait queue of a socket upon reception of a
 348 * datagram which needs to be propagated to sleeping would-be writers
 349 * since these might not have sent anything so far. This can't be
 350 * accomplished via poll_wait because the lifetime of the server
 351 * socket might be less than that of its clients if these break their
 352 * association with it or if the server socket is closed while clients
 353 * are still connected to it and there's no way to inform "a polling
 354 * implementation" that it should let go of a certain wait queue
 355 *
 356 * In order to propagate a wake up, a wait_queue_entry_t of the client
 357 * socket is enqueued on the peer_wait queue of the server socket
 358 * whose wake function does a wake_up on the ordinary client socket
 359 * wait queue. This connection is established whenever a write (or
 360 * poll for write) hit the flow control condition and broken when the
 361 * association to the server socket is dissolved or after a wake up
 362 * was relayed.
 363 */
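/*
 * Illustrative userspace sketch (not from this file): the client side that
 * the relay described above serves.  A connected datagram sender must not be
 * told POLLOUT while the receiver's queue is "too large"; the wake-up relayed
 * through peer_wait is what eventually lets this poll() return.
 */
#include <poll.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t send_when_writable(int fd, const void *buf, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };

	/* block until the peer's receive queue has room again */
	if (poll(&pfd, 1, -1) < 0)
		return -1;
	return send(fd, buf, len, 0);
}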
 364
 365static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 366				      void *key)
 367{
 368	struct unix_sock *u;
 369	wait_queue_head_t *u_sleep;
 370
 371	u = container_of(q, struct unix_sock, peer_wake);
 372
 373	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 374			    q);
 375	u->peer_wake.private = NULL;
 376
 377	/* relaying can only happen while the wq still exists */
 378	u_sleep = sk_sleep(&u->sk);
 379	if (u_sleep)
 380		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
 381
 382	return 0;
 383}
 384
 385static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 386{
 387	struct unix_sock *u, *u_other;
 388	int rc;
 389
 390	u = unix_sk(sk);
 391	u_other = unix_sk(other);
 392	rc = 0;
 393	spin_lock(&u_other->peer_wait.lock);
 394
 395	if (!u->peer_wake.private) {
 396		u->peer_wake.private = other;
 397		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 398
 399		rc = 1;
 400	}
 401
 402	spin_unlock(&u_other->peer_wait.lock);
 403	return rc;
 404}
 405
 406static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 407					    struct sock *other)
 408{
 409	struct unix_sock *u, *u_other;
 410
 411	u = unix_sk(sk);
 412	u_other = unix_sk(other);
 413	spin_lock(&u_other->peer_wait.lock);
 414
 415	if (u->peer_wake.private == other) {
 416		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 417		u->peer_wake.private = NULL;
 418	}
 419
 420	spin_unlock(&u_other->peer_wait.lock);
 421}
 422
 423static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 424						   struct sock *other)
 425{
 426	unix_dgram_peer_wake_disconnect(sk, other);
 427	wake_up_interruptible_poll(sk_sleep(sk),
 428				   EPOLLOUT |
 429				   EPOLLWRNORM |
 430				   EPOLLWRBAND);
 431}
 432
 433/* preconditions:
 434 *	- unix_peer(sk) == other
 435 *	- association is stable
 436 */
 437static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 438{
 439	int connected;
 440
 441	connected = unix_dgram_peer_wake_connect(sk, other);
 442
 443	/* If other is SOCK_DEAD, we want to make sure we signal
 444	 * POLLOUT, such that a subsequent write() can get a
 445	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
 446	 * to other and it's full, we will hang waiting for POLLOUT.
 447	 */
 448	if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
 449		return 1;
 450
 451	if (connected)
 452		unix_dgram_peer_wake_disconnect(sk, other);
 453
 454	return 0;
 455}
 456
 457static int unix_writable(const struct sock *sk)
 458{
 459	return sk->sk_state != TCP_LISTEN &&
 460	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 461}
 462
 463static void unix_write_space(struct sock *sk)
 464{
 465	struct socket_wq *wq;
 466
 467	rcu_read_lock();
 468	if (unix_writable(sk)) {
 469		wq = rcu_dereference(sk->sk_wq);
 470		if (skwq_has_sleeper(wq))
 471			wake_up_interruptible_sync_poll(&wq->wait,
 472				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 473		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 474	}
 475	rcu_read_unlock();
 476}
 477
 478/* When dgram socket disconnects (or changes its peer), we clear its receive
 479 * queue of packets that arrived from the previous peer. First, this allows us to do
 480 * flow control based only on wmem_alloc; second, an sk connected to a peer
 481 * may receive messages only from that peer. */
 482static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 483{
 484	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 485		skb_queue_purge(&sk->sk_receive_queue);
 486		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 487
 488		/* If one link of a bidirectional dgram pipe is disconnected,
 489		 * we signal an error. Messages are lost. Do not do this
 490		 * when the peer was not connected to us.
 491		 */
 492		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
 493			other->sk_err = ECONNRESET;
 494			sk_error_report(other);
 495		}
 496	}
 497}
 498
 499static void unix_sock_destructor(struct sock *sk)
 500{
 501	struct unix_sock *u = unix_sk(sk);
 502
 503	skb_queue_purge(&sk->sk_receive_queue);
 504
 505	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 506	WARN_ON(!sk_unhashed(sk));
 507	WARN_ON(sk->sk_socket);
 508	if (!sock_flag(sk, SOCK_DEAD)) {
 509		pr_info("Attempt to release alive unix socket: %p\n", sk);
 510		return;
 511	}
 512
 513	if (u->addr)
 514		unix_release_addr(u->addr);
 515
 516	atomic_long_dec(&unix_nr_socks);
 517	local_bh_disable();
 518	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 519	local_bh_enable();
 520#ifdef UNIX_REFCNT_DEBUG
 521	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
 522		atomic_long_read(&unix_nr_socks));
 523#endif
 524}
 525
 526static void unix_release_sock(struct sock *sk, int embrion)
 527{
 528	struct unix_sock *u = unix_sk(sk);
 529	struct path path;
 530	struct sock *skpair;
 531	struct sk_buff *skb;
 532	int state;
 533
 534	unix_remove_socket(sk);
 535
 536	/* Clear state */
 537	unix_state_lock(sk);
 538	sock_orphan(sk);
 539	sk->sk_shutdown = SHUTDOWN_MASK;
 540	path	     = u->path;
 541	u->path.dentry = NULL;
 542	u->path.mnt = NULL;
 543	state = sk->sk_state;
 544	sk->sk_state = TCP_CLOSE;
 545
 546	skpair = unix_peer(sk);
 547	unix_peer(sk) = NULL;
 548
 549	unix_state_unlock(sk);
 550
 551	wake_up_interruptible_all(&u->peer_wait);
 552
 553	if (skpair != NULL) {
 554		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 555			unix_state_lock(skpair);
 556			/* No more writes */
 557			skpair->sk_shutdown = SHUTDOWN_MASK;
 558			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
 559				skpair->sk_err = ECONNRESET;
 560			unix_state_unlock(skpair);
 561			skpair->sk_state_change(skpair);
 562			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 563		}
 564
 565		unix_dgram_peer_wake_disconnect(sk, skpair);
 566		sock_put(skpair); /* It may now die */
 567	}
 568
 569	/* Try to flush out this socket. Throw out buffers at least */
 570
 571	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 572		if (state == TCP_LISTEN)
 573			unix_release_sock(skb->sk, 1);
 574		/* passed fds are erased in the kfree_skb hook	      */
 575		UNIXCB(skb).consumed = skb->len;
 576		kfree_skb(skb);
 577	}
 578
 579	if (path.dentry)
 580		path_put(&path);
 581
 582	sock_put(sk);
 583
 584	/* ---- Socket is dead now and most probably destroyed ---- */
 585
 586	/*
 587	 * Fixme: BSD difference: In BSD all sockets connected to us get
 588	 *	  ECONNRESET and we die on the spot. In Linux we behave
 589	 *	  like files and pipes do and wait for the last
 590	 *	  dereference.
 591	 *
 592	 * Can't we simply set sock->err?
 593	 *
 594	 *	  What does the above comment talk about? --ANK(980817)
 595	 */
 596
 597	if (unix_tot_inflight)
 598		unix_gc();		/* Garbage collect fds */
 599}
 600
 601static void init_peercred(struct sock *sk)
 602{
 603	const struct cred *old_cred;
 604	struct pid *old_pid;
 605
 606	spin_lock(&sk->sk_peer_lock);
 607	old_pid = sk->sk_peer_pid;
 608	old_cred = sk->sk_peer_cred;
 609	sk->sk_peer_pid  = get_pid(task_tgid(current));
 610	sk->sk_peer_cred = get_current_cred();
 611	spin_unlock(&sk->sk_peer_lock);
 612
 613	put_pid(old_pid);
 614	put_cred(old_cred);
 615}
 616
 617static void copy_peercred(struct sock *sk, struct sock *peersk)
 618{
 619	const struct cred *old_cred;
 620	struct pid *old_pid;
 621
 622	if (sk < peersk) {
 623		spin_lock(&sk->sk_peer_lock);
 624		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 625	} else {
 626		spin_lock(&peersk->sk_peer_lock);
 627		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 628	}
 629	old_pid = sk->sk_peer_pid;
 630	old_cred = sk->sk_peer_cred;
 631	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
 632	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 633
 634	spin_unlock(&sk->sk_peer_lock);
 635	spin_unlock(&peersk->sk_peer_lock);
 636
 637	put_pid(old_pid);
 638	put_cred(old_cred);
 639}
 640
 641static int unix_listen(struct socket *sock, int backlog)
 642{
 643	int err;
 644	struct sock *sk = sock->sk;
 645	struct unix_sock *u = unix_sk(sk);
 646
 647	err = -EOPNOTSUPP;
 648	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 649		goto out;	/* Only stream/seqpacket sockets accept */
 650	err = -EINVAL;
 651	if (!u->addr)
 652		goto out;	/* No listens on an unbound socket */
 653	unix_state_lock(sk);
 654	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
 655		goto out_unlock;
 656	if (backlog > sk->sk_max_ack_backlog)
 657		wake_up_interruptible_all(&u->peer_wait);
 658	sk->sk_max_ack_backlog	= backlog;
 659	sk->sk_state		= TCP_LISTEN;
 660	/* set credentials so connect can copy them */
 661	init_peercred(sk);
 662	err = 0;
 663
 664out_unlock:
 665	unix_state_unlock(sk);
 666out:
 667	return err;
 668}
 669
 670static int unix_release(struct socket *);
 671static int unix_bind(struct socket *, struct sockaddr *, int);
 672static int unix_stream_connect(struct socket *, struct sockaddr *,
 673			       int addr_len, int flags);
 674static int unix_socketpair(struct socket *, struct socket *);
 675static int unix_accept(struct socket *, struct socket *, int, bool);
 676static int unix_getname(struct socket *, struct sockaddr *, int);
 677static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
 678static __poll_t unix_dgram_poll(struct file *, struct socket *,
 679				    poll_table *);
 680static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 681#ifdef CONFIG_COMPAT
 682static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 683#endif
 684static int unix_shutdown(struct socket *, int);
 685static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 686static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
 687static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
 688				    size_t size, int flags);
 689static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
 690				       struct pipe_inode_info *, size_t size,
 691				       unsigned int flags);
 692static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 693static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 694static int unix_dgram_connect(struct socket *, struct sockaddr *,
 695			      int, int);
 696static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
 697static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
 698				  int);
 699
 700static int unix_set_peek_off(struct sock *sk, int val)
 701{
 702	struct unix_sock *u = unix_sk(sk);
 703
 704	if (mutex_lock_interruptible(&u->iolock))
 705		return -EINTR;
 706
 707	sk->sk_peek_off = val;
 708	mutex_unlock(&u->iolock);
 709
 710	return 0;
 711}
 712
 713#ifdef CONFIG_PROC_FS
 714static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 715{
 716	struct sock *sk = sock->sk;
 717	struct unix_sock *u;
 718
 719	if (sk) {
 720		u = unix_sk(sock->sk);
 721		seq_printf(m, "scm_fds: %u\n",
 722			   atomic_read(&u->scm_stat.nr_fds));
 723	}
 724}
 725#else
 726#define unix_show_fdinfo NULL
 727#endif
 728
 729static const struct proto_ops unix_stream_ops = {
 730	.family =	PF_UNIX,
 731	.owner =	THIS_MODULE,
 732	.release =	unix_release,
 733	.bind =		unix_bind,
 734	.connect =	unix_stream_connect,
 735	.socketpair =	unix_socketpair,
 736	.accept =	unix_accept,
 737	.getname =	unix_getname,
 738	.poll =		unix_poll,
 739	.ioctl =	unix_ioctl,
 740#ifdef CONFIG_COMPAT
 741	.compat_ioctl =	unix_compat_ioctl,
 742#endif
 743	.listen =	unix_listen,
 744	.shutdown =	unix_shutdown,
 745	.sendmsg =	unix_stream_sendmsg,
 746	.recvmsg =	unix_stream_recvmsg,
 747	.mmap =		sock_no_mmap,
 748	.sendpage =	unix_stream_sendpage,
 749	.splice_read =	unix_stream_splice_read,
 750	.set_peek_off =	unix_set_peek_off,
 751	.show_fdinfo =	unix_show_fdinfo,
 752};
 753
 754static const struct proto_ops unix_dgram_ops = {
 755	.family =	PF_UNIX,
 756	.owner =	THIS_MODULE,
 757	.release =	unix_release,
 758	.bind =		unix_bind,
 759	.connect =	unix_dgram_connect,
 760	.socketpair =	unix_socketpair,
 761	.accept =	sock_no_accept,
 762	.getname =	unix_getname,
 763	.poll =		unix_dgram_poll,
 764	.ioctl =	unix_ioctl,
 765#ifdef CONFIG_COMPAT
 766	.compat_ioctl =	unix_compat_ioctl,
 767#endif
 768	.listen =	sock_no_listen,
 769	.shutdown =	unix_shutdown,
 770	.sendmsg =	unix_dgram_sendmsg,
 771	.recvmsg =	unix_dgram_recvmsg,
 772	.mmap =		sock_no_mmap,
 773	.sendpage =	sock_no_sendpage,
 774	.set_peek_off =	unix_set_peek_off,
 775	.show_fdinfo =	unix_show_fdinfo,
 776};
 777
 778static const struct proto_ops unix_seqpacket_ops = {
 779	.family =	PF_UNIX,
 780	.owner =	THIS_MODULE,
 781	.release =	unix_release,
 782	.bind =		unix_bind,
 783	.connect =	unix_stream_connect,
 784	.socketpair =	unix_socketpair,
 785	.accept =	unix_accept,
 786	.getname =	unix_getname,
 787	.poll =		unix_dgram_poll,
 788	.ioctl =	unix_ioctl,
 789#ifdef CONFIG_COMPAT
 790	.compat_ioctl =	unix_compat_ioctl,
 791#endif
 792	.listen =	unix_listen,
 793	.shutdown =	unix_shutdown,
 794	.sendmsg =	unix_seqpacket_sendmsg,
 795	.recvmsg =	unix_seqpacket_recvmsg,
 796	.mmap =		sock_no_mmap,
 797	.sendpage =	sock_no_sendpage,
 798	.set_peek_off =	unix_set_peek_off,
 799	.show_fdinfo =	unix_show_fdinfo,
 800};
 801
 802static struct proto unix_proto = {
 803	.name			= "UNIX",
 804	.owner			= THIS_MODULE,
 805	.obj_size		= sizeof(struct unix_sock),
 806};
 807
 808static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 809{
 810	struct sock *sk = NULL;
 811	struct unix_sock *u;
 812
 813	atomic_long_inc(&unix_nr_socks);
 814	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
 815		goto out;
 816
 817	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
 818	if (!sk)
 819		goto out;
 820
 821	sock_init_data(sock, sk);
 822
 823	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
 824	sk->sk_write_space	= unix_write_space;
 825	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
 826	sk->sk_destruct		= unix_sock_destructor;
 827	u	  = unix_sk(sk);
 828	u->path.dentry = NULL;
 829	u->path.mnt = NULL;
 830	spin_lock_init(&u->lock);
 831	atomic_long_set(&u->inflight, 0);
 832	INIT_LIST_HEAD(&u->link);
 833	mutex_init(&u->iolock); /* single task reading lock */
 834	mutex_init(&u->bindlock); /* single task binding lock */
 835	init_waitqueue_head(&u->peer_wait);
 836	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 837	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
 838	unix_insert_socket(unix_sockets_unbound(sk), sk);
 839out:
 840	if (sk == NULL)
 841		atomic_long_dec(&unix_nr_socks);
 842	else {
 843		local_bh_disable();
 844		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 845		local_bh_enable();
 846	}
 847	return sk;
 848}
 849
 850static int unix_create(struct net *net, struct socket *sock, int protocol,
 851		       int kern)
 852{
 853	if (protocol && protocol != PF_UNIX)
 854		return -EPROTONOSUPPORT;
 855
 856	sock->state = SS_UNCONNECTED;
 857
 858	switch (sock->type) {
 859	case SOCK_STREAM:
 860		sock->ops = &unix_stream_ops;
 861		break;
 862		/*
 863		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
 864		 *	nothing uses it.
 865		 */
 866	case SOCK_RAW:
 867		sock->type = SOCK_DGRAM;
 868		fallthrough;
 869	case SOCK_DGRAM:
 870		sock->ops = &unix_dgram_ops;
 871		break;
 872	case SOCK_SEQPACKET:
 873		sock->ops = &unix_seqpacket_ops;
 874		break;
 875	default:
 876		return -ESOCKTNOSUPPORT;
 877	}
 878
 879	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
 880}
 881
 882static int unix_release(struct socket *sock)
 883{
 884	struct sock *sk = sock->sk;
 885
 886	if (!sk)
 887		return 0;
 888
 889	unix_release_sock(sk, 0);
 890	sock->sk = NULL;
 891
 892	return 0;
 893}
 894
 895static int unix_autobind(struct socket *sock)
 896{
 897	struct sock *sk = sock->sk;
 898	struct net *net = sock_net(sk);
 899	struct unix_sock *u = unix_sk(sk);
 900	static u32 ordernum = 1;
 901	struct unix_address *addr;
 902	int err;
 903	unsigned int retries = 0;
 904
 905	err = mutex_lock_interruptible(&u->bindlock);
 906	if (err)
 907		return err;
 908
 908
 909	if (u->addr)
 910		goto out;
 911
 912	err = -ENOMEM;
 913	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
 914	if (!addr)
 915		goto out;
 916
 917	addr->name->sun_family = AF_UNIX;
 918	refcount_set(&addr->refcnt, 1);
 919
 920retry:
 921	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
 922	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
 923	addr->hash ^= sk->sk_type;
 924
 925	spin_lock(&unix_table_lock);
 926	ordernum = (ordernum+1)&0xFFFFF;
 927
 928	if (__unix_find_socket_byname(net, addr->name, addr->len, addr->hash)) {
 929		spin_unlock(&unix_table_lock);
 930		/*
 931		 * __unix_find_socket_byname() may take long time if many names
 932		 * are already in use.
 933		 */
 934		cond_resched();
 935		/* Give up if all names seem to be in use. */
 936		if (retries++ == 0xFFFFF) {
 937			err = -ENOSPC;
 938			kfree(addr);
 939			goto out;
 940		}
 941		goto retry;
 942	}
 943
 944	__unix_set_addr(sk, addr, addr->hash);
 945	spin_unlock(&unix_table_lock);
 946	err = 0;
 947
 948out:	mutex_unlock(&u->bindlock);
 949	return err;
 950}
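/*
 * Illustrative userspace sketch (not from this file): autobind is requested
 * by passing only the address family, i.e. addrlen == sizeof(sa_family_t)
 * (the sizeof(short) check in unix_bind() below); the kernel then picks an
 * abstract "\0XXXXX" name as generated above.
 */
#include <sys/socket.h>
#include <sys/un.h>

static int autobind_example(int fd)
{
	struct sockaddr_un sa = { .sun_family = AF_UNIX };

	return bind(fd, (struct sockaddr *)&sa, sizeof(sa.sun_family));
}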
 951
 952static struct sock *unix_find_other(struct net *net,
 953				    struct sockaddr_un *sunname, int len,
 954				    int type, unsigned int hash, int *error)
 955{
 956	struct sock *u;
 957	struct path path;
 958	int err = 0;
 959
 960	if (sunname->sun_path[0]) {
 961		struct inode *inode;
 962		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
 963		if (err)
 964			goto fail;
 965		inode = d_backing_inode(path.dentry);
 966		err = path_permission(&path, MAY_WRITE);
 967		if (err)
 968			goto put_fail;
 969
 970		err = -ECONNREFUSED;
 971		if (!S_ISSOCK(inode->i_mode))
 972			goto put_fail;
 973		u = unix_find_socket_byinode(inode);
 974		if (!u)
 975			goto put_fail;
 976
 977		if (u->sk_type == type)
 978			touch_atime(&path);
 979
 980		path_put(&path);
 981
 982		err = -EPROTOTYPE;
 983		if (u->sk_type != type) {
 984			sock_put(u);
 985			goto fail;
 986		}
 987	} else {
 988		err = -ECONNREFUSED;
 989		u = unix_find_socket_byname(net, sunname, len, type ^ hash);
 990		if (u) {
 991			struct dentry *dentry;
 992			dentry = unix_sk(u)->path.dentry;
 993			if (dentry)
 994				touch_atime(&unix_sk(u)->path);
 995		} else
 996			goto fail;
 997	}
 998	return u;
 999
1000put_fail:
1001	path_put(&path);
1002fail:
1003	*error = err;
1004	return NULL;
1005}
1006
1007static int unix_bind_bsd(struct sock *sk, struct unix_address *addr)
1008{
1009	struct unix_sock *u = unix_sk(sk);
1010	umode_t mode = S_IFSOCK |
1011	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1012	struct user_namespace *ns; // barf...
1013	struct path parent;
1014	struct dentry *dentry;
1015	unsigned int hash;
1016	int err;
1017
1018	/*
1019	 * Get the parent directory, calculate the hash for last
1020	 * component.
1021	 */
1022	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1023	if (IS_ERR(dentry))
1024		return PTR_ERR(dentry);
1025	ns = mnt_user_ns(parent.mnt);
1026
1027	/*
1028	 * All right, let's create it.
1029	 */
1030	err = security_path_mknod(&parent, dentry, mode, 0);
1031	if (!err)
1032		err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
1033	if (err)
1034		goto out;
1035	err = mutex_lock_interruptible(&u->bindlock);
1036	if (err)
1037		goto out_unlink;
1038	if (u->addr)
1039		goto out_unlock;
1040
1041	addr->hash = UNIX_HASH_SIZE;
1042	hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1043	spin_lock(&unix_table_lock);
1044	u->path.mnt = mntget(parent.mnt);
1045	u->path.dentry = dget(dentry);
1046	__unix_set_addr(sk, addr, hash);
1047	spin_unlock(&unix_table_lock);
1048	mutex_unlock(&u->bindlock);
1049	done_path_create(&parent, dentry);
1050	return 0;
1051
1052out_unlock:
1053	mutex_unlock(&u->bindlock);
1054	err = -EINVAL;
1055out_unlink:
1056	/* failed after successful mknod?  unlink what we'd created... */
1057	vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
1058out:
1059	done_path_create(&parent, dentry);
1060	return err;
1061}
1062
1063static int unix_bind_abstract(struct sock *sk, struct unix_address *addr)
1064{
1065	struct unix_sock *u = unix_sk(sk);
1066	int err;
1067
1068	err = mutex_lock_interruptible(&u->bindlock);
1069	if (err)
1070		return err;
1071
1072	if (u->addr) {
1073		mutex_unlock(&u->bindlock);
1074		return -EINVAL;
1075	}
1076
1077	spin_lock(&unix_table_lock);
1078	if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
1079				      addr->hash)) {
1080		spin_unlock(&unix_table_lock);
1081		mutex_unlock(&u->bindlock);
1082		return -EADDRINUSE;
1083	}
1084	__unix_set_addr(sk, addr, addr->hash);
1085	spin_unlock(&unix_table_lock);
1086	mutex_unlock(&u->bindlock);
1087	return 0;
1088}
1089
1090static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1091{
1092	struct sock *sk = sock->sk;
1093	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1094	char *sun_path = sunaddr->sun_path;
1095	int err;
1096	unsigned int hash;
1097	struct unix_address *addr;
1098
1099	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
1100	    sunaddr->sun_family != AF_UNIX)
1101		return -EINVAL;
1102
1103	if (addr_len == sizeof(short))
1104		return unix_autobind(sock);
1105
1106	err = unix_mkname(sunaddr, addr_len, &hash);
1107	if (err < 0)
1108		return err;
1109	addr_len = err;
1110	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1111	if (!addr)
1112		return -ENOMEM;
1113
1114	memcpy(addr->name, sunaddr, addr_len);
1115	addr->len = addr_len;
1116	addr->hash = hash ^ sk->sk_type;
1117	refcount_set(&addr->refcnt, 1);
1118
1119	if (sun_path[0])
1120		err = unix_bind_bsd(sk, addr);
1121	else
1122		err = unix_bind_abstract(sk, addr);
1123	if (err)
1124		unix_release_addr(addr);
1125	return err == -EEXIST ? -EADDRINUSE : err;
1126}
1127
1128static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1129{
1130	if (unlikely(sk1 == sk2) || !sk2) {
1131		unix_state_lock(sk1);
1132		return;
1133	}
1134	if (sk1 < sk2) {
1135		unix_state_lock(sk1);
1136		unix_state_lock_nested(sk2);
1137	} else {
1138		unix_state_lock(sk2);
1139		unix_state_lock_nested(sk1);
1140	}
1141}
1142
1143static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1144{
1145	if (unlikely(sk1 == sk2) || !sk2) {
1146		unix_state_unlock(sk1);
1147		return;
1148	}
1149	unix_state_unlock(sk1);
1150	unix_state_unlock(sk2);
1151}
1152
1153static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1154			      int alen, int flags)
1155{
1156	struct sock *sk = sock->sk;
1157	struct net *net = sock_net(sk);
1158	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1159	struct sock *other;
1160	unsigned int hash;
1161	int err;
1162
1163	err = -EINVAL;
1164	if (alen < offsetofend(struct sockaddr, sa_family))
1165		goto out;
1166
1167	if (addr->sa_family != AF_UNSPEC) {
1168		err = unix_mkname(sunaddr, alen, &hash);
1169		if (err < 0)
1170			goto out;
1171		alen = err;
1172
1173		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1174		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1175			goto out;
1176
1177restart:
1178		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1179		if (!other)
1180			goto out;
1181
1182		unix_state_double_lock(sk, other);
1183
1184		/* Apparently VFS overslept socket death. Retry. */
1185		if (sock_flag(other, SOCK_DEAD)) {
1186			unix_state_double_unlock(sk, other);
1187			sock_put(other);
1188			goto restart;
1189		}
1190
1191		err = -EPERM;
1192		if (!unix_may_send(sk, other))
1193			goto out_unlock;
1194
1195		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1196		if (err)
1197			goto out_unlock;
1198
1199	} else {
1200		/*
1201		 *	1003.1g breaking connected state with AF_UNSPEC
1202		 */
1203		other = NULL;
1204		unix_state_double_lock(sk, other);
1205	}
1206
1207	/*
1208	 * If it was connected, reconnect.
1209	 */
1210	if (unix_peer(sk)) {
1211		struct sock *old_peer = unix_peer(sk);
1212		unix_peer(sk) = other;
1213		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1214
1215		unix_state_double_unlock(sk, other);
1216
1217		if (other != old_peer)
1218			unix_dgram_disconnected(sk, old_peer);
1219		sock_put(old_peer);
1220	} else {
1221		unix_peer(sk) = other;
1222		unix_state_double_unlock(sk, other);
1223	}
1224	return 0;
1225
1226out_unlock:
1227	unix_state_double_unlock(sk, other);
1228	sock_put(other);
1229out:
1230	return err;
1231}
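/*
 * Illustrative userspace sketch (not from this file): the AF_UNSPEC branch
 * above implements the 1003.1g rule that a datagram association can be
 * dissolved by "connecting" to the unspecified address.
 */
#include <sys/socket.h>

static int dgram_disconnect(int fd)
{
	struct sockaddr sa = { .sa_family = AF_UNSPEC };

	return connect(fd, &sa, sizeof(sa));
}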
1232
1233static long unix_wait_for_peer(struct sock *other, long timeo)
1234	__releases(&unix_sk(other)->lock)
1235{
1236	struct unix_sock *u = unix_sk(other);
1237	int sched;
1238	DEFINE_WAIT(wait);
1239
1240	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1241
1242	sched = !sock_flag(other, SOCK_DEAD) &&
1243		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1244		unix_recvq_full(other);
1245
1246	unix_state_unlock(other);
1247
1248	if (sched)
1249		timeo = schedule_timeout(timeo);
1250
1251	finish_wait(&u->peer_wait, &wait);
1252	return timeo;
1253}
1254
1255static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1256			       int addr_len, int flags)
1257{
1258	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1259	struct sock *sk = sock->sk;
1260	struct net *net = sock_net(sk);
1261	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1262	struct sock *newsk = NULL;
1263	struct sock *other = NULL;
1264	struct sk_buff *skb = NULL;
1265	unsigned int hash;
1266	int st;
1267	int err;
1268	long timeo;
1269
1270	err = unix_mkname(sunaddr, addr_len, &hash);
1271	if (err < 0)
1272		goto out;
1273	addr_len = err;
1274
1275	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1276	    (err = unix_autobind(sock)) != 0)
1277		goto out;
1278
1279	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1280
1281	/* First of all allocate resources.
1282	   If we do it after the state is locked,
1283	   we will have to recheck everything again anyway.
1284	 */
1285
1286	err = -ENOMEM;
1287
1288	/* create new sock for complete connection */
1289	newsk = unix_create1(sock_net(sk), NULL, 0);
1290	if (newsk == NULL)
1291		goto out;
1292
1293	/* Allocate skb for sending to listening sock */
1294	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1295	if (skb == NULL)
1296		goto out;
1297
1298restart:
1299	/*  Find listening sock. */
1300	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1301	if (!other)
1302		goto out;
1303
1304	/* Latch state of peer */
1305	unix_state_lock(other);
1306
1307	/* Apparently VFS overslept socket death. Retry. */
1308	if (sock_flag(other, SOCK_DEAD)) {
1309		unix_state_unlock(other);
1310		sock_put(other);
1311		goto restart;
1312	}
1313
1314	err = -ECONNREFUSED;
1315	if (other->sk_state != TCP_LISTEN)
1316		goto out_unlock;
1317	if (other->sk_shutdown & RCV_SHUTDOWN)
1318		goto out_unlock;
1319
1320	if (unix_recvq_full(other)) {
1321		err = -EAGAIN;
1322		if (!timeo)
1323			goto out_unlock;
1324
1325		timeo = unix_wait_for_peer(other, timeo);
1326
1327		err = sock_intr_errno(timeo);
1328		if (signal_pending(current))
1329			goto out;
1330		sock_put(other);
1331		goto restart;
1332	}
1333
1334	/* Latch our state.
1335
1336	   It is a tricky place. We need to grab our state lock and cannot
1337	   drop the lock on the peer. It is dangerous because a deadlock is
1338	   possible. The connect-to-self case and simultaneous
1339	   attempts to connect are eliminated by checking the socket
1340	   state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1341	   check this before attempting to grab the lock.
1342
1343	   Well, and we have to recheck the state after the socket is locked.
1344	 */
1345	st = sk->sk_state;
1346
1347	switch (st) {
1348	case TCP_CLOSE:
1349		/* This is ok... continue with connect */
1350		break;
1351	case TCP_ESTABLISHED:
1352		/* Socket is already connected */
1353		err = -EISCONN;
1354		goto out_unlock;
1355	default:
1356		err = -EINVAL;
1357		goto out_unlock;
1358	}
1359
1360	unix_state_lock_nested(sk);
1361
1362	if (sk->sk_state != st) {
1363		unix_state_unlock(sk);
1364		unix_state_unlock(other);
1365		sock_put(other);
1366		goto restart;
1367	}
1368
1369	err = security_unix_stream_connect(sk, other, newsk);
1370	if (err) {
1371		unix_state_unlock(sk);
1372		goto out_unlock;
1373	}
1374
1375	/* The way is open! Quickly set all the necessary fields... */
1376
1377	sock_hold(sk);
1378	unix_peer(newsk)	= sk;
1379	newsk->sk_state		= TCP_ESTABLISHED;
1380	newsk->sk_type		= sk->sk_type;
1381	init_peercred(newsk);
1382	newu = unix_sk(newsk);
1383	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1384	otheru = unix_sk(other);
1385
1386	/* copy address information from listening to new sock
1387	 *
1388	 * The contents of *(otheru->addr) and otheru->path
1389	 * are seen fully set up here, since we have found
1390	 * otheru in hash under unix_table_lock.  Insertion
1391	 * into the hash chain we'd found it in had been done
1392	 * in an earlier critical area protected by unix_table_lock,
1393	 * the same one where we'd set *(otheru->addr) contents,
1394	 * as well as otheru->path and otheru->addr itself.
1395	 *
1396	 * Using smp_store_release() here to set newu->addr
1397	 * is enough to make those stores, as well as stores
1398	 * to newu->path visible to anyone who gets newu->addr
1399	 * by smp_load_acquire().  IOW, the same guarantees
1400	 * as for unix_sock instances bound in unix_bind() or
1401	 * in unix_autobind().
1402	 */
1403	if (otheru->path.dentry) {
1404		path_get(&otheru->path);
1405		newu->path = otheru->path;
1406	}
1407	refcount_inc(&otheru->addr->refcnt);
1408	smp_store_release(&newu->addr, otheru->addr);
1409
1410	/* Set credentials */
1411	copy_peercred(sk, other);
1412
1413	sock->state	= SS_CONNECTED;
1414	sk->sk_state	= TCP_ESTABLISHED;
1415	sock_hold(newsk);
1416
1417	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1418	unix_peer(sk)	= newsk;
1419
1420	unix_state_unlock(sk);
1421
1422	/* take ten and send info to listening sock */
1423	spin_lock(&other->sk_receive_queue.lock);
1424	__skb_queue_tail(&other->sk_receive_queue, skb);
1425	spin_unlock(&other->sk_receive_queue.lock);
1426	unix_state_unlock(other);
1427	other->sk_data_ready(other);
1428	sock_put(other);
1429	return 0;
1430
1431out_unlock:
1432	if (other)
1433		unix_state_unlock(other);
1434
1435out:
1436	kfree_skb(skb);
1437	if (newsk)
1438		unix_release_sock(newsk, 0);
1439	if (other)
1440		sock_put(other);
1441	return err;
1442}
1443
1444static int unix_socketpair(struct socket *socka, struct socket *sockb)
1445{
1446	struct sock *ska = socka->sk, *skb = sockb->sk;
1447
1448	/* Join our sockets back to back */
1449	sock_hold(ska);
1450	sock_hold(skb);
1451	unix_peer(ska) = skb;
1452	unix_peer(skb) = ska;
1453	init_peercred(ska);
1454	init_peercred(skb);
1455
1456	if (ska->sk_type != SOCK_DGRAM) {
1457		ska->sk_state = TCP_ESTABLISHED;
1458		skb->sk_state = TCP_ESTABLISHED;
1459		socka->state  = SS_CONNECTED;
1460		sockb->state  = SS_CONNECTED;
1461	}
1462	return 0;
1463}
1464
1465static void unix_sock_inherit_flags(const struct socket *old,
1466				    struct socket *new)
1467{
1468	if (test_bit(SOCK_PASSCRED, &old->flags))
1469		set_bit(SOCK_PASSCRED, &new->flags);
1470	if (test_bit(SOCK_PASSSEC, &old->flags))
1471		set_bit(SOCK_PASSSEC, &new->flags);
1472}
1473
1474static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1475		       bool kern)
1476{
1477	struct sock *sk = sock->sk;
1478	struct sock *tsk;
1479	struct sk_buff *skb;
1480	int err;
1481
1482	err = -EOPNOTSUPP;
1483	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1484		goto out;
1485
1486	err = -EINVAL;
1487	if (sk->sk_state != TCP_LISTEN)
1488		goto out;
1489
1490	/* If socket state is TCP_LISTEN it cannot change (for now...),
1491	 * so that no locks are necessary.
1492	 */
1493
1494	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1495	if (!skb) {
1496		/* This means receive shutdown. */
1497		if (err == 0)
1498			err = -EINVAL;
1499		goto out;
1500	}
1501
1502	tsk = skb->sk;
1503	skb_free_datagram(sk, skb);
1504	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1505
1506	/* attach accepted sock to socket */
1507	unix_state_lock(tsk);
1508	newsock->state = SS_CONNECTED;
1509	unix_sock_inherit_flags(sock, newsock);
1510	sock_graft(tsk, newsock);
1511	unix_state_unlock(tsk);
1512	return 0;
1513
1514out:
1515	return err;
1516}
1517
1518
1519static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1520{
1521	struct sock *sk = sock->sk;
1522	struct unix_address *addr;
1523	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1524	int err = 0;
1525
1526	if (peer) {
1527		sk = unix_peer_get(sk);
1528
1529		err = -ENOTCONN;
1530		if (!sk)
1531			goto out;
1532		err = 0;
1533	} else {
1534		sock_hold(sk);
1535	}
1536
1537	addr = smp_load_acquire(&unix_sk(sk)->addr);
1538	if (!addr) {
1539		sunaddr->sun_family = AF_UNIX;
1540		sunaddr->sun_path[0] = 0;
1541		err = sizeof(short);
1542	} else {
1543		err = addr->len;
1544		memcpy(sunaddr, addr->name, addr->len);
1545	}
1546	sock_put(sk);
1547out:
1548	return err;
1549}
1550
1551static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1552{
1553	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1554
1555	/*
1556	 * Garbage collection of unix sockets starts by selecting a set of
1557	 * candidate sockets which have reference only from being in flight
1558	 * (total_refs == inflight_refs).  This condition is checked once during
1559	 * the candidate collection phase, and candidates are marked as such, so
1560	 * that non-candidates can later be ignored.  While inflight_refs is
1561	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1562	 * is an instantaneous decision.
1563	 *
1564	 * Once a candidate, however, the socket must not be reinstalled into a
1565	 * file descriptor while the garbage collection is in progress.
1566	 *
1567	 * If the above conditions are met, then the directed graph of
1568	 * candidates (*) does not change while unix_gc_lock is held.
1569	 *
1570	 * Any operation that changes the file count through file descriptors
1571	 * (dup, close, sendmsg) does not change the graph since candidates are
1572	 * not installed in fds.
1573	 *
1574	 * Dequeueing a candidate via recvmsg would install it into an fd, but
1575	 * that takes unix_gc_lock to decrement the inflight count, so it's
1576	 * serialized with garbage collection.
1577	 *
1578	 * MSG_PEEK is special in that it does not change the inflight count,
1579	 * yet does install the socket into an fd.  The following lock/unlock
1580	 * pair is to ensure serialization with garbage collection.  It must be
1581	 * done between incrementing the file count and installing the file into
1582	 * an fd.
1583	 *
1584	 * If garbage collection starts after the barrier provided by the
1585	 * lock/unlock, then it will see the elevated refcount and not mark this
1586	 * as a candidate.  If a garbage collection is already in progress
1587	 * before the file count was incremented, then the lock/unlock pair will
1588	 * ensure that garbage collection is finished before progressing to
1589	 * installing the fd.
1590	 *
1591	 * (*) A -> B where B is on the queue of A or B is on the queue of C
1592	 * which is on the queue of listening socket A.
1593	 */
1594	spin_lock(&unix_gc_lock);
1595	spin_unlock(&unix_gc_lock);
1596}
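/*
 * Illustrative userspace sketch (not from this file): a MSG_PEEK receive of
 * an SCM_RIGHTS message.  The peeked skb stays queued (its in-flight
 * reference is unchanged), yet real file descriptors are installed in the
 * caller, which is exactly why unix_peek_fds() must synchronise with the
 * garbage collector as described above.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int peek_one_fd(int sock)
{
	char data[128], cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	int fd = -1;

	if (recvmsg(sock, &msg, MSG_PEEK) < 0)
		return -1;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS)
			memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
	return fd;	/* the message itself is still on the queue */
}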
1597
1598static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1599{
1600	int err = 0;
1601
1602	UNIXCB(skb).pid  = get_pid(scm->pid);
1603	UNIXCB(skb).uid = scm->creds.uid;
1604	UNIXCB(skb).gid = scm->creds.gid;
1605	UNIXCB(skb).fp = NULL;
1606	unix_get_secdata(scm, skb);
1607	if (scm->fp && send_fds)
1608		err = unix_attach_fds(scm, skb);
1609
1610	skb->destructor = unix_destruct_scm;
1611	return err;
1612}
1613
1614static bool unix_passcred_enabled(const struct socket *sock,
1615				  const struct sock *other)
1616{
1617	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1618	       !other->sk_socket ||
1619	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1620}
1621
1622/*
1623 * Some apps rely on write() giving SCM_CREDENTIALS
1624 * We include credentials if source or destination socket
1625 * asserted SOCK_PASSCRED.
1626 */
1627static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1628			    const struct sock *other)
1629{
1630	if (UNIXCB(skb).pid)
1631		return;
1632	if (unix_passcred_enabled(sock, other)) {
1633		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1634		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1635	}
1636}
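/*
 * Illustrative userspace sketch (not from this file): the receiver opts in
 * with SO_PASSCRED, after which plain write()s from the peer are delivered
 * with an SCM_CREDENTIALS control message filled in by maybe_add_creds()
 * above.
 */
#include <sys/socket.h>

static int enable_passcred(int sock)
{
	int on = 1;

	/* after this, recvmsg() can read a struct ucred via SCM_CREDENTIALS */
	return setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
}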
1637
1638static int maybe_init_creds(struct scm_cookie *scm,
1639			    struct socket *socket,
1640			    const struct sock *other)
1641{
1642	int err;
1643	struct msghdr msg = { .msg_controllen = 0 };
1644
1645	err = scm_send(socket, &msg, scm, false);
1646	if (err)
1647		return err;
1648
1649	if (unix_passcred_enabled(socket, other)) {
1650		scm->pid = get_pid(task_tgid(current));
1651		current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1652	}
1653	return err;
1654}
1655
1656static bool unix_skb_scm_eq(struct sk_buff *skb,
1657			    struct scm_cookie *scm)
1658{
1659	const struct unix_skb_parms *u = &UNIXCB(skb);
1660
1661	return u->pid == scm->pid &&
1662	       uid_eq(u->uid, scm->creds.uid) &&
1663	       gid_eq(u->gid, scm->creds.gid) &&
1664	       unix_secdata_eq(scm, skb);
1665}
1666
1667static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1668{
1669	struct scm_fp_list *fp = UNIXCB(skb).fp;
1670	struct unix_sock *u = unix_sk(sk);
1671
1672	if (unlikely(fp && fp->count))
1673		atomic_add(fp->count, &u->scm_stat.nr_fds);
1674}
1675
1676static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1677{
1678	struct scm_fp_list *fp = UNIXCB(skb).fp;
1679	struct unix_sock *u = unix_sk(sk);
1680
1681	if (unlikely(fp && fp->count))
1682		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1683}
1684
1685/*
1686 *	Send AF_UNIX data.
1687 */
1688
1689static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1690			      size_t len)
1691{
1692	struct sock *sk = sock->sk;
1693	struct net *net = sock_net(sk);
1694	struct unix_sock *u = unix_sk(sk);
1695	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1696	struct sock *other = NULL;
1697	int namelen = 0; /* fake GCC */
1698	int err;
1699	unsigned int hash;
1700	struct sk_buff *skb;
1701	long timeo;
1702	struct scm_cookie scm;
1703	int data_len = 0;
1704	int sk_locked;
1705
1706	wait_for_unix_gc();
1707	err = scm_send(sock, msg, &scm, false);
1708	if (err < 0)
1709		return err;
1710
1711	err = -EOPNOTSUPP;
1712	if (msg->msg_flags&MSG_OOB)
1713		goto out;
1714
1715	if (msg->msg_namelen) {
1716		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1717		if (err < 0)
1718			goto out;
1719		namelen = err;
1720	} else {
1721		sunaddr = NULL;
1722		err = -ENOTCONN;
1723		other = unix_peer_get(sk);
1724		if (!other)
1725			goto out;
1726	}
1727
1728	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1729	    && (err = unix_autobind(sock)) != 0)
1730		goto out;
1731
1732	err = -EMSGSIZE;
1733	if (len > sk->sk_sndbuf - 32)
1734		goto out;
1735
1736	if (len > SKB_MAX_ALLOC) {
1737		data_len = min_t(size_t,
1738				 len - SKB_MAX_ALLOC,
1739				 MAX_SKB_FRAGS * PAGE_SIZE);
1740		data_len = PAGE_ALIGN(data_len);
1741
1742		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1743	}
1744
1745	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1746				   msg->msg_flags & MSG_DONTWAIT, &err,
1747				   PAGE_ALLOC_COSTLY_ORDER);
1748	if (skb == NULL)
1749		goto out;
1750
1751	err = unix_scm_to_skb(&scm, skb, true);
1752	if (err < 0)
1753		goto out_free;
1754
1755	skb_put(skb, len - data_len);
1756	skb->data_len = data_len;
1757	skb->len = len;
1758	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1759	if (err)
1760		goto out_free;
1761
1762	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1763
1764restart:
1765	if (!other) {
1766		err = -ECONNRESET;
1767		if (sunaddr == NULL)
1768			goto out_free;
1769
1770		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1771					hash, &err);
1772		if (other == NULL)
1773			goto out_free;
1774	}
1775
1776	if (sk_filter(other, skb) < 0) {
1777		/* Toss the packet but do not return any error to the sender */
1778		err = len;
1779		goto out_free;
1780	}
1781
1782	sk_locked = 0;
1783	unix_state_lock(other);
1784restart_locked:
1785	err = -EPERM;
1786	if (!unix_may_send(sk, other))
1787		goto out_unlock;
1788
1789	if (unlikely(sock_flag(other, SOCK_DEAD))) {
1790		/*
1791		 *	Check with 1003.1g - what should
1792		 *	datagram error
1793		 */
1794		unix_state_unlock(other);
1795		sock_put(other);
1796
1797		if (!sk_locked)
1798			unix_state_lock(sk);
1799
1800		err = 0;
1801		if (unix_peer(sk) == other) {
1802			unix_peer(sk) = NULL;
1803			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1804
1805			unix_state_unlock(sk);
1806
1807			unix_dgram_disconnected(sk, other);
1808			sock_put(other);
1809			err = -ECONNREFUSED;
1810		} else {
1811			unix_state_unlock(sk);
1812		}
1813
1814		other = NULL;
1815		if (err)
1816			goto out_free;
1817		goto restart;
1818	}
1819
1820	err = -EPIPE;
1821	if (other->sk_shutdown & RCV_SHUTDOWN)
1822		goto out_unlock;
1823
1824	if (sk->sk_type != SOCK_SEQPACKET) {
1825		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1826		if (err)
1827			goto out_unlock;
1828	}
1829
1830	/* other == sk && unix_peer(other) != sk if
1831	 * - unix_peer(sk) == NULL, destination address bound to sk
1832	 * - unix_peer(sk) == sk by time of get but disconnected before lock
1833	 */
1834	if (other != sk &&
1835	    unlikely(unix_peer(other) != sk &&
1836	    unix_recvq_full_lockless(other))) {
1837		if (timeo) {
1838			timeo = unix_wait_for_peer(other, timeo);
1839
1840			err = sock_intr_errno(timeo);
1841			if (signal_pending(current))
1842				goto out_free;
1843
1844			goto restart;
1845		}
1846
1847		if (!sk_locked) {
1848			unix_state_unlock(other);
1849			unix_state_double_lock(sk, other);
1850		}
1851
1852		if (unix_peer(sk) != other ||
1853		    unix_dgram_peer_wake_me(sk, other)) {
1854			err = -EAGAIN;
1855			sk_locked = 1;
1856			goto out_unlock;
1857		}
1858
1859		if (!sk_locked) {
1860			sk_locked = 1;
1861			goto restart_locked;
1862		}
1863	}
1864
1865	if (unlikely(sk_locked))
1866		unix_state_unlock(sk);
1867
1868	if (sock_flag(other, SOCK_RCVTSTAMP))
1869		__net_timestamp(skb);
1870	maybe_add_creds(skb, sock, other);
1871	scm_stat_add(other, skb);
1872	skb_queue_tail(&other->sk_receive_queue, skb);
1873	unix_state_unlock(other);
1874	other->sk_data_ready(other);
1875	sock_put(other);
1876	scm_destroy(&scm);
1877	return len;
1878
1879out_unlock:
1880	if (sk_locked)
1881		unix_state_unlock(sk);
1882	unix_state_unlock(other);
1883out_free:
1884	kfree_skb(skb);
1885out:
1886	if (other)
1887		sock_put(other);
1888	scm_destroy(&scm);
1889	return err;
1890}
1891
1892/* We use paged skbs for stream sockets, and limit occupancy to 32768
1893 * bytes, and a minimum of a full page.
1894 */
1895#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
1896
1897static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1898			       size_t len)
1899{
1900	struct sock *sk = sock->sk;
1901	struct sock *other = NULL;
1902	int err, size;
1903	struct sk_buff *skb;
1904	int sent = 0;
1905	struct scm_cookie scm;
1906	bool fds_sent = false;
1907	int data_len;
1908
1909	wait_for_unix_gc();
1910	err = scm_send(sock, msg, &scm, false);
1911	if (err < 0)
1912		return err;
1913
1914	err = -EOPNOTSUPP;
1915	if (msg->msg_flags&MSG_OOB)
1916		goto out_err;
1917
1918	if (msg->msg_namelen) {
1919		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1920		goto out_err;
1921	} else {
1922		err = -ENOTCONN;
1923		other = unix_peer(sk);
1924		if (!other)
1925			goto out_err;
1926	}
1927
1928	if (sk->sk_shutdown & SEND_SHUTDOWN)
1929		goto pipe_err;
1930
1931	while (sent < len) {
1932		size = len - sent;
1933
1934		/* Keep two messages in the pipe so it schedules better */
1935		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
 
1936
1937		/* allow fallback to order-0 allocations */
1938		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
1939
1940		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
 
 
1941
1942		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
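		/* Example, assuming 4 KiB pages: for a large write the chunk is
		 * first clamped by the sndbuf and UNIX_SKB_FRAGS_SZ limits above;
		 * roughly the first SKB_MAX_HEAD(0) bytes stay in the linear
		 * area and the (page-aligned) remainder goes into page frags
		 * allocated by sock_alloc_send_pskb() below.
		 */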
 
1943
1944		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1945					   msg->msg_flags & MSG_DONTWAIT, &err,
1946					   get_order(UNIX_SKB_FRAGS_SZ));
1947		if (!skb)
1948			goto out_err;
1949
1950		/* Only send the fds in the first buffer */
1951		err = unix_scm_to_skb(&scm, skb, !fds_sent);
1952		if (err < 0) {
1953			kfree_skb(skb);
1954			goto out_err;
1955		}
 
1956		fds_sent = true;
1957
1958		skb_put(skb, size - data_len);
1959		skb->data_len = data_len;
1960		skb->len = size;
1961		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
1962		if (err) {
1963			kfree_skb(skb);
1964			goto out_err;
1965		}
1966
1967		unix_state_lock(other);
1968
1969		if (sock_flag(other, SOCK_DEAD) ||
1970		    (other->sk_shutdown & RCV_SHUTDOWN))
1971			goto pipe_err_free;
1972
1973		maybe_add_creds(skb, sock, other);
1974		scm_stat_add(other, skb);
1975		skb_queue_tail(&other->sk_receive_queue, skb);
 
 
1976		unix_state_unlock(other);
1977		other->sk_data_ready(other);
1978		sent += size;
1979	}
1980
1981	scm_destroy(&scm);
 
1982
1983	return sent;
1984
1985pipe_err_free:
1986	unix_state_unlock(other);
1987	kfree_skb(skb);
1988pipe_err:
1989	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1990		send_sig(SIGPIPE, current, 0);
1991	err = -EPIPE;
1992out_err:
1993	scm_destroy(&scm);
 
1994	return sent ? : err;
1995}
1996
1997static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1998				    int offset, size_t size, int flags)
1999{
2000	int err;
2001	bool send_sigpipe = false;
2002	bool init_scm = true;
2003	struct scm_cookie scm;
2004	struct sock *other, *sk = socket->sk;
2005	struct sk_buff *skb, *newskb = NULL, *tail = NULL;
2006
2007	if (flags & MSG_OOB)
2008		return -EOPNOTSUPP;
2009
2010	other = unix_peer(sk);
2011	if (!other || sk->sk_state != TCP_ESTABLISHED)
2012		return -ENOTCONN;
2013
2014	if (false) {
2015alloc_skb:
2016		unix_state_unlock(other);
2017		mutex_unlock(&unix_sk(other)->iolock);
2018		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
2019					      &err, 0);
2020		if (!newskb)
2021			goto err;
2022	}
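	/* Note: the if (false) block above is reachable only via the
	 * "goto alloc_skb" jumps further down.  Both locks are dropped there
	 * first, so the possibly-sleeping allocation happens without holding
	 * other's state lock or iolock, and control then falls through to
	 * retake the iolock below.
	 */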
2023
2024	/* we must acquire the iolock because we modify skbs that are already
2025	 * queued on sk_receive_queue and adjust skb->len
2026	 */
2027	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
2028	if (err) {
2029		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
2030		goto err;
2031	}
2032
2033	if (sk->sk_shutdown & SEND_SHUTDOWN) {
2034		err = -EPIPE;
2035		send_sigpipe = true;
2036		goto err_unlock;
2037	}
2038
2039	unix_state_lock(other);
2040
2041	if (sock_flag(other, SOCK_DEAD) ||
2042	    other->sk_shutdown & RCV_SHUTDOWN) {
2043		err = -EPIPE;
2044		send_sigpipe = true;
2045		goto err_state_unlock;
2046	}
2047
2048	if (init_scm) {
2049		err = maybe_init_creds(&scm, socket, other);
2050		if (err)
2051			goto err_state_unlock;
2052		init_scm = false;
2053	}
2054
2055	skb = skb_peek_tail(&other->sk_receive_queue);
2056	if (tail && tail == skb) {
2057		skb = newskb;
2058	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
2059		if (newskb) {
2060			skb = newskb;
2061		} else {
2062			tail = skb;
2063			goto alloc_skb;
2064		}
2065	} else if (newskb) {
2066		/* this is the fast path: we can append to the tail skb, so the
2067		 * speculatively allocated newskb is not needed; note that
2068		 * consume_skb() would also be harmless with newskb == NULL
2069		 */
2070		consume_skb(newskb);
2071		newskb = NULL;
2072	}
2073
2074	if (skb_append_pagefrags(skb, page, offset, size)) {
2075		tail = skb;
2076		goto alloc_skb;
2077	}
2078
2079	skb->len += size;
2080	skb->data_len += size;
2081	skb->truesize += size;
2082	refcount_add(size, &sk->sk_wmem_alloc);
2083
2084	if (newskb) {
2085		err = unix_scm_to_skb(&scm, skb, false);
2086		if (err)
2087			goto err_state_unlock;
2088		spin_lock(&other->sk_receive_queue.lock);
2089		__skb_queue_tail(&other->sk_receive_queue, newskb);
2090		spin_unlock(&other->sk_receive_queue.lock);
2091	}
2092
2093	unix_state_unlock(other);
2094	mutex_unlock(&unix_sk(other)->iolock);
2095
2096	other->sk_data_ready(other);
2097	scm_destroy(&scm);
2098	return size;
2099
2100err_state_unlock:
2101	unix_state_unlock(other);
2102err_unlock:
2103	mutex_unlock(&unix_sk(other)->iolock);
2104err:
2105	kfree_skb(newskb);
2106	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
2107		send_sig(SIGPIPE, current, 0);
2108	if (!init_scm)
2109		scm_destroy(&scm);
2110	return err;
2111}
2112
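/* SOCK_SEQPACKET reuses the datagram send path: the socket must already be
 * connected, and any destination address supplied by the caller is ignored
 * (msg_namelen is cleared) before delegating to unix_dgram_sendmsg().
 */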
2113static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2114				  size_t len)
2115{
2116	int err;
2117	struct sock *sk = sock->sk;
2118
2119	err = sock_error(sk);
2120	if (err)
2121		return err;
2122
2123	if (sk->sk_state != TCP_ESTABLISHED)
2124		return -ENOTCONN;
2125
2126	if (msg->msg_namelen)
2127		msg->msg_namelen = 0;
2128
2129	return unix_dgram_sendmsg(sock, msg, len);
2130}
2131
2132static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2133				  size_t size, int flags)
 
2134{
2135	struct sock *sk = sock->sk;
2136
2137	if (sk->sk_state != TCP_ESTABLISHED)
2138		return -ENOTCONN;
2139
2140	return unix_dgram_recvmsg(sock, msg, size, flags);
2141}
2142
2143static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2144{
2145	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2146
2147	if (addr) {
2148		msg->msg_namelen = addr->len;
2149		memcpy(msg->msg_name, addr->name, addr->len);
 
2150	}
2151}
2152
2153static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2154			      size_t size, int flags)
 
2155{
2156	struct scm_cookie scm;
 
2157	struct sock *sk = sock->sk;
2158	struct unix_sock *u = unix_sk(sk);
2159	struct sk_buff *skb, *last;
2160	long timeo;
2161	int skip;
2162	int err;
2163
2164	err = -EOPNOTSUPP;
2165	if (flags&MSG_OOB)
2166		goto out;
2167
2168	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2169
2170	do {
2171		mutex_lock(&u->iolock);
2172
2173		skip = sk_peek_offset(sk, flags);
2174		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2175					      &skip, &err, &last);
2176		if (skb) {
2177			if (!(flags & MSG_PEEK))
2178				scm_stat_del(sk, skb);
2179			break;
2180		}
2181
2182		mutex_unlock(&u->iolock);
2183
2184		if (err != -EAGAIN)
2185			break;
2186	} while (timeo &&
2187		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2188					      &err, &timeo, last));
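	/* The loop above tries to dequeue under u->iolock; when the queue is
	 * empty it drops the mutex and, for blocking reads, sleeps in
	 * __skb_wait_for_more_packets() before retrying, so the mutex is never
	 * held while sleeping.
	 */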
2189
2190	if (!skb) { /* implies iolock unlocked */
 
2191		unix_state_lock(sk);
2192		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2193		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2194		    (sk->sk_shutdown & RCV_SHUTDOWN))
2195			err = 0;
2196		unix_state_unlock(sk);
2197		goto out;
2198	}
2199
2200	if (wq_has_sleeper(&u->peer_wait))
2201		wake_up_interruptible_sync_poll(&u->peer_wait,
2202						EPOLLOUT | EPOLLWRNORM |
2203						EPOLLWRBAND);
2204
2205	if (msg->msg_name)
2206		unix_copy_addr(msg, skb->sk);
2207
2208	if (size > skb->len - skip)
2209		size = skb->len - skip;
2210	else if (size < skb->len - skip)
2211		msg->msg_flags |= MSG_TRUNC;
2212
2213	err = skb_copy_datagram_msg(skb, skip, msg, size);
2214	if (err)
2215		goto out_free;
2216
2217	if (sock_flag(sk, SOCK_RCVTSTAMP))
2218		__sock_recv_timestamp(msg, sk, skb);
2219
2220	memset(&scm, 0, sizeof(scm));
2221
2222	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2223	unix_set_secdata(&scm, skb);
 
 
2224
2225	if (!(flags & MSG_PEEK)) {
2226		if (UNIXCB(skb).fp)
2227			unix_detach_fds(&scm, skb);
2228
2229		sk_peek_offset_bwd(sk, skb->len);
2230	} else {
2231		/* It is questionable what to do with fds on MSG_PEEK.  We could:
2232		   - not return fds at all - good, but too simple 8)
2233		   - return fds, but not return them again on the subsequent
2234		     read (the old strategy, apparently wrong)
2235		   - clone the fds (chosen here, as it is the most universal
2236		     solution)
2237
2238		   POSIX 1003.1g does not actually define this clearly
2239		   at all - but then POSIX 1003.1g doesn't define a lot of
2240		   things clearly!
2241
2242		*/
2243
2244		sk_peek_offset_fwd(sk, size);
2245
2246		if (UNIXCB(skb).fp)
2247			unix_peek_fds(&scm, skb);
2248	}
2249	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2250
2251	scm_recv(sock, msg, &scm, flags);
2252
2253out_free:
2254	skb_free_datagram(sk, skb);
2255	mutex_unlock(&u->iolock);
 
2256out:
2257	return err;
2258}
2259
2260/*
2261 *	Sleep until more data has arrived, but check for races.
2262 */
2263static long unix_stream_data_wait(struct sock *sk, long timeo,
2264				  struct sk_buff *last, unsigned int last_len,
2265				  bool freezable)
2266{
2267	struct sk_buff *tail;
2268	DEFINE_WAIT(wait);
2269
2270	unix_state_lock(sk);
2271
2272	for (;;) {
2273		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2274
2275		tail = skb_peek_tail(&sk->sk_receive_queue);
2276		if (tail != last ||
2277		    (tail && tail->len != last_len) ||
2278		    sk->sk_err ||
2279		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2280		    signal_pending(current) ||
2281		    !timeo)
2282			break;
2283
2284		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2285		unix_state_unlock(sk);
2286		if (freezable)
2287			timeo = freezable_schedule_timeout(timeo);
2288		else
2289			timeo = schedule_timeout(timeo);
2290		unix_state_lock(sk);
2291
2292		if (sock_flag(sk, SOCK_DEAD))
2293			break;
2294
2295		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2296	}
2297
2298	finish_wait(sk_sleep(sk), &wait);
2299	unix_state_unlock(sk);
2300	return timeo;
2301}
2302
2303static unsigned int unix_skb_len(const struct sk_buff *skb)
2304{
2305	return skb->len - UNIXCB(skb).consumed;
2306}
2307
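/* Shared state for the stream receive paths: unix_stream_recvmsg() and
 * unix_stream_splice_read() both drive unix_stream_read_generic() and differ
 * only in the recv_actor callback that copies each chunk out (into the msghdr
 * or into a pipe, respectively).
 */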
2308struct unix_stream_read_state {
2309	int (*recv_actor)(struct sk_buff *, int, int,
2310			  struct unix_stream_read_state *);
2311	struct socket *socket;
2312	struct msghdr *msg;
2313	struct pipe_inode_info *pipe;
2314	size_t size;
2315	int flags;
2316	unsigned int splice_flags;
2317};
2318
2319static int unix_stream_read_generic(struct unix_stream_read_state *state,
2320				    bool freezable)
 
2321{
2322	struct scm_cookie scm;
2323	struct socket *sock = state->socket;
2324	struct sock *sk = sock->sk;
2325	struct unix_sock *u = unix_sk(sk);
 
2326	int copied = 0;
2327	int flags = state->flags;
2328	int noblock = flags & MSG_DONTWAIT;
2329	bool check_creds = false;
2330	int target;
2331	int err = 0;
2332	long timeo;
2333	int skip;
2334	size_t size = state->size;
2335	unsigned int last_len;
2336
2337	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2338		err = -EINVAL;
2339		goto out;
2340	}
2341
2342	if (unlikely(flags & MSG_OOB)) {
2343		err = -EOPNOTSUPP;
2344		goto out;
2345	}
2346
2347	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2348	timeo = sock_rcvtimeo(sk, noblock);
2349
2350	memset(&scm, 0, sizeof(scm));
2351
2352	/* Lock the socket to prevent the receive queue from being reordered
2353	 * while we sleep copying data out to the caller
2354	 */
2355	mutex_lock(&u->iolock);
2356
2357	skip = max(sk_peek_offset(sk, flags), 0);
2358
2359	do {
2360		int chunk;
2361		bool drop_skb;
2362		struct sk_buff *skb, *last;
2363
2364redo:
2365		unix_state_lock(sk);
2366		if (sock_flag(sk, SOCK_DEAD)) {
2367			err = -ECONNRESET;
2368			goto unlock;
2369		}
2370		last = skb = skb_peek(&sk->sk_receive_queue);
2371		last_len = last ? last->len : 0;
2372again:
2373		if (skb == NULL) {
 
2374			if (copied >= target)
2375				goto unlock;
2376
2377			/*
2378			 *	POSIX 1003.1g mandates this order.
2379			 */
2380
2381			err = sock_error(sk);
2382			if (err)
2383				goto unlock;
2384			if (sk->sk_shutdown & RCV_SHUTDOWN)
2385				goto unlock;
2386
2387			unix_state_unlock(sk);
2388			if (!timeo) {
2389				err = -EAGAIN;
2390				break;
2391			}
2392
2393			mutex_unlock(&u->iolock);
2394
2395			timeo = unix_stream_data_wait(sk, timeo, last,
2396						      last_len, freezable);
2397
2398			if (signal_pending(current)) {
2399				err = sock_intr_errno(timeo);
2400				scm_destroy(&scm);
2401				goto out;
2402			}
2403
2404			mutex_lock(&u->iolock);
2405			goto redo;
2406unlock:
2407			unix_state_unlock(sk);
2408			break;
2409		}
2410
2411		while (skip >= unix_skb_len(skb)) {
2412			skip -= unix_skb_len(skb);
2413			last = skb;
2414			last_len = skb->len;
2415			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2416			if (!skb)
2417				goto again;
2418		}
2419
2420		unix_state_unlock(sk);
2421
2422		if (check_creds) {
2423			/* Never glue messages from different writers */
2424			if (!unix_skb_scm_eq(skb, &scm))
 
 
2425				break;
2426		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
 
2427			/* Copy credentials */
2428			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2429			unix_set_secdata(&scm, skb);
2430			check_creds = true;
2431		}
2432
2433		/* Copy address just once */
2434		if (state->msg && state->msg->msg_name) {
2435			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2436					 state->msg->msg_name);
2437			unix_copy_addr(state->msg, skb->sk);
2438			sunaddr = NULL;
2439		}
2440
2441		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2442		skb_get(skb);
2443		chunk = state->recv_actor(skb, skip, chunk, state);
2444		drop_skb = !unix_skb_len(skb);
2445		/* skb is only safe to use if !drop_skb */
2446		consume_skb(skb);
2447		if (chunk < 0) {
2448			if (copied == 0)
2449				copied = -EFAULT;
2450			break;
2451		}
2452		copied += chunk;
2453		size -= chunk;
2454
2455		if (drop_skb) {
2456			/* the skb was touched by a concurrent reader;
2457			 * we should not expect anything from this skb
2458			 * anymore and assume it invalid - we can be
2459			 * sure it was dropped from the socket queue
2460			 *
2461			 * let's report a short read
2462			 */
2463			err = 0;
2464			break;
2465		}
2466
2467		/* Mark read part of skb as used */
2468		if (!(flags & MSG_PEEK)) {
2469			UNIXCB(skb).consumed += chunk;
2470
2471			sk_peek_offset_bwd(sk, chunk);
2472
2473			if (UNIXCB(skb).fp) {
2474				scm_stat_del(sk, skb);
2475				unix_detach_fds(&scm, skb);
2476			}
2477
2478			if (unix_skb_len(skb))
 
 
2479				break;
 
2480
2481			skb_unlink(skb, &sk->sk_receive_queue);
2482			consume_skb(skb);
2483
2484			if (scm.fp)
2485				break;
2486		} else {
2487			/* It is questionable, see note in unix_dgram_recvmsg.
2488			 */
2489			if (UNIXCB(skb).fp)
2490				unix_peek_fds(&scm, skb);
2491
2492			sk_peek_offset_fwd(sk, chunk);
2493
2494			if (UNIXCB(skb).fp)
2495				break;
2496
2497			skip = 0;
2498			last = skb;
2499			last_len = skb->len;
2500			unix_state_lock(sk);
2501			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2502			if (skb)
2503				goto again;
2504			unix_state_unlock(sk);
2505			break;
2506		}
2507	} while (size);
2508
2509	mutex_unlock(&u->iolock);
2510	if (state->msg)
2511		scm_recv(sock, state->msg, &scm, flags);
2512	else
2513		scm_destroy(&scm);
2514out:
2515	return copied ? : err;
2516}
2517
2518static int unix_stream_read_actor(struct sk_buff *skb,
2519				  int skip, int chunk,
2520				  struct unix_stream_read_state *state)
2521{
2522	int ret;
2523
2524	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2525				    state->msg, chunk);
2526	return ret ?: chunk;
2527}
2528
2529static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2530			       size_t size, int flags)
2531{
2532	struct unix_stream_read_state state = {
2533		.recv_actor = unix_stream_read_actor,
2534		.socket = sock,
2535		.msg = msg,
2536		.size = size,
2537		.flags = flags
2538	};
2539
2540	return unix_stream_read_generic(&state, true);
2541}
2542
2543static int unix_stream_splice_actor(struct sk_buff *skb,
2544				    int skip, int chunk,
2545				    struct unix_stream_read_state *state)
2546{
2547	return skb_splice_bits(skb, state->socket->sk,
2548			       UNIXCB(skb).consumed + skip,
2549			       state->pipe, chunk, state->splice_flags);
2550}
2551
2552static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2553				       struct pipe_inode_info *pipe,
2554				       size_t size, unsigned int flags)
2555{
2556	struct unix_stream_read_state state = {
2557		.recv_actor = unix_stream_splice_actor,
2558		.socket = sock,
2559		.pipe = pipe,
2560		.size = size,
2561		.splice_flags = flags,
2562	};
2563
2564	if (unlikely(*ppos))
2565		return -ESPIPE;
2566
2567	if (sock->file->f_flags & O_NONBLOCK ||
2568	    flags & SPLICE_F_NONBLOCK)
2569		state.flags = MSG_DONTWAIT;
2570
2571	return unix_stream_read_generic(&state, false);
2572}
2573
2574static int unix_shutdown(struct socket *sock, int mode)
2575{
2576	struct sock *sk = sock->sk;
2577	struct sock *other;
2578
2579	if (mode < SHUT_RD || mode > SHUT_RDWR)
2580		return -EINVAL;
2581	/* This maps:
2582	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
2583	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
2584	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2585	 */
2586	++mode;
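	/* e.g. shutdown(fd, SHUT_WR): mode 1 becomes 2 == SEND_SHUTDOWN here,
	 * and the connected peer below is given RCV_SHUTDOWN so its reads see
	 * EOF.
	 */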
2587
2588	unix_state_lock(sk);
2589	sk->sk_shutdown |= mode;
2590	other = unix_peer(sk);
2591	if (other)
2592		sock_hold(other);
2593	unix_state_unlock(sk);
2594	sk->sk_state_change(sk);
2595
2596	if (other &&
2597		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2598
2599		int peer_mode = 0;
2600
2601		if (mode&RCV_SHUTDOWN)
2602			peer_mode |= SEND_SHUTDOWN;
2603		if (mode&SEND_SHUTDOWN)
2604			peer_mode |= RCV_SHUTDOWN;
2605		unix_state_lock(other);
2606		other->sk_shutdown |= peer_mode;
2607		unix_state_unlock(other);
2608		other->sk_state_change(other);
2609		if (peer_mode == SHUTDOWN_MASK)
2610			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2611		else if (peer_mode & RCV_SHUTDOWN)
2612			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2613	}
2614	if (other)
2615		sock_put(other);
2616
2617	return 0;
2618}
2619
2620long unix_inq_len(struct sock *sk)
2621{
2622	struct sk_buff *skb;
2623	long amount = 0;
2624
2625	if (sk->sk_state == TCP_LISTEN)
2626		return -EINVAL;
2627
2628	spin_lock(&sk->sk_receive_queue.lock);
2629	if (sk->sk_type == SOCK_STREAM ||
2630	    sk->sk_type == SOCK_SEQPACKET) {
2631		skb_queue_walk(&sk->sk_receive_queue, skb)
2632			amount += unix_skb_len(skb);
2633	} else {
2634		skb = skb_peek(&sk->sk_receive_queue);
2635		if (skb)
2636			amount = skb->len;
2637	}
2638	spin_unlock(&sk->sk_receive_queue.lock);
2639
2640	return amount;
2641}
2642EXPORT_SYMBOL_GPL(unix_inq_len);
2643
2644long unix_outq_len(struct sock *sk)
2645{
2646	return sk_wmem_alloc_get(sk);
2647}
2648EXPORT_SYMBOL_GPL(unix_outq_len);
2649
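/* Back-end for the SIOCUNIXFILE ioctl: hand the (CAP_NET_ADMIN) caller an
 * O_PATH file descriptor for the filesystem object this socket is bound to.
 * Illustrative userspace use (not part of the kernel sources):
 *
 *	int pfd = ioctl(unix_fd, SIOCUNIXFILE);
 *	// pfd can then be used with fstat()/readlinkat() to inspect the path
 */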
2650static int unix_open_file(struct sock *sk)
2651{
2652	struct path path;
2653	struct file *f;
2654	int fd;
2655
2656	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2657		return -EPERM;
2658
2659	if (!smp_load_acquire(&unix_sk(sk)->addr))
2660		return -ENOENT;
2661
2662	path = unix_sk(sk)->path;
2663	if (!path.dentry)
2664		return -ENOENT;
2665
2666	path_get(&path);
2667
2668	fd = get_unused_fd_flags(O_CLOEXEC);
2669	if (fd < 0)
2670		goto out;
2671
2672	f = dentry_open(&path, O_PATH, current_cred());
2673	if (IS_ERR(f)) {
2674		put_unused_fd(fd);
2675		fd = PTR_ERR(f);
2676		goto out;
2677	}
2678
2679	fd_install(fd, f);
2680out:
2681	path_put(&path);
2682
2683	return fd;
2684}
2685
2686static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2687{
2688	struct sock *sk = sock->sk;
2689	long amount = 0;
2690	int err;
2691
2692	switch (cmd) {
2693	case SIOCOUTQ:
2694		amount = unix_outq_len(sk);
2695		err = put_user(amount, (int __user *)arg);
2696		break;
2697	case SIOCINQ:
2698		amount = unix_inq_len(sk);
2699		if (amount < 0)
2700			err = amount;
2701		else
2702			err = put_user(amount, (int __user *)arg);
2703		break;
2704	case SIOCUNIXFILE:
2705		err = unix_open_file(sk);
2706		break;
2707	default:
2708		err = -ENOIOCTLCMD;
2709		break;
2710	}
2711	return err;
2712}
2713
2714#ifdef CONFIG_COMPAT
2715static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2716{
2717	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
2718}
2719#endif
2720
2721static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2722{
2723	struct sock *sk = sock->sk;
2724	__poll_t mask;
2725
2726	sock_poll_wait(file, sock, wait);
2727	mask = 0;
2728
2729	/* exceptional events? */
2730	if (sk->sk_err)
2731		mask |= EPOLLERR;
2732	if (sk->sk_shutdown == SHUTDOWN_MASK)
2733		mask |= EPOLLHUP;
2734	if (sk->sk_shutdown & RCV_SHUTDOWN)
2735		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
2736
2737	/* readable? */
2738	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
2739		mask |= EPOLLIN | EPOLLRDNORM;
2740
2741	/* Connection-based need to check for termination and startup */
2742	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2743	    sk->sk_state == TCP_CLOSE)
2744		mask |= EPOLLHUP;
2745
2746	/*
2747	 * we set writable also when the other side has shut down the
2748	 * connection. This prevents stuck sockets.
2749	 */
2750	if (unix_writable(sk))
2751		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
2752
2753	return mask;
2754}
2755
2756static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
2757				    poll_table *wait)
2758{
2759	struct sock *sk = sock->sk, *other;
2760	unsigned int writable;
2761	__poll_t mask;
2762
2763	sock_poll_wait(file, sock, wait);
2764	mask = 0;
2765
2766	/* exceptional events? */
2767	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
2768		mask |= EPOLLERR |
2769			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
2770
2771	if (sk->sk_shutdown & RCV_SHUTDOWN)
2772		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
2773	if (sk->sk_shutdown == SHUTDOWN_MASK)
2774		mask |= EPOLLHUP;
2775
2776	/* readable? */
2777	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
2778		mask |= EPOLLIN | EPOLLRDNORM;
2779
2780	/* Connection-based need to check for termination and startup */
2781	if (sk->sk_type == SOCK_SEQPACKET) {
2782		if (sk->sk_state == TCP_CLOSE)
2783			mask |= EPOLLHUP;
2784		/* connection hasn't started yet? */
2785		if (sk->sk_state == TCP_SYN_SENT)
2786			return mask;
2787	}
2788
2789	/* No write status requested, avoid expensive OUT tests. */
2790	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
2791		return mask;
2792
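	/* A connected dgram sender is reported writable only if the peer's
	 * receive queue has room; otherwise unix_dgram_peer_wake_me() hooks us
	 * onto the peer's wait queue so that a later recvmsg() on the peer
	 * wakes this poller.
	 */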
2793	writable = unix_writable(sk);
2794	if (writable) {
2795		unix_state_lock(sk);
2796
2797		other = unix_peer(sk);
2798		if (other && unix_peer(other) != sk &&
2799		    unix_recvq_full_lockless(other) &&
2800		    unix_dgram_peer_wake_me(sk, other))
2801			writable = 0;
2802
2803		unix_state_unlock(sk);
2804	}
2805
2806	if (writable)
2807		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
2808	else
2809		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2810
2811	return mask;
2812}
2813
2814#ifdef CONFIG_PROC_FS
2815
2816#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2817
2818#define get_bucket(x) ((x) >> BUCKET_SPACE)
2819#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2820#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
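/* The seq_file position (*pos) packs both coordinates into one value: the
 * upper bits select the hash bucket and the lower BUCKET_SPACE bits hold a
 * 1-based offset within that bucket (offset 0 is reserved for the
 * SEQ_START_TOKEN header line).
 */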
2821
2822static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2823{
2824	unsigned long offset = get_offset(*pos);
2825	unsigned long bucket = get_bucket(*pos);
2826	struct sock *sk;
2827	unsigned long count = 0;
2828
2829	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2830		if (sock_net(sk) != seq_file_net(seq))
2831			continue;
2832		if (++count == offset)
2833			break;
2834	}
2835
2836	return sk;
2837}
2838
2839static struct sock *unix_next_socket(struct seq_file *seq,
2840				     struct sock *sk,
2841				     loff_t *pos)
2842{
2843	unsigned long bucket;
2844
2845	while (sk > (struct sock *)SEQ_START_TOKEN) {
2846		sk = sk_next(sk);
2847		if (!sk)
2848			goto next_bucket;
2849		if (sock_net(sk) == seq_file_net(seq))
2850			return sk;
2851	}
 
 
2852
2853	do {
2854		sk = unix_from_bucket(seq, pos);
2855		if (sk)
2856			return sk;
2857
2858next_bucket:
2859		bucket = get_bucket(*pos) + 1;
2860		*pos = set_bucket_offset(bucket, 1);
2861	} while (bucket < ARRAY_SIZE(unix_socket_table));
 
2862
2863	return NULL;
2864}
2865
2866static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2867	__acquires(unix_table_lock)
2868{
2869	spin_lock(&unix_table_lock);
2870
2871	if (!*pos)
2872		return SEQ_START_TOKEN;
2873
2874	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2875		return NULL;
2876
2877	return unix_next_socket(seq, NULL, pos);
2878}
2879
2880static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2881{
 
 
2882	++*pos;
2883	return unix_next_socket(seq, v, pos);
2884}
2885
2886static void unix_seq_stop(struct seq_file *seq, void *v)
2887	__releases(unix_table_lock)
2888{
2889	spin_unlock(&unix_table_lock);
2890}
2891
2892static int unix_seq_show(struct seq_file *seq, void *v)
2893{
2894
2895	if (v == SEQ_START_TOKEN)
2896		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2897			 "Inode Path\n");
2898	else {
2899		struct sock *s = v;
2900		struct unix_sock *u = unix_sk(s);
2901		unix_state_lock(s);
2902
2903		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2904			s,
2905			refcount_read(&s->sk_refcnt),
2906			0,
2907			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2908			s->sk_type,
2909			s->sk_socket ?
2910			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2911			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2912			sock_i_ino(s));
2913
2914		if (u->addr) {	// under unix_table_lock here
2915			int i, len;
2916			seq_putc(seq, ' ');
2917
2918			i = 0;
2919			len = u->addr->len - sizeof(short);
2920			if (!UNIX_ABSTRACT(s))
2921				len--;
2922			else {
2923				seq_putc(seq, '@');
2924				i++;
2925			}
2926			for ( ; i < len; i++)
2927				seq_putc(seq, u->addr->name->sun_path[i] ?:
2928					 '@');
2929		}
2930		unix_state_unlock(s);
2931		seq_putc(seq, '\n');
2932	}
2933
2934	return 0;
2935}
2936
2937static const struct seq_operations unix_seq_ops = {
2938	.start  = unix_seq_start,
2939	.next   = unix_seq_next,
2940	.stop   = unix_seq_stop,
2941	.show   = unix_seq_show,
2942};
2943#endif
2944
2945static const struct net_proto_family unix_family_ops = {
2946	.family = PF_UNIX,
2947	.create = unix_create,
2948	.owner	= THIS_MODULE,
2949};
2950
2951
2952static int __net_init unix_net_init(struct net *net)
2953{
2954	int error = -ENOMEM;
2955
2956	net->unx.sysctl_max_dgram_qlen = 10;
2957	if (unix_sysctl_register(net))
2958		goto out;
2959
2960#ifdef CONFIG_PROC_FS
2961	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
2962			sizeof(struct seq_net_private))) {
2963		unix_sysctl_unregister(net);
2964		goto out;
2965	}
2966#endif
2967	error = 0;
2968out:
2969	return error;
2970}
2971
2972static void __net_exit unix_net_exit(struct net *net)
2973{
2974	unix_sysctl_unregister(net);
2975	remove_proc_entry("unix", net->proc_net);
2976}
2977
2978static struct pernet_operations unix_net_ops = {
2979	.init = unix_net_init,
2980	.exit = unix_net_exit,
2981};
2982
2983static int __init af_unix_init(void)
2984{
2985	int rc = -1;
 
2986
2987	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
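	/* unix_skb_parms (passed creds, fds, security id, consumed offset)
	 * rides in the 48-byte skb->cb control block, hence the size check.
	 */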
2988
2989	rc = proto_register(&unix_proto, 1);
2990	if (rc != 0) {
2991		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
 
2992		goto out;
2993	}
2994
2995	sock_register(&unix_family_ops);
2996	register_pernet_subsys(&unix_net_ops);
2997out:
2998	return rc;
2999}
3000
3001static void __exit af_unix_exit(void)
3002{
3003	sock_unregister(PF_UNIX);
3004	proto_unregister(&unix_proto);
3005	unregister_pernet_subsys(&unix_net_ops);
3006}
3007
3008/* Earlier than device_initcall() so that other drivers invoking
3009   request_module() don't end up in a loop when modprobe tries
3010   to use a UNIX socket.  But later than subsys_initcall() because
3011   we depend on infrastructure initialised there. */
3012fs_initcall(af_unix_init);
3013module_exit(af_unix_exit);
3014
3015MODULE_LICENSE("GPL");
3016MODULE_ALIAS_NETPROTO(PF_UNIX);
v3.1
 
   1/*
   2 * NET4:	Implementation of BSD Unix domain sockets.
   3 *
   4 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
   5 *
   6 *		This program is free software; you can redistribute it and/or
   7 *		modify it under the terms of the GNU General Public License
   8 *		as published by the Free Software Foundation; either version
   9 *		2 of the License, or (at your option) any later version.
  10 *
  11 * Fixes:
  12 *		Linus Torvalds	:	Assorted bug cures.
  13 *		Niibe Yutaka	:	async I/O support.
  14 *		Carsten Paeth	:	PF_UNIX check, address fixes.
  15 *		Alan Cox	:	Limit size of allocated blocks.
  16 *		Alan Cox	:	Fixed the stupid socketpair bug.
  17 *		Alan Cox	:	BSD compatibility fine tuning.
  18 *		Alan Cox	:	Fixed a bug in connect when interrupted.
  19 *		Alan Cox	:	Sorted out a proper draft version of
  20 *					file descriptor passing hacked up from
  21 *					Mike Shaver's work.
  22 *		Marty Leisner	:	Fixes to fd passing
  23 *		Nick Nevin	:	recvmsg bugfix.
  24 *		Alan Cox	:	Started proper garbage collector
  25 *		Heiko EiBfeldt	:	Missing verify_area check
  26 *		Alan Cox	:	Started POSIXisms
  27 *		Andreas Schwab	:	Replace inode by dentry for proper
  28 *					reference counting
  29 *		Kirk Petersen	:	Made this a module
  30 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
  31 *					Lots of bug fixes.
  32 *	     Alexey Kuznetosv	:	Repaired (I hope) bugs introduces
  33 *					by above two patches.
  34 *	     Andrea Arcangeli	:	If possible we block in connect(2)
  35 *					if the max backlog of the listen socket
  36 *					is been reached. This won't break
  37 *					old apps and it will avoid huge amount
  38 *					of socks hashed (this for unix_gc()
  39 *					performances reasons).
  40 *					Security fix that limits the max
  41 *					number of socks to 2*max_files and
  42 *					the number of skb queueable in the
  43 *					dgram receiver.
  44 *		Artur Skawina   :	Hash function optimizations
  45 *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
  46 *	      Malcolm Beattie   :	Set peercred for socketpair
  47 *	     Michal Ostrowski   :       Module initialization cleanup.
  48 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
  49 *	     				the core infrastructure is doing that
  50 *	     				for all net proto families now (2.5.69+)
  51 *
  52 *
  53 * Known differences from reference BSD that was tested:
  54 *
  55 *	[TO FIX]
  56 *	ECONNREFUSED is not returned from one end of a connected() socket to the
  57 *		other the moment one end closes.
  58 *	fstat() doesn't return st_dev=0, and give the blksize as high water mark
  59 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
  60 *	[NOT TO FIX]
  61 *	accept() returns a path name even if the connecting socket has closed
  62 *		in the meantime (BSD loses the path and gives up).
  63 *	accept() returns 0 length path for an unbound connector. BSD returns 16
  64 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  65 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
  66 *	BSD af_unix apparently has connect forgetting to block properly.
  67 *		(need to check this with the POSIX spec in detail)
  68 *
  69 * Differences from 2.0.0-11-... (ANK)
  70 *	Bug fixes and improvements.
  71 *		- client shutdown killed server socket.
  72 *		- removed all useless cli/sti pairs.
  73 *
  74 *	Semantic changes/extensions.
  75 *		- generic control message passing.
  76 *		- SCM_CREDENTIALS control message.
  77 *		- "Abstract" (not FS based) socket bindings.
  78 *		  Abstract names are sequences of bytes (not zero terminated)
  79 *		  started by 0, so that this name space does not intersect
  80 *		  with BSD names.
  81 */
  82
 
 
  83#include <linux/module.h>
  84#include <linux/kernel.h>
  85#include <linux/signal.h>
  86#include <linux/sched.h>
  87#include <linux/errno.h>
  88#include <linux/string.h>
  89#include <linux/stat.h>
  90#include <linux/dcache.h>
  91#include <linux/namei.h>
  92#include <linux/socket.h>
  93#include <linux/un.h>
  94#include <linux/fcntl.h>
  95#include <linux/termios.h>
  96#include <linux/sockios.h>
  97#include <linux/net.h>
  98#include <linux/in.h>
  99#include <linux/fs.h>
 100#include <linux/slab.h>
 101#include <asm/uaccess.h>
 102#include <linux/skbuff.h>
 103#include <linux/netdevice.h>
 104#include <net/net_namespace.h>
 105#include <net/sock.h>
 106#include <net/tcp_states.h>
 107#include <net/af_unix.h>
 108#include <linux/proc_fs.h>
 109#include <linux/seq_file.h>
 110#include <net/scm.h>
 111#include <linux/init.h>
 112#include <linux/poll.h>
 113#include <linux/rtnetlink.h>
 114#include <linux/mount.h>
 115#include <net/checksum.h>
 116#include <linux/security.h>
 
 
 117
 118static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 119static DEFINE_SPINLOCK(unix_table_lock);
 
 
 
 
 120static atomic_long_t unix_nr_socks;
 121
 122#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])
 123
 124#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
 
 
 
 
 
 
 
 
 
 
 125
 126#ifdef CONFIG_SECURITY_NETWORK
 127static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 128{
 129	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
 130}
 131
 132static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 133{
 134	scm->secid = *UNIXSID(skb);
 
 
 
 
 
 135}
 136#else
 137static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 138{ }
 139
 140static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 141{ }
 
 
 
 
 
 142#endif /* CONFIG_SECURITY_NETWORK */
 143
 144/*
 145 *  SMP locking strategy:
 146 *    hash table is protected with spinlock unix_table_lock
 147 *    each socket state is protected by separate spin lock.
 148 */
 149
 150static inline unsigned unix_hash_fold(__wsum n)
 151{
 152	unsigned hash = (__force unsigned)n;
 153	hash ^= hash>>16;
 154	hash ^= hash>>8;
 155	return hash&(UNIX_HASH_SIZE-1);
 156}
 157
 158#define unix_peer(sk) (unix_sk(sk)->peer)
 159
 160static inline int unix_our_peer(struct sock *sk, struct sock *osk)
 161{
 162	return unix_peer(osk) == sk;
 163}
 164
 165static inline int unix_may_send(struct sock *sk, struct sock *osk)
 166{
 167	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 168}
 169
 170static inline int unix_recvq_full(struct sock const *sk)
 171{
 172	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 173}
 174
 175static struct sock *unix_peer_get(struct sock *s)
 
 
 
 
 
 
 176{
 177	struct sock *peer;
 178
 179	unix_state_lock(s);
 180	peer = unix_peer(s);
 181	if (peer)
 182		sock_hold(peer);
 183	unix_state_unlock(s);
 184	return peer;
 185}
 
 186
 187static inline void unix_release_addr(struct unix_address *addr)
 188{
 189	if (atomic_dec_and_test(&addr->refcnt))
 190		kfree(addr);
 191}
 192
 193/*
 194 *	Check unix socket name:
 195 *		- should be not zero length.
 196 *	        - if started by not zero, should be NULL terminated (FS object)
 197 *		- if started by zero, it is abstract name.
 198 */
 199
 200static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
 201{
 
 
 202	if (len <= sizeof(short) || len > sizeof(*sunaddr))
 203		return -EINVAL;
 204	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
 205		return -EINVAL;
 206	if (sunaddr->sun_path[0]) {
 207		/*
 208		 * This may look like an off by one error but it is a bit more
 209		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
 210		 * sun_path[108] doesn't as such exist.  However in kernel space
 211		 * we are guaranteed that it is a valid memory location in our
 212		 * kernel address buffer.
 213		 */
 214		((char *)sunaddr)[len] = 0;
 215		len = strlen(sunaddr->sun_path)+1+sizeof(short);
 216		return len;
 217	}
 218
 219	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
 220	return len;
 221}
 222
 223static void __unix_remove_socket(struct sock *sk)
 224{
 225	sk_del_node_init(sk);
 226}
 227
 228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
 229{
 230	WARN_ON(!sk_unhashed(sk));
 231	sk_add_node(sk, list);
 232}
 233
 
 
 
 
 
 
 
 
 234static inline void unix_remove_socket(struct sock *sk)
 235{
 236	spin_lock(&unix_table_lock);
 237	__unix_remove_socket(sk);
 238	spin_unlock(&unix_table_lock);
 239}
 240
 241static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
 242{
 243	spin_lock(&unix_table_lock);
 244	__unix_insert_socket(list, sk);
 245	spin_unlock(&unix_table_lock);
 246}
 247
 248static struct sock *__unix_find_socket_byname(struct net *net,
 249					      struct sockaddr_un *sunname,
 250					      int len, int type, unsigned hash)
 251{
 252	struct sock *s;
 253	struct hlist_node *node;
 254
 255	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
 256		struct unix_sock *u = unix_sk(s);
 257
 258		if (!net_eq(sock_net(s), net))
 259			continue;
 260
 261		if (u->addr->len == len &&
 262		    !memcmp(u->addr->name, sunname, len))
 263			goto found;
 264	}
 265	s = NULL;
 266found:
 267	return s;
 268}
 269
 270static inline struct sock *unix_find_socket_byname(struct net *net,
 271						   struct sockaddr_un *sunname,
 272						   int len, int type,
 273						   unsigned hash)
 274{
 275	struct sock *s;
 276
 277	spin_lock(&unix_table_lock);
 278	s = __unix_find_socket_byname(net, sunname, len, type, hash);
 279	if (s)
 280		sock_hold(s);
 281	spin_unlock(&unix_table_lock);
 282	return s;
 283}
 284
 285static struct sock *unix_find_socket_byinode(struct inode *i)
 286{
 287	struct sock *s;
 288	struct hlist_node *node;
 289
 290	spin_lock(&unix_table_lock);
 291	sk_for_each(s, node,
 292		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 293		struct dentry *dentry = unix_sk(s)->dentry;
 294
 295		if (dentry && dentry->d_inode == i) {
 296			sock_hold(s);
 297			goto found;
 298		}
 299	}
 300	s = NULL;
 301found:
 302	spin_unlock(&unix_table_lock);
 303	return s;
 304}
 305
 306static inline int unix_writable(struct sock *sk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 307{
 308	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 
 309}
 310
 311static void unix_write_space(struct sock *sk)
 312{
 313	struct socket_wq *wq;
 314
 315	rcu_read_lock();
 316	if (unix_writable(sk)) {
 317		wq = rcu_dereference(sk->sk_wq);
 318		if (wq_has_sleeper(wq))
 319			wake_up_interruptible_sync_poll(&wq->wait,
 320				POLLOUT | POLLWRNORM | POLLWRBAND);
 321		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 322	}
 323	rcu_read_unlock();
 324}
 325
 326/* When dgram socket disconnects (or changes its peer), we clear its receive
 327 * queue of packets arrived from previous peer. First, it allows to do
 328 * flow control based only on wmem_alloc; second, sk connected to peer
 329 * may receive messages only from that peer. */
 330static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 331{
 332	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 333		skb_queue_purge(&sk->sk_receive_queue);
 334		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 335
 336		/* If one link of bidirectional dgram pipe is disconnected,
 337		 * we signal error. Messages are lost. Do not make this,
 338		 * when peer was not connected to us.
 339		 */
 340		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
 341			other->sk_err = ECONNRESET;
 342			other->sk_error_report(other);
 343		}
 344	}
 345}
 346
 347static void unix_sock_destructor(struct sock *sk)
 348{
 349	struct unix_sock *u = unix_sk(sk);
 350
 351	skb_queue_purge(&sk->sk_receive_queue);
 352
 353	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
 354	WARN_ON(!sk_unhashed(sk));
 355	WARN_ON(sk->sk_socket);
 356	if (!sock_flag(sk, SOCK_DEAD)) {
 357		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
 358		return;
 359	}
 360
 361	if (u->addr)
 362		unix_release_addr(u->addr);
 363
 364	atomic_long_dec(&unix_nr_socks);
 365	local_bh_disable();
 366	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 367	local_bh_enable();
 368#ifdef UNIX_REFCNT_DEBUG
 369	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
 370		atomic_long_read(&unix_nr_socks));
 371#endif
 372}
 373
 374static int unix_release_sock(struct sock *sk, int embrion)
 375{
 376	struct unix_sock *u = unix_sk(sk);
 377	struct dentry *dentry;
 378	struct vfsmount *mnt;
 379	struct sock *skpair;
 380	struct sk_buff *skb;
 381	int state;
 382
 383	unix_remove_socket(sk);
 384
 385	/* Clear state */
 386	unix_state_lock(sk);
 387	sock_orphan(sk);
 388	sk->sk_shutdown = SHUTDOWN_MASK;
 389	dentry	     = u->dentry;
 390	u->dentry    = NULL;
 391	mnt	     = u->mnt;
 392	u->mnt	     = NULL;
 393	state = sk->sk_state;
 394	sk->sk_state = TCP_CLOSE;
 
 
 
 
 395	unix_state_unlock(sk);
 396
 397	wake_up_interruptible_all(&u->peer_wait);
 398
 399	skpair = unix_peer(sk);
 400
 401	if (skpair != NULL) {
 402		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 403			unix_state_lock(skpair);
 404			/* No more writes */
 405			skpair->sk_shutdown = SHUTDOWN_MASK;
 406			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
 407				skpair->sk_err = ECONNRESET;
 408			unix_state_unlock(skpair);
 409			skpair->sk_state_change(skpair);
 410			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 411		}
 
 
 412		sock_put(skpair); /* It may now die */
 413		unix_peer(sk) = NULL;
 414	}
 415
 416	/* Try to flush out this socket. Throw out buffers at least */
 417
 418	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 419		if (state == TCP_LISTEN)
 420			unix_release_sock(skb->sk, 1);
 421		/* passed fds are erased in the kfree_skb hook	      */
 
 422		kfree_skb(skb);
 423	}
 424
 425	if (dentry) {
 426		dput(dentry);
 427		mntput(mnt);
 428	}
 429
 430	sock_put(sk);
 431
 432	/* ---- Socket is dead now and most probably destroyed ---- */
 433
 434	/*
 435	 * Fixme: BSD difference: In BSD all sockets connected to use get
 436	 *	  ECONNRESET and we die on the spot. In Linux we behave
 437	 *	  like files and pipes do and wait for the last
 438	 *	  dereference.
 439	 *
 440	 * Can't we simply set sock->err?
 441	 *
 442	 *	  What the above comment does talk about? --ANK(980817)
 443	 */
 444
 445	if (unix_tot_inflight)
 446		unix_gc();		/* Garbage collect fds */
 447
 448	return 0;
 449}
 450
 451static void init_peercred(struct sock *sk)
 452{
 453	put_pid(sk->sk_peer_pid);
 454	if (sk->sk_peer_cred)
 455		put_cred(sk->sk_peer_cred);
 
 
 
 456	sk->sk_peer_pid  = get_pid(task_tgid(current));
 457	sk->sk_peer_cred = get_current_cred();
 
 
 
 
 458}
 459
 460static void copy_peercred(struct sock *sk, struct sock *peersk)
 461{
 462	put_pid(sk->sk_peer_pid);
 463	if (sk->sk_peer_cred)
 464		put_cred(sk->sk_peer_cred);
 
 
 
 
 
 
 
 
 
 465	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
 466	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 
 
 
 
 
 
 467}
 468
 469static int unix_listen(struct socket *sock, int backlog)
 470{
 471	int err;
 472	struct sock *sk = sock->sk;
 473	struct unix_sock *u = unix_sk(sk);
 474	struct pid *old_pid = NULL;
 475	const struct cred *old_cred = NULL;
 476
 477	err = -EOPNOTSUPP;
 478	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 479		goto out;	/* Only stream/seqpacket sockets accept */
 480	err = -EINVAL;
 481	if (!u->addr)
 482		goto out;	/* No listens on an unbound socket */
 483	unix_state_lock(sk);
 484	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
 485		goto out_unlock;
 486	if (backlog > sk->sk_max_ack_backlog)
 487		wake_up_interruptible_all(&u->peer_wait);
 488	sk->sk_max_ack_backlog	= backlog;
 489	sk->sk_state		= TCP_LISTEN;
 490	/* set credentials so connect can copy them */
 491	init_peercred(sk);
 492	err = 0;
 493
 494out_unlock:
 495	unix_state_unlock(sk);
 496	put_pid(old_pid);
 497	if (old_cred)
 498		put_cred(old_cred);
 499out:
 500	return err;
 501}
 502
 503static int unix_release(struct socket *);
 504static int unix_bind(struct socket *, struct sockaddr *, int);
 505static int unix_stream_connect(struct socket *, struct sockaddr *,
 506			       int addr_len, int flags);
 507static int unix_socketpair(struct socket *, struct socket *);
 508static int unix_accept(struct socket *, struct socket *, int);
 509static int unix_getname(struct socket *, struct sockaddr *, int *, int);
 510static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
 511static unsigned int unix_dgram_poll(struct file *, struct socket *,
 512				    poll_table *);
 513static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 
 
 
 514static int unix_shutdown(struct socket *, int);
 515static int unix_stream_sendmsg(struct kiocb *, struct socket *,
 516			       struct msghdr *, size_t);
 517static int unix_stream_recvmsg(struct kiocb *, struct socket *,
 518			       struct msghdr *, size_t, int);
 519static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
 520			      struct msghdr *, size_t);
 521static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
 522			      struct msghdr *, size_t, int);
 
 523static int unix_dgram_connect(struct socket *, struct sockaddr *,
 524			      int, int);
 525static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
 526				  struct msghdr *, size_t);
 527static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
 528				  struct msghdr *, size_t, int);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 529
 530static const struct proto_ops unix_stream_ops = {
 531	.family =	PF_UNIX,
 532	.owner =	THIS_MODULE,
 533	.release =	unix_release,
 534	.bind =		unix_bind,
 535	.connect =	unix_stream_connect,
 536	.socketpair =	unix_socketpair,
 537	.accept =	unix_accept,
 538	.getname =	unix_getname,
 539	.poll =		unix_poll,
 540	.ioctl =	unix_ioctl,
 
 
 
 541	.listen =	unix_listen,
 542	.shutdown =	unix_shutdown,
 543	.setsockopt =	sock_no_setsockopt,
 544	.getsockopt =	sock_no_getsockopt,
 545	.sendmsg =	unix_stream_sendmsg,
 546	.recvmsg =	unix_stream_recvmsg,
 547	.mmap =		sock_no_mmap,
 548	.sendpage =	sock_no_sendpage,
 
 
 
 549};
 550
 551static const struct proto_ops unix_dgram_ops = {
 552	.family =	PF_UNIX,
 553	.owner =	THIS_MODULE,
 554	.release =	unix_release,
 555	.bind =		unix_bind,
 556	.connect =	unix_dgram_connect,
 557	.socketpair =	unix_socketpair,
 558	.accept =	sock_no_accept,
 559	.getname =	unix_getname,
 560	.poll =		unix_dgram_poll,
 561	.ioctl =	unix_ioctl,
 
 
 
 562	.listen =	sock_no_listen,
 563	.shutdown =	unix_shutdown,
 564	.setsockopt =	sock_no_setsockopt,
 565	.getsockopt =	sock_no_getsockopt,
 566	.sendmsg =	unix_dgram_sendmsg,
 567	.recvmsg =	unix_dgram_recvmsg,
 568	.mmap =		sock_no_mmap,
 569	.sendpage =	sock_no_sendpage,
 
 
 570};
 571
 572static const struct proto_ops unix_seqpacket_ops = {
 573	.family =	PF_UNIX,
 574	.owner =	THIS_MODULE,
 575	.release =	unix_release,
 576	.bind =		unix_bind,
 577	.connect =	unix_stream_connect,
 578	.socketpair =	unix_socketpair,
 579	.accept =	unix_accept,
 580	.getname =	unix_getname,
 581	.poll =		unix_dgram_poll,
 582	.ioctl =	unix_ioctl,
 
 
 
 583	.listen =	unix_listen,
 584	.shutdown =	unix_shutdown,
 585	.setsockopt =	sock_no_setsockopt,
 586	.getsockopt =	sock_no_getsockopt,
 587	.sendmsg =	unix_seqpacket_sendmsg,
 588	.recvmsg =	unix_seqpacket_recvmsg,
 589	.mmap =		sock_no_mmap,
 590	.sendpage =	sock_no_sendpage,
 
 
 591};
 592
 593static struct proto unix_proto = {
 594	.name			= "UNIX",
 595	.owner			= THIS_MODULE,
 596	.obj_size		= sizeof(struct unix_sock),
 597};
 598
 599/*
 600 * AF_UNIX sockets do not interact with hardware, hence they
 601 * dont trigger interrupts - so it's safe for them to have
 602 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 603 * this special lock-class by reinitializing the spinlock key:
 604 */
 605static struct lock_class_key af_unix_sk_receive_queue_lock_key;
 606
 607static struct sock *unix_create1(struct net *net, struct socket *sock)
 608{
 609	struct sock *sk = NULL;
 610	struct unix_sock *u;
 611
 612	atomic_long_inc(&unix_nr_socks);
 613	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
 614		goto out;
 615
 616	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
 617	if (!sk)
 618		goto out;
 619
 620	sock_init_data(sock, sk);
 621	lockdep_set_class(&sk->sk_receive_queue.lock,
 622				&af_unix_sk_receive_queue_lock_key);
 623
 
 624	sk->sk_write_space	= unix_write_space;
 625	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
 626	sk->sk_destruct		= unix_sock_destructor;
 627	u	  = unix_sk(sk);
 628	u->dentry = NULL;
 629	u->mnt	  = NULL;
 630	spin_lock_init(&u->lock);
 631	atomic_long_set(&u->inflight, 0);
 632	INIT_LIST_HEAD(&u->link);
 633	mutex_init(&u->readlock); /* single task reading lock */
 
 634	init_waitqueue_head(&u->peer_wait);
 635	unix_insert_socket(unix_sockets_unbound, sk);
 
 
 636out:
 637	if (sk == NULL)
 638		atomic_long_dec(&unix_nr_socks);
 639	else {
 640		local_bh_disable();
 641		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 642		local_bh_enable();
 643	}
 644	return sk;
 645}
 646
 647static int unix_create(struct net *net, struct socket *sock, int protocol,
 648		       int kern)
 649{
 650	if (protocol && protocol != PF_UNIX)
 651		return -EPROTONOSUPPORT;
 652
 653	sock->state = SS_UNCONNECTED;
 654
 655	switch (sock->type) {
 656	case SOCK_STREAM:
 657		sock->ops = &unix_stream_ops;
 658		break;
 659		/*
 660		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
 661		 *	nothing uses it.
 662		 */
 663	case SOCK_RAW:
 664		sock->type = SOCK_DGRAM;
 
 665	case SOCK_DGRAM:
 666		sock->ops = &unix_dgram_ops;
 667		break;
 668	case SOCK_SEQPACKET:
 669		sock->ops = &unix_seqpacket_ops;
 670		break;
 671	default:
 672		return -ESOCKTNOSUPPORT;
 673	}
 674
 675	return unix_create1(net, sock) ? 0 : -ENOMEM;
 676}
 677
 678static int unix_release(struct socket *sock)
 679{
 680	struct sock *sk = sock->sk;
 681
 682	if (!sk)
 683		return 0;
 684
 
 685	sock->sk = NULL;
 686
 687	return unix_release_sock(sk, 0);
 688}
 689
 690static int unix_autobind(struct socket *sock)
 691{
 692	struct sock *sk = sock->sk;
 693	struct net *net = sock_net(sk);
 694	struct unix_sock *u = unix_sk(sk);
 695	static u32 ordernum = 1;
 696	struct unix_address *addr;
 697	int err;
 698	unsigned int retries = 0;
 699
 700	mutex_lock(&u->readlock);
 
 
 701
 702	err = 0;
 703	if (u->addr)
 704		goto out;
 705
 706	err = -ENOMEM;
 707	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
 708	if (!addr)
 709		goto out;
 710
 711	addr->name->sun_family = AF_UNIX;
 712	atomic_set(&addr->refcnt, 1);
 713
 714retry:
 715	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
 716	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
 
 717
 718	spin_lock(&unix_table_lock);
 719	ordernum = (ordernum+1)&0xFFFFF;
 720
 721	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
 722				      addr->hash)) {
 723		spin_unlock(&unix_table_lock);
 724		/*
 725		 * __unix_find_socket_byname() may take long time if many names
 726		 * are already in use.
 727		 */
 728		cond_resched();
 729		/* Give up if all names seems to be in use. */
 730		if (retries++ == 0xFFFFF) {
 731			err = -ENOSPC;
 732			kfree(addr);
 733			goto out;
 734		}
 735		goto retry;
 736	}
 737	addr->hash ^= sk->sk_type;
 738
 739	__unix_remove_socket(sk);
 740	u->addr = addr;
 741	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
 742	spin_unlock(&unix_table_lock);
 743	err = 0;
 744
 745out:	mutex_unlock(&u->readlock);
 746	return err;
 747}
 748
 749static struct sock *unix_find_other(struct net *net,
 750				    struct sockaddr_un *sunname, int len,
 751				    int type, unsigned hash, int *error)
 752{
 753	struct sock *u;
 754	struct path path;
 755	int err = 0;
 756
 757	if (sunname->sun_path[0]) {
 758		struct inode *inode;
 759		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
 760		if (err)
 761			goto fail;
 762		inode = path.dentry->d_inode;
 763		err = inode_permission(inode, MAY_WRITE);
 764		if (err)
 765			goto put_fail;
 766
 767		err = -ECONNREFUSED;
 768		if (!S_ISSOCK(inode->i_mode))
 769			goto put_fail;
 770		u = unix_find_socket_byinode(inode);
 771		if (!u)
 772			goto put_fail;
 773
 774		if (u->sk_type == type)
 775			touch_atime(path.mnt, path.dentry);
 776
 777		path_put(&path);
 778
 779		err = -EPROTOTYPE;
 780		if (u->sk_type != type) {
 781			sock_put(u);
 782			goto fail;
 783		}
 784	} else {
 785		err = -ECONNREFUSED;
 786		u = unix_find_socket_byname(net, sunname, len, type, hash);
 787		if (u) {
 788			struct dentry *dentry;
 789			dentry = unix_sk(u)->dentry;
 790			if (dentry)
 791				touch_atime(unix_sk(u)->mnt, dentry);
 792		} else
 793			goto fail;
 794	}
 795	return u;
 796
 797put_fail:
 798	path_put(&path);
 799fail:
 800	*error = err;
 801	return NULL;
 802}
 803
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 804
 805static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 806{
 807	struct sock *sk = sock->sk;
 808	struct net *net = sock_net(sk);
 809	struct unix_sock *u = unix_sk(sk);
 810	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
 811	char *sun_path = sunaddr->sun_path;
 812	struct dentry *dentry = NULL;
 813	struct path path;
 814	int err;
 815	unsigned hash;
 816	struct unix_address *addr;
 817	struct hlist_head *list;
 818
 819	err = -EINVAL;
 820	if (sunaddr->sun_family != AF_UNIX)
 821		goto out;
 822
 823	if (addr_len == sizeof(short)) {
 824		err = unix_autobind(sock);
 825		goto out;
 826	}
 827
 828	err = unix_mkname(sunaddr, addr_len, &hash);
 829	if (err < 0)
 830		goto out;
 831	addr_len = err;
 832
 833	mutex_lock(&u->readlock);
 834
 835	err = -EINVAL;
 836	if (u->addr)
 837		goto out_up;
 838
 839	err = -ENOMEM;
 840	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
 841	if (!addr)
 842		goto out_up;
 843
 844	memcpy(addr->name, sunaddr, addr_len);
 845	addr->len = addr_len;
 846	addr->hash = hash ^ sk->sk_type;
 847	atomic_set(&addr->refcnt, 1);
 848
 849	if (sun_path[0]) {
 850		unsigned int mode;
 851		err = 0;
 852		/*
 853		 * Get the parent directory, calculate the hash for last
 854		 * component.
 855		 */
 856		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
 857		err = PTR_ERR(dentry);
 858		if (IS_ERR(dentry))
 859			goto out_mknod_parent;
 860
 861		/*
 862		 * All right, let's create it.
 863		 */
 864		mode = S_IFSOCK |
 865		       (SOCK_INODE(sock)->i_mode & ~current_umask());
 866		err = mnt_want_write(path.mnt);
 867		if (err)
 868			goto out_mknod_dput;
 869		err = security_path_mknod(&path, dentry, mode, 0);
 870		if (err)
 871			goto out_mknod_drop_write;
 872		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
 873out_mknod_drop_write:
 874		mnt_drop_write(path.mnt);
 875		if (err)
 876			goto out_mknod_dput;
 877		mutex_unlock(&path.dentry->d_inode->i_mutex);
 878		dput(path.dentry);
 879		path.dentry = dentry;
 880
 881		addr->hash = UNIX_HASH_SIZE;
 882	}
 883
 884	spin_lock(&unix_table_lock);
 885
 886	if (!sun_path[0]) {
 887		err = -EADDRINUSE;
 888		if (__unix_find_socket_byname(net, sunaddr, addr_len,
 889					      sk->sk_type, hash)) {
 890			unix_release_addr(addr);
 891			goto out_unlock;
 892		}
 893
 894		list = &unix_socket_table[addr->hash];
 895	} else {
 896		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
 897		u->dentry = path.dentry;
 898		u->mnt    = path.mnt;
 899	}
 900
 901	err = 0;
 902	__unix_remove_socket(sk);
 903	u->addr = addr;
 904	__unix_insert_socket(list, sk);
 905
 906out_unlock:
 907	spin_unlock(&unix_table_lock);
 908out_up:
 909	mutex_unlock(&u->readlock);
 910out:
 911	return err;
 912
 913out_mknod_dput:
 914	dput(dentry);
 915	mutex_unlock(&path.dentry->d_inode->i_mutex);
 916	path_put(&path);
 917out_mknod_parent:
 918	if (err == -EEXIST)
 919		err = -EADDRINUSE;
 920	unix_release_addr(addr);
 921	goto out_up;
 922}
 923
 924static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
 925{
 926	if (unlikely(sk1 == sk2) || !sk2) {
 927		unix_state_lock(sk1);
 928		return;
 929	}
 930	if (sk1 < sk2) {
 931		unix_state_lock(sk1);
 932		unix_state_lock_nested(sk2);
 933	} else {
 934		unix_state_lock(sk2);
 935		unix_state_lock_nested(sk1);
 936	}
 937}
 938
 939static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
 940{
 941	if (unlikely(sk1 == sk2) || !sk2) {
 942		unix_state_unlock(sk1);
 943		return;
 944	}
 945	unix_state_unlock(sk1);
 946	unix_state_unlock(sk2);
 947}
 948
 949static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
 950			      int alen, int flags)
 951{
 952	struct sock *sk = sock->sk;
 953	struct net *net = sock_net(sk);
 954	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
 955	struct sock *other;
 956	unsigned hash;
 957	int err;
 958
 959	if (addr->sa_family != AF_UNSPEC) {
 960		err = unix_mkname(sunaddr, alen, &hash);
 961		if (err < 0)
 962			goto out;
 963		alen = err;
 964
 965		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
 966		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
 967			goto out;
 968
 969restart:
 970		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
 971		if (!other)
 972			goto out;
 973
 974		unix_state_double_lock(sk, other);
 975
 976		/* Apparently VFS overslept socket death. Retry. */
 977		if (sock_flag(other, SOCK_DEAD)) {
 978			unix_state_double_unlock(sk, other);
 979			sock_put(other);
 980			goto restart;
 981		}
 982
 983		err = -EPERM;
 984		if (!unix_may_send(sk, other))
 985			goto out_unlock;
 986
 987		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
 988		if (err)
 989			goto out_unlock;
 990
 991	} else {
 992		/*
 993		 *	1003.1g breaking connected state with AF_UNSPEC
 994		 */
 995		other = NULL;
 996		unix_state_double_lock(sk, other);
 997	}
 998
 999	/*
1000	 * If it was connected, reconnect.
1001	 */
1002	if (unix_peer(sk)) {
1003		struct sock *old_peer = unix_peer(sk);
1004		unix_peer(sk) = other;
1005		unix_state_double_unlock(sk, other);
1006
1007		if (other != old_peer)
1008			unix_dgram_disconnected(sk, old_peer);
1009		sock_put(old_peer);
1010	} else {
1011		unix_peer(sk) = other;
1012		unix_state_double_unlock(sk, other);
1013	}
1014	return 0;
1015
1016out_unlock:
1017	unix_state_double_unlock(sk, other);
1018	sock_put(other);
1019out:
1020	return err;
1021}
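/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace sketch of what unix_dgram_connect() implements -- setting a
 * default peer on a datagram socket and later breaking the association with
 * an AF_UNSPEC address, as 1003.1g allows.  The path is hypothetical and
 * error handling is elided.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	void dgram_connect_example(int fd)
 *	{
 *		struct sockaddr_un peer;
 *		struct sockaddr unspec;
 *
 *		memset(&peer, 0, sizeof(peer));
 *		peer.sun_family = AF_UNIX;
 *		strncpy(peer.sun_path, "/tmp/peer.sock", sizeof(peer.sun_path) - 1);
 *		connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 *		send(fd, "hello", 5, 0);		// goes to the connected peer
 *
 *		memset(&unspec, 0, sizeof(unspec));
 *		unspec.sa_family = AF_UNSPEC;
 *		connect(fd, &unspec, sizeof(unspec));	// breaks the association
 *	}
 */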
1022
1023static long unix_wait_for_peer(struct sock *other, long timeo)
 
1024{
1025	struct unix_sock *u = unix_sk(other);
1026	int sched;
1027	DEFINE_WAIT(wait);
1028
1029	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1030
1031	sched = !sock_flag(other, SOCK_DEAD) &&
1032		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1033		unix_recvq_full(other);
1034
1035	unix_state_unlock(other);
1036
1037	if (sched)
1038		timeo = schedule_timeout(timeo);
1039
1040	finish_wait(&u->peer_wait, &wait);
1041	return timeo;
1042}
1043
1044static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1045			       int addr_len, int flags)
1046{
1047	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1048	struct sock *sk = sock->sk;
1049	struct net *net = sock_net(sk);
1050	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1051	struct sock *newsk = NULL;
1052	struct sock *other = NULL;
1053	struct sk_buff *skb = NULL;
1054	unsigned hash;
1055	int st;
1056	int err;
1057	long timeo;
1058
1059	err = unix_mkname(sunaddr, addr_len, &hash);
1060	if (err < 0)
1061		goto out;
1062	addr_len = err;
1063
1064	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1065	    (err = unix_autobind(sock)) != 0)
1066		goto out;
1067
1068	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1069
1070	/* First of all, allocate resources.
1071	   If we did this after the state is locked,
1072	   we would have to recheck everything again anyway.
1073	 */
1074
1075	err = -ENOMEM;
1076
1077	/* create new sock for complete connection */
1078	newsk = unix_create1(sock_net(sk), NULL);
1079	if (newsk == NULL)
1080		goto out;
1081
1082	/* Allocate skb for sending to listening sock */
1083	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1084	if (skb == NULL)
1085		goto out;
1086
1087restart:
1088	/*  Find listening sock. */
1089	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1090	if (!other)
1091		goto out;
1092
1093	/* Latch state of peer */
1094	unix_state_lock(other);
1095
1096	/* Apparently VFS overslept socket death. Retry. */
1097	if (sock_flag(other, SOCK_DEAD)) {
1098		unix_state_unlock(other);
1099		sock_put(other);
1100		goto restart;
1101	}
1102
1103	err = -ECONNREFUSED;
1104	if (other->sk_state != TCP_LISTEN)
1105		goto out_unlock;
1106	if (other->sk_shutdown & RCV_SHUTDOWN)
1107		goto out_unlock;
1108
1109	if (unix_recvq_full(other)) {
1110		err = -EAGAIN;
1111		if (!timeo)
1112			goto out_unlock;
1113
1114		timeo = unix_wait_for_peer(other, timeo);
1115
1116		err = sock_intr_errno(timeo);
1117		if (signal_pending(current))
1118			goto out;
1119		sock_put(other);
1120		goto restart;
1121	}
1122
1123	/* Latch our state.
1124
1125	   This is a tricky place. We need to grab our own state lock
1126	   without dropping the lock on the peer, which is dangerous
1127	   because a deadlock would be possible. The connect-to-self case
1128	   and simultaneous connect attempts are ruled out by checking the
1129	   socket state: other is TCP_LISTEN, and whether sk is TCP_LISTEN
1130	   is checked above before we attempt to grab the lock.
1131
1132	   Even so, we have to recheck the state after our socket is locked.
1133	 */
1134	st = sk->sk_state;
1135
1136	switch (st) {
1137	case TCP_CLOSE:
1138		/* This is ok... continue with connect */
1139		break;
1140	case TCP_ESTABLISHED:
1141		/* Socket is already connected */
1142		err = -EISCONN;
1143		goto out_unlock;
1144	default:
1145		err = -EINVAL;
1146		goto out_unlock;
1147	}
1148
1149	unix_state_lock_nested(sk);
1150
1151	if (sk->sk_state != st) {
1152		unix_state_unlock(sk);
1153		unix_state_unlock(other);
1154		sock_put(other);
1155		goto restart;
1156	}
1157
1158	err = security_unix_stream_connect(sk, other, newsk);
1159	if (err) {
1160		unix_state_unlock(sk);
1161		goto out_unlock;
1162	}
1163
1164	/* The way is open! Quickly set all the necessary fields... */
1165
1166	sock_hold(sk);
1167	unix_peer(newsk)	= sk;
1168	newsk->sk_state		= TCP_ESTABLISHED;
1169	newsk->sk_type		= sk->sk_type;
1170	init_peercred(newsk);
1171	newu = unix_sk(newsk);
1172	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1173	otheru = unix_sk(other);
1174
1175	/* copy address information from listening to new sock*/
1176	if (otheru->addr) {
1177		atomic_inc(&otheru->addr->refcnt);
1178		newu->addr = otheru->addr;
1179	}
1180	if (otheru->dentry) {
1181		newu->dentry	= dget(otheru->dentry);
1182		newu->mnt	= mntget(otheru->mnt);
1183	}
1184
1185	/* Set credentials */
1186	copy_peercred(sk, other);
1187
1188	sock->state	= SS_CONNECTED;
1189	sk->sk_state	= TCP_ESTABLISHED;
1190	sock_hold(newsk);
1191
1192	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
1193	unix_peer(sk)	= newsk;
1194
1195	unix_state_unlock(sk);
1196
1197	/* hand the skb over and notify the listening sock */
1198	spin_lock(&other->sk_receive_queue.lock);
1199	__skb_queue_tail(&other->sk_receive_queue, skb);
1200	spin_unlock(&other->sk_receive_queue.lock);
1201	unix_state_unlock(other);
1202	other->sk_data_ready(other, 0);
1203	sock_put(other);
1204	return 0;
1205
1206out_unlock:
1207	if (other)
1208		unix_state_unlock(other);
1209
1210out:
1211	kfree_skb(skb);
1212	if (newsk)
1213		unix_release_sock(newsk, 0);
1214	if (other)
1215		sock_put(other);
1216	return err;
1217}
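/*
 * Editor's note (illustrative, not part of the original file): the client
 * side of the handshake implemented above, as a minimal userspace sketch.
 * With a full listener backlog a blocking connect() waits in
 * unix_wait_for_peer(); a non-blocking one gets -EAGAIN instead.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	int stream_connect_example(const char *path)
 *	{
 *		int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *		struct sockaddr_un sun;
 *
 *		memset(&sun, 0, sizeof(sun));
 *		sun.sun_family = AF_UNIX;
 *		strncpy(sun.sun_path, path, sizeof(sun.sun_path) - 1);
 *		if (connect(fd, (struct sockaddr *)&sun, sizeof(sun)) < 0)
 *			return -1;	// e.g. ECONNREFUSED if nobody is listening
 *		return fd;
 *	}
 */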
1218
1219static int unix_socketpair(struct socket *socka, struct socket *sockb)
1220{
1221	struct sock *ska = socka->sk, *skb = sockb->sk;
1222
1223	/* Join our sockets back to back */
1224	sock_hold(ska);
1225	sock_hold(skb);
1226	unix_peer(ska) = skb;
1227	unix_peer(skb) = ska;
1228	init_peercred(ska);
1229	init_peercred(skb);
1230
1231	if (ska->sk_type != SOCK_DGRAM) {
1232		ska->sk_state = TCP_ESTABLISHED;
1233		skb->sk_state = TCP_ESTABLISHED;
1234		socka->state  = SS_CONNECTED;
1235		sockb->state  = SS_CONNECTED;
1236	}
1237	return 0;
1238}
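/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace sketch of the socketpair() call served by unix_socketpair()
 * above; both ends come back already connected, so no bind/listen/accept
 * is needed.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	void socketpair_example(void)
 *	{
 *		int sv[2];
 *		char buf[4];
 *
 *		if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *			write(sv[0], "ping", 4);
 *			read(sv[1], buf, sizeof(buf));
 *			close(sv[0]);
 *			close(sv[1]);
 *		}
 *	}
 */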
1239
1240static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1241{
1242	struct sock *sk = sock->sk;
1243	struct sock *tsk;
1244	struct sk_buff *skb;
1245	int err;
1246
1247	err = -EOPNOTSUPP;
1248	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1249		goto out;
1250
1251	err = -EINVAL;
1252	if (sk->sk_state != TCP_LISTEN)
1253		goto out;
1254
1255	/* If the socket state is TCP_LISTEN it cannot change (for now...),
1256	 * so no locks are necessary.
1257	 */
1258
1259	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1260	if (!skb) {
1261		/* This means receive shutdown. */
1262		if (err == 0)
1263			err = -EINVAL;
1264		goto out;
1265	}
1266
1267	tsk = skb->sk;
1268	skb_free_datagram(sk, skb);
1269	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1270
1271	/* attach accepted sock to socket */
1272	unix_state_lock(tsk);
1273	newsock->state = SS_CONNECTED;
 
1274	sock_graft(tsk, newsock);
1275	unix_state_unlock(tsk);
1276	return 0;
1277
1278out:
1279	return err;
1280}
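/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace server sketch matching unix_accept() above.  Each pending
 * connection is queued on the listener as a one-skb "datagram" whose
 * embedded sock gets grafted onto the newly accepted socket.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	void accept_loop_example(int listen_fd)
 *	{
 *		listen(listen_fd, 16);
 *		for (;;) {
 *			int cfd = accept(listen_fd, NULL, NULL);
 *
 *			if (cfd < 0)
 *				break;
 *			write(cfd, "hi\n", 3);
 *			close(cfd);
 *		}
 *	}
 */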
1281
1282
1283static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1284{
1285	struct sock *sk = sock->sk;
1286	struct unix_sock *u;
1287	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1288	int err = 0;
1289
1290	if (peer) {
1291		sk = unix_peer_get(sk);
1292
1293		err = -ENOTCONN;
1294		if (!sk)
1295			goto out;
1296		err = 0;
1297	} else {
1298		sock_hold(sk);
1299	}
1300
1301	u = unix_sk(sk);
1302	unix_state_lock(sk);
1303	if (!u->addr) {
1304		sunaddr->sun_family = AF_UNIX;
1305		sunaddr->sun_path[0] = 0;
1306		*uaddr_len = sizeof(short);
1307	} else {
1308		struct unix_address *addr = u->addr;
1309
1310		*uaddr_len = addr->len;
1311		memcpy(sunaddr, addr->name, *uaddr_len);
1312	}
1313	unix_state_unlock(sk);
1314	sock_put(sk);
1315out:
1316	return err;
1317}
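/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace sketch of the calls served by unix_getname() above.  For an
 * unbound socket only sun_family comes back and the reported length is
 * sizeof(sa_family_t).
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	void getname_example(int fd)
 *	{
 *		struct sockaddr_un sun;
 *		socklen_t len = sizeof(sun);
 *
 *		getsockname(fd, (struct sockaddr *)&sun, &len);
 *		len = sizeof(sun);
 *		getpeername(fd, (struct sockaddr *)&sun, &len);
 *	}
 */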
1318
1319static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1320{
1321	int i;
1322
1323	scm->fp = UNIXCB(skb).fp;
1324	UNIXCB(skb).fp = NULL;
1325
1326	for (i = scm->fp->count-1; i >= 0; i--)
1327		unix_notinflight(scm->fp->fp[i]);
1328}
1329
1330static void unix_destruct_scm(struct sk_buff *skb)
 
1331{
1332	struct scm_cookie scm;
1333	memset(&scm, 0, sizeof(scm));
1334	scm.pid  = UNIXCB(skb).pid;
1335	scm.cred = UNIXCB(skb).cred;
1336	if (UNIXCB(skb).fp)
1337		unix_detach_fds(&scm, skb);
1338
1339	/* Alas, it calls into the VFS */
1340	/* So what? fput() has been SMP-safe since last summer */
1341	scm_destroy(&scm);
1342	sock_wfree(skb);
1343}
1344
1345#define MAX_RECURSION_LEVEL 4
1346
1347static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1348{
1349	int i;
1350	unsigned char max_level = 0;
1351	int unix_sock_count = 0;
1352
1353	for (i = scm->fp->count - 1; i >= 0; i--) {
1354		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
 
1355
1356		if (sk) {
1357			unix_sock_count++;
1358			max_level = max(max_level,
1359					unix_sk(sk)->recursion_level);
1360		}
1361	}
1362	if (unlikely(max_level > MAX_RECURSION_LEVEL))
1363		return -ETOOMANYREFS;
1364
1365	/*
1366	 * Need to duplicate file references for the sake of garbage
1367	 * collection.  Otherwise a socket in the fps might become a
1368	 * candidate for GC while the skb is not yet queued.
1369	 */
1370	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1371	if (!UNIXCB(skb).fp)
1372		return -ENOMEM;
1373
1374	if (unix_sock_count) {
1375		for (i = scm->fp->count - 1; i >= 0; i--)
1376			unix_inflight(scm->fp->fp[i]);
1377	}
1378	return max_level;
1379}
1380
1381static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1382{
1383	int err = 0;
1384	UNIXCB(skb).pid  = get_pid(scm->pid);
1385	UNIXCB(skb).cred = get_cred(scm->cred);
1386	UNIXCB(skb).fp = NULL;
1387	if (scm->fp && send_fds)
1388		err = unix_attach_fds(scm, skb);
1389
1390	skb->destructor = unix_destruct_scm;
1391	return err;
1392}
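/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace sketch of the SCM_RIGHTS control message that
 * unix_attach_fds()/unix_scm_to_skb() above turn into in-flight file
 * references.  The function name is hypothetical; error handling is elided.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *
 *	int send_fd_example(int sock, int fd_to_pass)
 *	{
 *		char data = 'x';
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		char cbuf[CMSG_SPACE(sizeof(int))];
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *		return sendmsg(sock, &msg, 0);
 *	}
 */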
1393
1394/*
1395 *	Send AF_UNIX data.
1396 */
1397
1398static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1399			      struct msghdr *msg, size_t len)
1400{
1401	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1402	struct sock *sk = sock->sk;
1403	struct net *net = sock_net(sk);
1404	struct unix_sock *u = unix_sk(sk);
1405	struct sockaddr_un *sunaddr = msg->msg_name;
1406	struct sock *other = NULL;
1407	int namelen = 0; /* fake GCC */
1408	int err;
1409	unsigned hash;
1410	struct sk_buff *skb;
1411	long timeo;
1412	struct scm_cookie tmp_scm;
1413	int max_level;
 
1414
1415	if (NULL == siocb->scm)
1416		siocb->scm = &tmp_scm;
1417	wait_for_unix_gc();
1418	err = scm_send(sock, msg, siocb->scm);
1419	if (err < 0)
1420		return err;
1421
1422	err = -EOPNOTSUPP;
1423	if (msg->msg_flags&MSG_OOB)
1424		goto out;
1425
1426	if (msg->msg_namelen) {
1427		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1428		if (err < 0)
1429			goto out;
1430		namelen = err;
1431	} else {
1432		sunaddr = NULL;
1433		err = -ENOTCONN;
1434		other = unix_peer_get(sk);
1435		if (!other)
1436			goto out;
1437	}
1438
1439	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1440	    && (err = unix_autobind(sock)) != 0)
1441		goto out;
1442
1443	err = -EMSGSIZE;
1444	if (len > sk->sk_sndbuf - 32)
1445		goto out;
1446
1447	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1448	if (skb == NULL)
1449		goto out;
1450
1451	err = unix_scm_to_skb(siocb->scm, skb, true);
1452	if (err < 0)
1453		goto out_free;
1454	max_level = err + 1;
1455	unix_get_secdata(siocb->scm, skb);
1456
1457	skb_reset_transport_header(skb);
1458	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1459	if (err)
1460		goto out_free;
1461
1462	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1463
1464restart:
1465	if (!other) {
1466		err = -ECONNRESET;
1467		if (sunaddr == NULL)
1468			goto out_free;
1469
1470		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1471					hash, &err);
1472		if (other == NULL)
1473			goto out_free;
1474	}
1475
1476	if (sk_filter(other, skb) < 0) {
1477		/* Toss the packet but do not return any error to the sender */
1478		err = len;
1479		goto out_free;
1480	}
1481
 
1482	unix_state_lock(other);
 
1483	err = -EPERM;
1484	if (!unix_may_send(sk, other))
1485		goto out_unlock;
1486
1487	if (sock_flag(other, SOCK_DEAD)) {
1488		/*
1489		 *	Check with 1003.1g - what should a
1490		 *	datagram error return here?
1491		 */
1492		unix_state_unlock(other);
1493		sock_put(other);
1494
1495		err = 0;
1496		unix_state_lock(sk);
1497		if (unix_peer(sk) == other) {
1498			unix_peer(sk) = NULL;
1499			unix_state_unlock(sk);
1500
1501			unix_dgram_disconnected(sk, other);
1502			sock_put(other);
1503			err = -ECONNREFUSED;
1504		} else {
1505			unix_state_unlock(sk);
1506		}
1507
1508		other = NULL;
1509		if (err)
1510			goto out_free;
1511		goto restart;
1512	}
1513
1514	err = -EPIPE;
1515	if (other->sk_shutdown & RCV_SHUTDOWN)
1516		goto out_unlock;
1517
1518	if (sk->sk_type != SOCK_SEQPACKET) {
1519		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1520		if (err)
1521			goto out_unlock;
1522	}
1523
1524	if (unix_peer(other) != sk && unix_recvq_full(other)) {
1525		if (!timeo) {
1526			err = -EAGAIN;
 
1527			goto out_unlock;
1528		}
1529
1530		timeo = unix_wait_for_peer(other, timeo);
1531
1532		err = sock_intr_errno(timeo);
1533		if (signal_pending(current))
1534			goto out_free;
1535
1536		goto restart;
1537	}
1538
1539	if (sock_flag(other, SOCK_RCVTSTAMP))
1540		__net_timestamp(skb);
1541	skb_queue_tail(&other->sk_receive_queue, skb);
1542	if (max_level > unix_sk(other)->recursion_level)
1543		unix_sk(other)->recursion_level = max_level;
1544	unix_state_unlock(other);
1545	other->sk_data_ready(other, len);
1546	sock_put(other);
1547	scm_destroy(siocb->scm);
1548	return len;
1549
1550out_unlock:
1551	unix_state_unlock(other);
1552out_free:
1553	kfree_skb(skb);
1554out:
1555	if (other)
1556		sock_put(other);
1557	scm_destroy(siocb->scm);
1558	return err;
1559}
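/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace sketch of the sendto() path handled by unix_dgram_sendmsg()
 * above.  A datagram larger than the send buffer (minus a small overhead)
 * fails with EMSGSIZE, and a full receiver queue makes a blocking sender
 * wait in unix_wait_for_peer().
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	int dgram_send_example(int fd, const char *path,
 *			       const void *buf, size_t len)
 *	{
 *		struct sockaddr_un to;
 *
 *		memset(&to, 0, sizeof(to));
 *		to.sun_family = AF_UNIX;
 *		strncpy(to.sun_path, path, sizeof(to.sun_path) - 1);
 *		return sendto(fd, buf, len, 0,
 *			      (struct sockaddr *)&to, sizeof(to));
 *	}
 */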
1560
1561
1562static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1563			       struct msghdr *msg, size_t len)
1564{
1565	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1566	struct sock *sk = sock->sk;
1567	struct sock *other = NULL;
1568	int err, size;
1569	struct sk_buff *skb;
1570	int sent = 0;
1571	struct scm_cookie tmp_scm;
1572	bool fds_sent = false;
1573	int max_level;
1574
1575	if (NULL == siocb->scm)
1576		siocb->scm = &tmp_scm;
1577	wait_for_unix_gc();
1578	err = scm_send(sock, msg, siocb->scm);
1579	if (err < 0)
1580		return err;
1581
1582	err = -EOPNOTSUPP;
1583	if (msg->msg_flags&MSG_OOB)
1584		goto out_err;
1585
1586	if (msg->msg_namelen) {
1587		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1588		goto out_err;
1589	} else {
1590		err = -ENOTCONN;
1591		other = unix_peer(sk);
1592		if (!other)
1593			goto out_err;
1594	}
1595
1596	if (sk->sk_shutdown & SEND_SHUTDOWN)
1597		goto pipe_err;
1598
1599	while (sent < len) {
1600		/*
1601		 *	Optimisation: in practice fewer than 0.01% of X
1602		 *	messages need to be broken up.
1603		 */
1604
1605		size = len-sent;
1606
1607		/* Keep two messages in the pipe so it schedules better */
1608		if (size > ((sk->sk_sndbuf >> 1) - 64))
1609			size = (sk->sk_sndbuf >> 1) - 64;
1610
1611		if (size > SKB_MAX_ALLOC)
1612			size = SKB_MAX_ALLOC;
1613
1614		/*
1615		 *	Grab a buffer
1616		 */
1617
1618		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1619					  &err);
1620
1621		if (skb == NULL)
1622			goto out_err;
1623
1624		/*
1625		 *	If you pass two values to sock_alloc_send_skb
1626		 *	it tries to grab the large buffer with GFP_NOFS
1627		 *	(which can fail easily), and if that fails it grabs
1628		 *	the fallback-size buffer, which is under a page and
1629		 *	will succeed. [Alan]
1630		 */
1631		size = min_t(int, size, skb_tailroom(skb));
1632
1633
1634		/* Only send the fds in the first buffer */
1635		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1636		if (err < 0) {
1637			kfree_skb(skb);
1638			goto out_err;
1639		}
1640		max_level = err + 1;
1641		fds_sent = true;
1642
1643		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1644		if (err) {
1645			kfree_skb(skb);
1646			goto out_err;
1647		}
1648
1649		unix_state_lock(other);
1650
1651		if (sock_flag(other, SOCK_DEAD) ||
1652		    (other->sk_shutdown & RCV_SHUTDOWN))
1653			goto pipe_err_free;
1654
1655		skb_queue_tail(&other->sk_receive_queue, skb);
1656		if (max_level > unix_sk(other)->recursion_level)
1657			unix_sk(other)->recursion_level = max_level;
1658		unix_state_unlock(other);
1659		other->sk_data_ready(other, size);
1660		sent += size;
1661	}
1662
1663	scm_destroy(siocb->scm);
1664	siocb->scm = NULL;
1665
1666	return sent;
1667
1668pipe_err_free:
1669	unix_state_unlock(other);
1670	kfree_skb(skb);
1671pipe_err:
1672	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1673		send_sig(SIGPIPE, current, 0);
1674	err = -EPIPE;
1675out_err:
1676	scm_destroy(siocb->scm);
1677	siocb->scm = NULL;
1678	return sent ? : err;
1679}
1680
1681static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1682				  struct msghdr *msg, size_t len)
1683{
1684	int err;
1685	struct sock *sk = sock->sk;
1686
1687	err = sock_error(sk);
1688	if (err)
1689		return err;
1690
1691	if (sk->sk_state != TCP_ESTABLISHED)
1692		return -ENOTCONN;
1693
1694	if (msg->msg_namelen)
1695		msg->msg_namelen = 0;
1696
1697	return unix_dgram_sendmsg(kiocb, sock, msg, len);
1698}
1699
1700static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
1701			      struct msghdr *msg, size_t size,
1702			      int flags)
1703{
1704	struct sock *sk = sock->sk;
1705
1706	if (sk->sk_state != TCP_ESTABLISHED)
1707		return -ENOTCONN;
1708
1709	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
1710}
1711
1712static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1713{
1714	struct unix_sock *u = unix_sk(sk);
1715
1716	msg->msg_namelen = 0;
1717	if (u->addr) {
1718		msg->msg_namelen = u->addr->len;
1719		memcpy(msg->msg_name, u->addr->name, u->addr->len);
1720	}
1721}
1722
1723static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1724			      struct msghdr *msg, size_t size,
1725			      int flags)
1726{
1727	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1728	struct scm_cookie tmp_scm;
1729	struct sock *sk = sock->sk;
1730	struct unix_sock *u = unix_sk(sk);
1731	int noblock = flags & MSG_DONTWAIT;
1732	struct sk_buff *skb;
 
1733	int err;
1734
1735	err = -EOPNOTSUPP;
1736	if (flags&MSG_OOB)
1737		goto out;
1738
1739	msg->msg_namelen = 0;
1740
1741	err = mutex_lock_interruptible(&u->readlock);
1742	if (err) {
1743		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1744		goto out;
1745	}
1746
1747	skb = skb_recv_datagram(sk, flags, noblock, &err);
1748	if (!skb) {
1749		unix_state_lock(sk);
1750		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1751		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1752		    (sk->sk_shutdown & RCV_SHUTDOWN))
1753			err = 0;
1754		unix_state_unlock(sk);
1755		goto out_unlock;
1756	}
1757
1758	wake_up_interruptible_sync_poll(&u->peer_wait,
1759					POLLOUT | POLLWRNORM | POLLWRBAND);
1760
1761	if (msg->msg_name)
1762		unix_copy_addr(msg, skb->sk);
1763
1764	if (size > skb->len)
1765		size = skb->len;
1766	else if (size < skb->len)
1767		msg->msg_flags |= MSG_TRUNC;
1768
1769	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1770	if (err)
1771		goto out_free;
1772
1773	if (sock_flag(sk, SOCK_RCVTSTAMP))
1774		__sock_recv_timestamp(msg, sk, skb);
1775
1776	if (!siocb->scm) {
1777		siocb->scm = &tmp_scm;
1778		memset(&tmp_scm, 0, sizeof(tmp_scm));
1779	}
1780	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1781	unix_set_secdata(siocb->scm, skb);
1782
1783	if (!(flags & MSG_PEEK)) {
1784		if (UNIXCB(skb).fp)
1785			unix_detach_fds(siocb->scm, skb);
1786	} else {
1787		/* It is questionable what to do on MSG_PEEK; we could:
1788		   - not return the fds - good, but too simple 8)
1789		   - return the fds, and not return them on the later read
1790		     (the old strategy, apparently wrong)
1791		   - clone the fds (chosen for now, as the most universal
1792		     solution)
1793
1794		   POSIX 1003.1g does not actually define this clearly
1795		   at all. POSIX 1003.1g doesn't define a lot of things
1796		   clearly, however!
1797
1798		*/
1799		if (UNIXCB(skb).fp)
1800			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1801	}
1802	err = size;
1803
1804	scm_recv(sock, msg, siocb->scm, flags);
1805
1806out_free:
1807	skb_free_datagram(sk, skb);
1808out_unlock:
1809	mutex_unlock(&u->readlock);
1810out:
1811	return err;
1812}
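/*
 * Editor's note (illustrative, not part of the original file): the receive
 * side of fd passing as a minimal userspace sketch.  unix_detach_fds() above
 * hands the queued file references back to recvmsg() as an SCM_RIGHTS
 * control message.  The function name is hypothetical.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *
 *	int recv_fd_example(int sock)
 *	{
 *		char data;
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		char cbuf[CMSG_SPACE(sizeof(int))];
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg;
 *		int fd = -1;
 *
 *		if (recvmsg(sock, &msg, 0) < 0)
 *			return -1;
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
 *		    cmsg->cmsg_type == SCM_RIGHTS)
 *			memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
 *		return fd;
 *	}
 */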
1813
1814/*
1815 *	Sleep until data has arrived. But check for races.
1816 */
1817
1818static long unix_stream_data_wait(struct sock *sk, long timeo)
 
1819{
 
1820	DEFINE_WAIT(wait);
1821
1822	unix_state_lock(sk);
1823
1824	for (;;) {
1825		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1826
1827		if (!skb_queue_empty(&sk->sk_receive_queue) ||
1828		    sk->sk_err ||
1829		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1830		    signal_pending(current) ||
1831		    !timeo)
1832			break;
1833
1834		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1835		unix_state_unlock(sk);
1836		timeo = schedule_timeout(timeo);
1837		unix_state_lock(sk);
1838		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1839	}
1840
1841	finish_wait(sk_sleep(sk), &wait);
1842	unix_state_unlock(sk);
1843	return timeo;
1844}
1845
1846
1847
1848static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1849			       struct msghdr *msg, size_t size,
1850			       int flags)
1851{
1852	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1853	struct scm_cookie tmp_scm;
1854	struct sock *sk = sock->sk;
1855	struct unix_sock *u = unix_sk(sk);
1856	struct sockaddr_un *sunaddr = msg->msg_name;
1857	int copied = 0;
1858	int check_creds = 0;
1859	int target;
1860	int err = 0;
1861	long timeo;
1862
1863	err = -EINVAL;
1864	if (sk->sk_state != TCP_ESTABLISHED)
1865		goto out;
 
1866
1867	err = -EOPNOTSUPP;
1868	if (flags&MSG_OOB)
1869		goto out;
 
1870
1871	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1872	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1873
1874	msg->msg_namelen = 0;
1875
1876	/* Lock the socket to prevent the queue from being reordered
1877	 * while we sleep copying data out to the iovec.
1878	 */
 
1879
1880	if (!siocb->scm) {
1881		siocb->scm = &tmp_scm;
1882		memset(&tmp_scm, 0, sizeof(tmp_scm));
1883	}
1884
1885	err = mutex_lock_interruptible(&u->readlock);
1886	if (err) {
1887		err = sock_intr_errno(timeo);
1888		goto out;
1889	}
1890
1891	do {
1892		int chunk;
1893		struct sk_buff *skb;
 
1894
 
1895		unix_state_lock(sk);
1896		skb = skb_dequeue(&sk->sk_receive_queue);
1897		if (skb == NULL) {
1898			unix_sk(sk)->recursion_level = 0;
1899			if (copied >= target)
1900				goto unlock;
1901
1902			/*
1903			 *	POSIX 1003.1g mandates this order.
1904			 */
1905
1906			err = sock_error(sk);
1907			if (err)
1908				goto unlock;
1909			if (sk->sk_shutdown & RCV_SHUTDOWN)
1910				goto unlock;
1911
1912			unix_state_unlock(sk);
1913			err = -EAGAIN;
1914			if (!timeo)
1915				break;
1916			mutex_unlock(&u->readlock);
1917
1918			timeo = unix_stream_data_wait(sk, timeo);
1919
1920			if (signal_pending(current)
1921			    ||  mutex_lock_interruptible(&u->readlock)) {
1922				err = sock_intr_errno(timeo);
 
1923				goto out;
1924			}
1925
1926			continue;
1927 unlock:
 
1928			unix_state_unlock(sk);
1929			break;
1930		}
1931		unix_state_unlock(sk);
1932
1933		if (check_creds) {
1934			/* Never glue messages from different writers */
1935			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
1936			    (UNIXCB(skb).cred != siocb->scm->cred)) {
1937				skb_queue_head(&sk->sk_receive_queue, skb);
1938				break;
1939			}
1940		} else {
1941			/* Copy credentials */
1942			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1943			check_creds = 1;
 
1944		}
1945
1946		/* Copy address just once */
1947		if (sunaddr) {
1948			unix_copy_addr(msg, skb->sk);
1949			sunaddr = NULL;
1950		}
1951
1952		chunk = min_t(unsigned int, skb->len, size);
1953		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1954			skb_queue_head(&sk->sk_receive_queue, skb);
1955			if (copied == 0)
1956				copied = -EFAULT;
1957			break;
1958		}
1959		copied += chunk;
1960		size -= chunk;
1961
1962		/* Mark read part of skb as used */
1963		if (!(flags & MSG_PEEK)) {
1964			skb_pull(skb, chunk);
1965
1966			if (UNIXCB(skb).fp)
1967				unix_detach_fds(siocb->scm, skb);
1968
1969			/* put the skb back if we didn't use it up.. */
1970			if (skb->len) {
1971				skb_queue_head(&sk->sk_receive_queue, skb);
1972				break;
1973			}
1974
 
1975			consume_skb(skb);
1976
1977			if (siocb->scm->fp)
1978				break;
1979		} else {
1980			/* It is questionable, see note in unix_dgram_recvmsg.
1981			 */
1982			if (UNIXCB(skb).fp)
1983				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1984
1985			/* put message back and return */
1986			skb_queue_head(&sk->sk_receive_queue, skb);
1987			break;
1988		}
1989	} while (size);
1990
1991	mutex_unlock(&u->readlock);
1992	scm_recv(sock, msg, siocb->scm, flags);
1993out:
1994	return copied ? : err;
1995}
1996
1997static int unix_shutdown(struct socket *sock, int mode)
1998{
1999	struct sock *sk = sock->sk;
2000	struct sock *other;
2001
2002	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
2003
2004	if (!mode)
2005		return 0;
2006
2007	unix_state_lock(sk);
2008	sk->sk_shutdown |= mode;
2009	other = unix_peer(sk);
2010	if (other)
2011		sock_hold(other);
2012	unix_state_unlock(sk);
2013	sk->sk_state_change(sk);
2014
2015	if (other &&
2016		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2017
2018		int peer_mode = 0;
2019
2020		if (mode&RCV_SHUTDOWN)
2021			peer_mode |= SEND_SHUTDOWN;
2022		if (mode&SEND_SHUTDOWN)
2023			peer_mode |= RCV_SHUTDOWN;
2024		unix_state_lock(other);
2025		other->sk_shutdown |= peer_mode;
2026		unix_state_unlock(other);
2027		other->sk_state_change(other);
2028		if (peer_mode == SHUTDOWN_MASK)
2029			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2030		else if (peer_mode & RCV_SHUTDOWN)
2031			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2032	}
2033	if (other)
2034		sock_put(other);
2035
2036	return 0;
2037}
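/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace sketch.  Because unix_shutdown() above mirrors the mode onto the
 * peer, SHUT_WR here makes the other end of a stream socket see EOF on its
 * next read() while we can still drain what it sends us.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	void half_close_example(int fd)
 *	{
 *		char buf[64];
 *
 *		shutdown(fd, SHUT_WR);			// no more sends from us
 *		while (read(fd, buf, sizeof(buf)) > 0)	// peer may keep talking
 *			;
 *	}
 */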
2038
2039static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2040{
2041	struct sock *sk = sock->sk;
2042	long amount = 0;
2043	int err;
2044
2045	switch (cmd) {
2046	case SIOCOUTQ:
2047		amount = sk_wmem_alloc_get(sk);
2048		err = put_user(amount, (int __user *)arg);
2049		break;
2050	case SIOCINQ:
2051		{
2052			struct sk_buff *skb;
2053
2054			if (sk->sk_state == TCP_LISTEN) {
2055				err = -EINVAL;
2056				break;
2057			}
2058
2059			spin_lock(&sk->sk_receive_queue.lock);
2060			if (sk->sk_type == SOCK_STREAM ||
2061			    sk->sk_type == SOCK_SEQPACKET) {
2062				skb_queue_walk(&sk->sk_receive_queue, skb)
2063					amount += skb->len;
2064			} else {
2065				skb = skb_peek(&sk->sk_receive_queue);
2066				if (skb)
2067					amount = skb->len;
2068			}
2069			spin_unlock(&sk->sk_receive_queue.lock);
2070			err = put_user(amount, (int __user *)arg);
2071			break;
2072		}
2073
 
2074	default:
2075		err = -ENOIOCTLCMD;
2076		break;
2077	}
2078	return err;
2079}
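/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace sketch of the two ioctls implemented above.  SIOCINQ reports the
 * queued receive bytes (or the size of the next datagram) and SIOCOUTQ the
 * bytes the peer has not consumed yet.  The header choice is an assumption;
 * on Linux the constants come from <linux/sockios.h>.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	void queue_sizes_example(int fd)
 *	{
 *		int inq = 0, outq = 0;
 *
 *		ioctl(fd, SIOCINQ, &inq);
 *		ioctl(fd, SIOCOUTQ, &outq);
 *	}
 */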
2080
2081static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2082{
2083	struct sock *sk = sock->sk;
2084	unsigned int mask;
2085
2086	sock_poll_wait(file, sk_sleep(sk), wait);
2087	mask = 0;
2088
2089	/* exceptional events? */
2090	if (sk->sk_err)
2091		mask |= POLLERR;
2092	if (sk->sk_shutdown == SHUTDOWN_MASK)
2093		mask |= POLLHUP;
2094	if (sk->sk_shutdown & RCV_SHUTDOWN)
2095		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2096
2097	/* readable? */
2098	if (!skb_queue_empty(&sk->sk_receive_queue))
2099		mask |= POLLIN | POLLRDNORM;
2100
2101	/* Connection-based need to check for termination and startup */
2102	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2103	    sk->sk_state == TCP_CLOSE)
2104		mask |= POLLHUP;
2105
2106	/*
2107	 * we set writable also when the other side has shut down the
2108	 * connection. This prevents stuck sockets.
2109	 */
2110	if (unix_writable(sk))
2111		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2112
2113	return mask;
2114}
2115
2116static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2117				    poll_table *wait)
2118{
2119	struct sock *sk = sock->sk, *other;
2120	unsigned int mask, writable;
 
2121
2122	sock_poll_wait(file, sk_sleep(sk), wait);
2123	mask = 0;
2124
2125	/* exceptional events? */
2126	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2127		mask |= POLLERR;
2128	if (sk->sk_shutdown & RCV_SHUTDOWN)
2129		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2130	if (sk->sk_shutdown == SHUTDOWN_MASK)
2131		mask |= POLLHUP;
2132
2133	/* readable? */
2134	if (!skb_queue_empty(&sk->sk_receive_queue))
2135		mask |= POLLIN | POLLRDNORM;
2136
2137	/* Connection-based need to check for termination and startup */
2138	if (sk->sk_type == SOCK_SEQPACKET) {
2139		if (sk->sk_state == TCP_CLOSE)
2140			mask |= POLLHUP;
2141		/* connection hasn't started yet? */
2142		if (sk->sk_state == TCP_SYN_SENT)
2143			return mask;
2144	}
2145
2146	/* No write status requested, avoid expensive OUT tests. */
2147	if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
2148		return mask;
2149
2150	writable = unix_writable(sk);
2151	other = unix_peer_get(sk);
2152	if (other) {
2153		if (unix_peer(other) != sk) {
2154			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2155			if (unix_recvq_full(other))
2156				writable = 0;
2157		}
2158		sock_put(other);
2159	}
2160
2161	if (writable)
2162		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2163	else
2164		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2165
2166	return mask;
2167}
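/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * userspace sketch of polling a datagram socket.  As implemented above,
 * writability also requires that a connected peer's receive queue is not
 * full.
 *
 *	#include <poll.h>
 *
 *	int wait_writable_example(int fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *		return poll(&pfd, 1, timeout_ms);
 *	}
 */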
2168
2169#ifdef CONFIG_PROC_FS
2170static struct sock *first_unix_socket(int *i)
2171{
2172	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2173		if (!hlist_empty(&unix_socket_table[*i]))
2174			return __sk_head(&unix_socket_table[*i]);
2175	}
2176	return NULL;
 
2177}
2178
2179static struct sock *next_unix_socket(int *i, struct sock *s)
2180{
2181	struct sock *next = sk_next(s);
2182	/* More in this chain? */
2183	if (next)
2184		return next;
2185	/* Look for next non-empty chain. */
2186	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2187		if (!hlist_empty(&unix_socket_table[*i]))
2188			return __sk_head(&unix_socket_table[*i]);
2189	}
2190	return NULL;
2191}
2192
2193struct unix_iter_state {
2194	struct seq_net_private p;
2195	int i;
2196};
2197
2198static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2199{
2200	struct unix_iter_state *iter = seq->private;
2201	loff_t off = 0;
2202	struct sock *s;
2203
2204	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2205		if (sock_net(s) != seq_file_net(seq))
2206			continue;
2207		if (off == pos)
2208			return s;
2209		++off;
2210	}
2211	return NULL;
2212}
2213
2214static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2215	__acquires(unix_table_lock)
2216{
2217	spin_lock(&unix_table_lock);
2218	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2219}
2220
2221static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2222{
2223	struct unix_iter_state *iter = seq->private;
2224	struct sock *sk = v;
2225	++*pos;
2226
2227	if (v == SEQ_START_TOKEN)
2228		sk = first_unix_socket(&iter->i);
2229	else
2230		sk = next_unix_socket(&iter->i, sk);
2231	while (sk && (sock_net(sk) != seq_file_net(seq)))
2232		sk = next_unix_socket(&iter->i, sk);
2233	return sk;
2234}
2235
2236static void unix_seq_stop(struct seq_file *seq, void *v)
2237	__releases(unix_table_lock)
2238{
2239	spin_unlock(&unix_table_lock);
2240}
2241
2242static int unix_seq_show(struct seq_file *seq, void *v)
2243{
2244
2245	if (v == SEQ_START_TOKEN)
2246		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2247			 "Inode Path\n");
2248	else {
2249		struct sock *s = v;
2250		struct unix_sock *u = unix_sk(s);
2251		unix_state_lock(s);
2252
2253		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2254			s,
2255			atomic_read(&s->sk_refcnt),
2256			0,
2257			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2258			s->sk_type,
2259			s->sk_socket ?
2260			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2261			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2262			sock_i_ino(s));
2263
2264		if (u->addr) {
2265			int i, len;
2266			seq_putc(seq, ' ');
2267
2268			i = 0;
2269			len = u->addr->len - sizeof(short);
2270			if (!UNIX_ABSTRACT(s))
2271				len--;
2272			else {
2273				seq_putc(seq, '@');
2274				i++;
2275			}
2276			for ( ; i < len; i++)
2277				seq_putc(seq, u->addr->name->sun_path[i]);
 
2278		}
2279		unix_state_unlock(s);
2280		seq_putc(seq, '\n');
2281	}
2282
2283	return 0;
2284}
2285
2286static const struct seq_operations unix_seq_ops = {
2287	.start  = unix_seq_start,
2288	.next   = unix_seq_next,
2289	.stop   = unix_seq_stop,
2290	.show   = unix_seq_show,
2291};
2292
2293static int unix_seq_open(struct inode *inode, struct file *file)
2294{
2295	return seq_open_net(inode, file, &unix_seq_ops,
2296			    sizeof(struct unix_iter_state));
2297}
2298
2299static const struct file_operations unix_seq_fops = {
2300	.owner		= THIS_MODULE,
2301	.open		= unix_seq_open,
2302	.read		= seq_read,
2303	.llseek		= seq_lseek,
2304	.release	= seq_release_net,
2305};
2306
2307#endif
2308
2309static const struct net_proto_family unix_family_ops = {
2310	.family = PF_UNIX,
2311	.create = unix_create,
2312	.owner	= THIS_MODULE,
2313};
2314
2315
2316static int __net_init unix_net_init(struct net *net)
2317{
2318	int error = -ENOMEM;
2319
2320	net->unx.sysctl_max_dgram_qlen = 10;
2321	if (unix_sysctl_register(net))
2322		goto out;
2323
2324#ifdef CONFIG_PROC_FS
2325	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
 
2326		unix_sysctl_unregister(net);
2327		goto out;
2328	}
2329#endif
2330	error = 0;
2331out:
2332	return error;
2333}
2334
2335static void __net_exit unix_net_exit(struct net *net)
2336{
2337	unix_sysctl_unregister(net);
2338	proc_net_remove(net, "unix");
2339}
2340
2341static struct pernet_operations unix_net_ops = {
2342	.init = unix_net_init,
2343	.exit = unix_net_exit,
2344};
2345
2346static int __init af_unix_init(void)
2347{
2348	int rc = -1;
2349	struct sk_buff *dummy_skb;
2350
2351	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2352
2353	rc = proto_register(&unix_proto, 1);
2354	if (rc != 0) {
2355		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2356		       __func__);
2357		goto out;
2358	}
2359
2360	sock_register(&unix_family_ops);
2361	register_pernet_subsys(&unix_net_ops);
2362out:
2363	return rc;
2364}
2365
2366static void __exit af_unix_exit(void)
2367{
2368	sock_unregister(PF_UNIX);
2369	proto_unregister(&unix_proto);
2370	unregister_pernet_subsys(&unix_net_ops);
2371}
2372
2373/* Earlier than device_initcall() so that other drivers invoking
2374   request_module() don't end up in a loop when modprobe tries
2375   to use a UNIX socket. But later than subsys_initcall() because
2376   we depend on stuff initialised there */
2377fs_initcall(af_unix_init);
2378module_exit(af_unix_exit);
2379
2380MODULE_LICENSE("GPL");
2381MODULE_ALIAS_NETPROTO(PF_UNIX);