   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * NET4:	Implementation of BSD Unix domain sockets.
   4 *
   5 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
   6 *
   7 * Fixes:
   8 *		Linus Torvalds	:	Assorted bug cures.
   9 *		Niibe Yutaka	:	async I/O support.
  10 *		Carsten Paeth	:	PF_UNIX check, address fixes.
  11 *		Alan Cox	:	Limit size of allocated blocks.
  12 *		Alan Cox	:	Fixed the stupid socketpair bug.
  13 *		Alan Cox	:	BSD compatibility fine tuning.
  14 *		Alan Cox	:	Fixed a bug in connect when interrupted.
  15 *		Alan Cox	:	Sorted out a proper draft version of
  16 *					file descriptor passing hacked up from
  17 *					Mike Shaver's work.
  18 *		Marty Leisner	:	Fixes to fd passing
  19 *		Nick Nevin	:	recvmsg bugfix.
  20 *		Alan Cox	:	Started proper garbage collector
  21 *		Heiko Eißfeldt	:	Missing verify_area check
  22 *		Alan Cox	:	Started POSIXisms
  23 *		Andreas Schwab	:	Replace inode by dentry for proper
  24 *					reference counting
  25 *		Kirk Petersen	:	Made this a module
  26 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
  27 *					Lots of bug fixes.
  28 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
  29 *					by the above two patches.
  30 *	     Andrea Arcangeli	:	If possible we block in connect(2)
  31 *					if the max backlog of the listen socket
  32 *					has been reached. This won't break
  33 *					old apps and it will avoid huge amounts
  34 *					of socks hashed (for unix_gc()
  35 *					performance reasons).
  36 *					Security fix that limits the max
  37 *					number of socks to 2*max_files and
  38 *					the number of skb queueable in the
  39 *					dgram receiver.
  40 *		Artur Skawina   :	Hash function optimizations
  41 *	     Alexey Kuznetsov   :	Full scale SMP. Lots of bugs are introduced 8)
  42 *	      Malcolm Beattie   :	Set peercred for socketpair
  43 *	     Michal Ostrowski   :       Module initialization cleanup.
  44 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
  45 *	     				the core infrastructure is doing that
  46 *	     				for all net proto families now (2.5.69+)
  47 *
  48 * Known differences from reference BSD that was tested:
  49 *
  50 *	[TO FIX]
  51 *	ECONNREFUSED is not returned from one end of a connected() socket to the
  52 *		other the moment one end closes.
  53 *	fstat() doesn't return st_dev=0, and gives the blksize as the high water mark
  54 *		and a fake inode identifier (nor the BSD first-socket fstat-twice bug).
  55 *	[NOT TO FIX]
  56 *	accept() returns a path name even if the connecting socket has closed
  57 *		in the meantime (BSD loses the path and gives up).
  58 *	accept() returns 0 length path for an unbound connector. BSD returns 16
  59 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  60 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
  61 *	BSD af_unix apparently has connect forgetting to block properly.
  62 *		(need to check this with the POSIX spec in detail)
  63 *
  64 * Differences from 2.0.0-11-... (ANK)
  65 *	Bug fixes and improvements.
  66 *		- client shutdown killed server socket.
  67 *		- removed all useless cli/sti pairs.
  68 *
  69 *	Semantic changes/extensions.
  70 *		- generic control message passing.
  71 *		- SCM_CREDENTIALS control message.
  72 *		- "Abstract" (not FS based) socket bindings.
  73 *		  Abstract names are sequences of bytes (not zero terminated)
  74 *		  starting with a 0 byte, so that this name space does not
  75 *		  intersect with BSD names.
  76 */
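    /*
     * Illustrative userspace sketch (not part of this file): binding into
     * the abstract namespace described above.  The name begins with a NUL
     * byte and is not zero terminated, so the address length must count
     * exactly the bytes used:
     *
     *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
     *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
     *
     *	memcpy(sun.sun_path, "\0example", 8);	// leading NUL => abstract
     *	bind(fd, (struct sockaddr *)&sun,
     *	     offsetof(struct sockaddr_un, sun_path) + 8);
     *
     * With a non-NUL first byte, the same call would instead create a
     * socket inode at sun_path in the filesystem.
     */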
  77
  78#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  79
  80#include <linux/module.h>
  81#include <linux/kernel.h>
  82#include <linux/signal.h>
  83#include <linux/sched/signal.h>
  84#include <linux/errno.h>
  85#include <linux/string.h>
  86#include <linux/stat.h>
  87#include <linux/dcache.h>
  88#include <linux/namei.h>
  89#include <linux/socket.h>
  90#include <linux/un.h>
  91#include <linux/fcntl.h>
  92#include <linux/filter.h>
  93#include <linux/termios.h>
  94#include <linux/sockios.h>
  95#include <linux/net.h>
  96#include <linux/in.h>
  97#include <linux/fs.h>
  98#include <linux/slab.h>
  99#include <linux/uaccess.h>
 100#include <linux/skbuff.h>
 101#include <linux/netdevice.h>
 102#include <net/net_namespace.h>
 103#include <net/sock.h>
 104#include <net/tcp_states.h>
 105#include <net/af_unix.h>
 106#include <linux/proc_fs.h>
 107#include <linux/seq_file.h>
 108#include <net/scm.h>
 109#include <linux/init.h>
 110#include <linux/poll.h>
 111#include <linux/rtnetlink.h>
 112#include <linux/mount.h>
 113#include <net/checksum.h>
 114#include <linux/security.h>
 115#include <linux/splice.h>
 116#include <linux/freezer.h>
 117#include <linux/file.h>
 118#include <linux/btf_ids.h>
 119#include <linux/bpf-cgroup.h>
 120
 121#include "scm.h"
 122
 123static atomic_long_t unix_nr_socks;
 124static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
 125static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
 126
 127/* SMP locking strategy:
 128 *    hash table is protected with spinlock.
 129 *    each socket state is protected by separate spinlock.
 130 */
 131
 132static unsigned int unix_unbound_hash(struct sock *sk)
 133{
 134	unsigned long hash = (unsigned long)sk;
 135
 136	hash ^= hash >> 16;
 137	hash ^= hash >> 8;
 138	hash ^= sk->sk_type;
 139
 140	return hash & UNIX_HASH_MOD;
 141}
 142
 143static unsigned int unix_bsd_hash(struct inode *i)
 144{
 145	return i->i_ino & UNIX_HASH_MOD;
 146}
 147
 148static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
 149				       int addr_len, int type)
 150{
 151	__wsum csum = csum_partial(sunaddr, addr_len, 0);
 152	unsigned int hash;
 153
 154	hash = (__force unsigned int)csum_fold(csum);
 155	hash ^= hash >> 8;
 156	hash ^= type;
 157
 158	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
 159}
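    /*
     * Note how the hashes above partition the per-netns table: unbound
     * sockets and bound filesystem (BSD) sockets hash into
     * [0, UNIX_HASH_MOD], while unix_abstract_hash() adds
     * UNIX_HASH_MOD + 1 so abstract names land in the upper half and can
     * never collide with the other buckets.  Bound BSD sockets are
     * additionally chained into the global bsd_socket_buckets[] above,
     * keyed by inode number, for lookup by unix_find_socket_byinode().
     */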
 160
 161static void unix_table_double_lock(struct net *net,
 162				   unsigned int hash1, unsigned int hash2)
 163{
 164	if (hash1 == hash2) {
 165		spin_lock(&net->unx.table.locks[hash1]);
 166		return;
 167	}
 168
 169	if (hash1 > hash2)
 170		swap(hash1, hash2);
 171
 172	spin_lock(&net->unx.table.locks[hash1]);
 173	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
 174}
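    /*
     * Taking the two bucket locks in ascending index order, with the
     * second annotated as SINGLE_DEPTH_NESTING for lockdep, keeps this
     * double lock deadlock-free: any two paths re-hashing between the
     * same pair of buckets acquire the locks in the same order.
     */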
 175
 176static void unix_table_double_unlock(struct net *net,
 177				     unsigned int hash1, unsigned int hash2)
 178{
 179	if (hash1 == hash2) {
 180		spin_unlock(&net->unx.table.locks[hash1]);
 181		return;
 182	}
 183
 184	spin_unlock(&net->unx.table.locks[hash1]);
 185	spin_unlock(&net->unx.table.locks[hash2]);
 186}
 187
 188#ifdef CONFIG_SECURITY_NETWORK
 189static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 190{
 191	UNIXCB(skb).secid = scm->secid;
 192}
 193
 194static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 195{
 196	scm->secid = UNIXCB(skb).secid;
 197}
 198
 199static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 200{
 201	return (scm->secid == UNIXCB(skb).secid);
 202}
 203#else
 204static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 205{ }
 206
 207static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 208{ }
 209
 210static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 211{
 212	return true;
 213}
 214#endif /* CONFIG_SECURITY_NETWORK */
 215
 216static inline int unix_our_peer(struct sock *sk, struct sock *osk)
 217{
 218	return unix_peer(osk) == sk;
 219}
 220
 221static inline int unix_may_send(struct sock *sk, struct sock *osk)
 222{
 223	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 224}
 225
 226static inline int unix_recvq_full(const struct sock *sk)
 227{
 228	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 229}
 230
 231static inline int unix_recvq_full_lockless(const struct sock *sk)
 232{
 233	return skb_queue_len_lockless(&sk->sk_receive_queue) >
 234		READ_ONCE(sk->sk_max_ack_backlog);
 235}
 236
 237struct sock *unix_peer_get(struct sock *s)
 238{
 239	struct sock *peer;
 240
 241	unix_state_lock(s);
 242	peer = unix_peer(s);
 243	if (peer)
 244		sock_hold(peer);
 245	unix_state_unlock(s);
 246	return peer;
 247}
 248EXPORT_SYMBOL_GPL(unix_peer_get);
 249
 250static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
 251					     int addr_len)
 252{
 253	struct unix_address *addr;
 254
 255	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
 256	if (!addr)
 257		return NULL;
 258
 259	refcount_set(&addr->refcnt, 1);
 260	addr->len = addr_len;
 261	memcpy(addr->name, sunaddr, addr_len);
 262
 263	return addr;
 264}
 265
 266static inline void unix_release_addr(struct unix_address *addr)
 267{
 268	if (refcount_dec_and_test(&addr->refcnt))
 269		kfree(addr);
 270}
 271
 272/*
 273 *	Check unix socket name:
 274 *		- should not be zero length.
 275 *		- if it starts with a non-zero byte, it should be NUL terminated (FS object)
 276 *		- if it starts with a zero byte, it is an abstract name.
 277 */
 278
 279static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
 280{
 281	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
 282	    addr_len > sizeof(*sunaddr))
 283		return -EINVAL;
 284
 285	if (sunaddr->sun_family != AF_UNIX)
 286		return -EINVAL;
 287
 288	return 0;
 289}
 290
 291static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
 292{
 293	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
 294	short offset = offsetof(struct sockaddr_storage, __data);
 295
 296	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
 297
 298	/* This may look like an off by one error but it is a bit more
 299	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
 300	 * sun_path[108] doesn't as such exist.  However in kernel space
 301	 * we are guaranteed that it is a valid memory location in our
 302	 * kernel address buffer because syscall functions always pass
 303	 * a pointer of struct sockaddr_storage which has a bigger buffer
 304	 * than 108.  Also, we must terminate sun_path for strlen() in
 305	 * getname_kernel().
 306	 */
 307	addr->__data[addr_len - offset] = 0;
 308
 309	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, 108 will
 310	 * cause panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
 311	 * know the actual buffer.
 312	 */
 313	return strlen(addr->__data) + offset + 1;
 314}
 315
 316static void __unix_remove_socket(struct sock *sk)
 317{
 318	sk_del_node_init(sk);
 319}
 320
 321static void __unix_insert_socket(struct net *net, struct sock *sk)
 322{
 323	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 324	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
 325}
 326
 327static void __unix_set_addr_hash(struct net *net, struct sock *sk,
 328				 struct unix_address *addr, unsigned int hash)
 329{
 330	__unix_remove_socket(sk);
 331	smp_store_release(&unix_sk(sk)->addr, addr);
 332
 333	sk->sk_hash = hash;
 334	__unix_insert_socket(net, sk);
 335}
 336
 337static void unix_remove_socket(struct net *net, struct sock *sk)
 338{
 339	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 340	__unix_remove_socket(sk);
 341	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 342}
 343
 344static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
 345{
 346	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 347	__unix_insert_socket(net, sk);
 348	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 349}
 350
 351static void unix_insert_bsd_socket(struct sock *sk)
 352{
 353	spin_lock(&bsd_socket_locks[sk->sk_hash]);
 354	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
 355	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 356}
 357
 358static void unix_remove_bsd_socket(struct sock *sk)
 359{
 360	if (!hlist_unhashed(&sk->sk_bind_node)) {
 361		spin_lock(&bsd_socket_locks[sk->sk_hash]);
 362		__sk_del_bind_node(sk);
 363		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 364
 365		sk_node_init(&sk->sk_bind_node);
 366	}
 367}
 368
 369static struct sock *__unix_find_socket_byname(struct net *net,
 370					      struct sockaddr_un *sunname,
 371					      int len, unsigned int hash)
 372{
 373	struct sock *s;
 374
 375	sk_for_each(s, &net->unx.table.buckets[hash]) {
 376		struct unix_sock *u = unix_sk(s);
 377
 378		if (u->addr->len == len &&
 379		    !memcmp(u->addr->name, sunname, len))
 380			return s;
 381	}
 382	return NULL;
 383}
 384
 385static inline struct sock *unix_find_socket_byname(struct net *net,
 386						   struct sockaddr_un *sunname,
 387						   int len, unsigned int hash)
 388{
 389	struct sock *s;
 390
 391	spin_lock(&net->unx.table.locks[hash]);
 392	s = __unix_find_socket_byname(net, sunname, len, hash);
 393	if (s)
 394		sock_hold(s);
 395	spin_unlock(&net->unx.table.locks[hash]);
 396	return s;
 397}
 398
 399static struct sock *unix_find_socket_byinode(struct inode *i)
 400{
 401	unsigned int hash = unix_bsd_hash(i);
 402	struct sock *s;
 403
 404	spin_lock(&bsd_socket_locks[hash]);
 405	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
 406		struct dentry *dentry = unix_sk(s)->path.dentry;
 407
 408		if (dentry && d_backing_inode(dentry) == i) {
 409			sock_hold(s);
 410			spin_unlock(&bsd_socket_locks[hash]);
 411			return s;
 412		}
 413	}
 414	spin_unlock(&bsd_socket_locks[hash]);
 415	return NULL;
 416}
 417
 418/* Support code for asymmetrically connected dgram sockets
 419 *
 420 * If a datagram socket is connected to a socket not itself connected
 421 * to the first socket (eg, /dev/log), clients may only enqueue more
 422 * messages if the present receive queue of the server socket is not
 423 * "too large". This means there's a second writeability condition
 424 * poll and sendmsg need to test. The dgram recv code will do a wake
 425 * up on the peer_wait wait queue of a socket upon reception of a
 426 * datagram which needs to be propagated to sleeping would-be writers
 427 * since these might not have sent anything so far. This can't be
 428 * accomplished via poll_wait because the lifetime of the server
 429 * socket might be less than that of its clients if these break their
 430 * association with it or if the server socket is closed while clients
 431 * are still connected to it and there's no way to inform "a polling
 432 * implementation" that it should let go of a certain wait queue
 433 *
 434 * In order to propagate a wake up, a wait_queue_entry_t of the client
 435 * socket is enqueued on the peer_wait queue of the server socket
 436 * whose wake function does a wake_up on the ordinary client socket
 437 * wait queue. This connection is established whenever a write (or
 438 * poll for write) hit the flow control condition and broken when the
 439 * association to the server socket is dissolved or after a wake up
 440 * was relayed.
 441 */
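    /*
     * A concrete (hedged) example of the above: many clients connect(2)
     * a SOCK_DGRAM socket to one listener such as a /dev/log-style
     * server.  When a client's send finds the server's receive queue
     * full, unix_dgram_peer_wake_connect() below parks the client's
     * peer_wake entry on the server's peer_wait queue; once the server's
     * recvmsg() drains a datagram, unix_dgram_peer_wake_relay() forwards
     * the wakeup to the client's own wait queue, so its poll() reports
     * EPOLLOUT again.
     */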
 442
 443static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 444				      void *key)
 445{
 446	struct unix_sock *u;
 447	wait_queue_head_t *u_sleep;
 448
 449	u = container_of(q, struct unix_sock, peer_wake);
 450
 451	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 452			    q);
 453	u->peer_wake.private = NULL;
 454
 455	/* relaying can only happen while the wq still exists */
 456	u_sleep = sk_sleep(&u->sk);
 457	if (u_sleep)
 458		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
 459
 460	return 0;
 461}
 462
 463static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 464{
 465	struct unix_sock *u, *u_other;
 466	int rc;
 467
 468	u = unix_sk(sk);
 469	u_other = unix_sk(other);
 470	rc = 0;
 471	spin_lock(&u_other->peer_wait.lock);
 472
 473	if (!u->peer_wake.private) {
 474		u->peer_wake.private = other;
 475		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 476
 477		rc = 1;
 478	}
 479
 480	spin_unlock(&u_other->peer_wait.lock);
 481	return rc;
 482}
 483
 484static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 485					    struct sock *other)
 486{
 487	struct unix_sock *u, *u_other;
 488
 489	u = unix_sk(sk);
 490	u_other = unix_sk(other);
 491	spin_lock(&u_other->peer_wait.lock);
 492
 493	if (u->peer_wake.private == other) {
 494		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 495		u->peer_wake.private = NULL;
 496	}
 497
 498	spin_unlock(&u_other->peer_wait.lock);
 499}
 500
 501static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 502						   struct sock *other)
 503{
 504	unix_dgram_peer_wake_disconnect(sk, other);
 505	wake_up_interruptible_poll(sk_sleep(sk),
 506				   EPOLLOUT |
 507				   EPOLLWRNORM |
 508				   EPOLLWRBAND);
 509}
 510
 511/* preconditions:
 512 *	- unix_peer(sk) == other
 513 *	- association is stable
 514 */
 515static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 516{
 517	int connected;
 518
 519	connected = unix_dgram_peer_wake_connect(sk, other);
 520
 521	/* If other is SOCK_DEAD, we want to make sure we signal
 522	 * POLLOUT, such that a subsequent write() can get a
 523	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
 524	 * to other and its full, we will hang waiting for POLLOUT.
 525	 */
 526	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
 527		return 1;
 528
 529	if (connected)
 530		unix_dgram_peer_wake_disconnect(sk, other);
 531
 532	return 0;
 533}
 534
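    /*
     * Writability rule used below: the bytes charged to sk_wmem_alloc
     * may amount to at most a quarter of sk_sndbuf (hence the << 2)
     * before the socket stops being considered writable.
     */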
 535static int unix_writable(const struct sock *sk)
 536{
 537	return sk->sk_state != TCP_LISTEN &&
 538	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 539}
 540
 541static void unix_write_space(struct sock *sk)
 542{
 543	struct socket_wq *wq;
 544
 545	rcu_read_lock();
 546	if (unix_writable(sk)) {
 547		wq = rcu_dereference(sk->sk_wq);
 548		if (skwq_has_sleeper(wq))
 549			wake_up_interruptible_sync_poll(&wq->wait,
 550				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 551		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 552	}
 553	rcu_read_unlock();
 554}
 555
 556/* When a dgram socket disconnects (or changes its peer), we clear its receive
 557 * queue of packets that arrived from the previous peer. First, this allows
 558 * flow control based only on wmem_alloc; second, an sk connected to a peer
 559 * may receive messages only from that peer. */
 560static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 561{
 562	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 563		skb_queue_purge(&sk->sk_receive_queue);
 564		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 565
 566		/* If one link of a bidirectional dgram pipe is disconnected,
 567		 * we signal an error. Messages are lost. Do not do this
 568		 * when the peer was not connected to us.
 569		 */
 570		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
 571			WRITE_ONCE(other->sk_err, ECONNRESET);
 572			sk_error_report(other);
 573		}
 574	}
 575	other->sk_state = TCP_CLOSE;
 576}
 577
 578static void unix_sock_destructor(struct sock *sk)
 579{
 580	struct unix_sock *u = unix_sk(sk);
 581
 582	skb_queue_purge(&sk->sk_receive_queue);
 583
 584	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
 585	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 586	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
 587	if (!sock_flag(sk, SOCK_DEAD)) {
 588		pr_info("Attempt to release alive unix socket: %p\n", sk);
 589		return;
 590	}
 591
 592	if (u->addr)
 593		unix_release_addr(u->addr);
 594
 595	atomic_long_dec(&unix_nr_socks);
 596	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 597#ifdef UNIX_REFCNT_DEBUG
 598	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
 599		atomic_long_read(&unix_nr_socks));
 600#endif
 601}
 602
 603static void unix_release_sock(struct sock *sk, int embrion)
 604{
 605	struct unix_sock *u = unix_sk(sk);
 606	struct sock *skpair;
 607	struct sk_buff *skb;
 608	struct path path;
 609	int state;
 610
 611	unix_remove_socket(sock_net(sk), sk);
 612	unix_remove_bsd_socket(sk);
 613
 614	/* Clear state */
 615	unix_state_lock(sk);
 616	sock_orphan(sk);
 617	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 618	path	     = u->path;
 619	u->path.dentry = NULL;
 620	u->path.mnt = NULL;
 621	state = sk->sk_state;
 622	sk->sk_state = TCP_CLOSE;
 623
 624	skpair = unix_peer(sk);
 625	unix_peer(sk) = NULL;
 626
 627	unix_state_unlock(sk);
 628
 629#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 630	if (u->oob_skb) {
 631		kfree_skb(u->oob_skb);
 632		u->oob_skb = NULL;
 633	}
 634#endif
 635
 636	wake_up_interruptible_all(&u->peer_wait);
 637
 638	if (skpair != NULL) {
 639		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 640			unix_state_lock(skpair);
 641			/* No more writes */
 642			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
 643			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
 644				WRITE_ONCE(skpair->sk_err, ECONNRESET);
 645			unix_state_unlock(skpair);
 646			skpair->sk_state_change(skpair);
 647			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 648		}
 649
 650		unix_dgram_peer_wake_disconnect(sk, skpair);
 651		sock_put(skpair); /* It may now die */
 652	}
 653
 654	/* Try to flush out this socket. Throw out buffers at least */
 655
 656	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 657		if (state == TCP_LISTEN)
 658			unix_release_sock(skb->sk, 1);
 659		/* passed fds are erased in the kfree_skb hook	      */
 660		UNIXCB(skb).consumed = skb->len;
 661		kfree_skb(skb);
 662	}
 663
 664	if (path.dentry)
 665		path_put(&path);
 666
 667	sock_put(sk);
 668
 669	/* ---- Socket is dead now and most probably destroyed ---- */
 670
 671	/*
 672	 * Fixme: BSD difference: In BSD all sockets connected to us get
 673	 *	  ECONNRESET and we die on the spot. In Linux we behave
 674	 *	  like files and pipes do and wait for the last
 675	 *	  dereference.
 676	 *
 677	 * Can't we simply set sock->err?
 678	 *
 679	 *	  What is the above comment talking about? --ANK(980817)
 680	 */
 681
 682	if (READ_ONCE(unix_tot_inflight))
 683		unix_gc();		/* Garbage collect fds */
 684}
 685
 686static void init_peercred(struct sock *sk)
 687{
 688	const struct cred *old_cred;
 689	struct pid *old_pid;
 690
 691	spin_lock(&sk->sk_peer_lock);
 692	old_pid = sk->sk_peer_pid;
 693	old_cred = sk->sk_peer_cred;
 694	sk->sk_peer_pid  = get_pid(task_tgid(current));
 695	sk->sk_peer_cred = get_current_cred();
 696	spin_unlock(&sk->sk_peer_lock);
 697
 698	put_pid(old_pid);
 699	put_cred(old_cred);
 700}
 701
 702static void copy_peercred(struct sock *sk, struct sock *peersk)
 703{
 704	const struct cred *old_cred;
 705	struct pid *old_pid;
 706
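    	/* Take the two peer locks in pointer order so that concurrent
	 * copy_peercred() calls on the same pair cannot deadlock; lockdep
	 * requires the nested annotation on whichever lock comes second.
	 */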
 707	if (sk < peersk) {
 708		spin_lock(&sk->sk_peer_lock);
 709		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 710	} else {
 711		spin_lock(&peersk->sk_peer_lock);
 712		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 713	}
 714	old_pid = sk->sk_peer_pid;
 715	old_cred = sk->sk_peer_cred;
 716	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
 717	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 718
 719	spin_unlock(&sk->sk_peer_lock);
 720	spin_unlock(&peersk->sk_peer_lock);
 721
 722	put_pid(old_pid);
 723	put_cred(old_cred);
 724}
 725
 726static int unix_listen(struct socket *sock, int backlog)
 727{
 728	int err;
 729	struct sock *sk = sock->sk;
 730	struct unix_sock *u = unix_sk(sk);
 731
 732	err = -EOPNOTSUPP;
 733	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 734		goto out;	/* Only stream/seqpacket sockets accept */
 735	err = -EINVAL;
 736	if (!u->addr)
 737		goto out;	/* No listens on an unbound socket */
 738	unix_state_lock(sk);
 739	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
 740		goto out_unlock;
 741	if (backlog > sk->sk_max_ack_backlog)
 742		wake_up_interruptible_all(&u->peer_wait);
 743	sk->sk_max_ack_backlog	= backlog;
 744	sk->sk_state		= TCP_LISTEN;
 745	/* set credentials so connect can copy them */
 746	init_peercred(sk);
 747	err = 0;
 748
 749out_unlock:
 750	unix_state_unlock(sk);
 751out:
 752	return err;
 753}
 754
 755static int unix_release(struct socket *);
 756static int unix_bind(struct socket *, struct sockaddr *, int);
 757static int unix_stream_connect(struct socket *, struct sockaddr *,
 758			       int addr_len, int flags);
 759static int unix_socketpair(struct socket *, struct socket *);
 760static int unix_accept(struct socket *, struct socket *, int, bool);
 761static int unix_getname(struct socket *, struct sockaddr *, int);
 762static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
 763static __poll_t unix_dgram_poll(struct file *, struct socket *,
 764				    poll_table *);
 765static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 766#ifdef CONFIG_COMPAT
 767static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 768#endif
 769static int unix_shutdown(struct socket *, int);
 770static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 771static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
 772static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
 773				       struct pipe_inode_info *, size_t size,
 774				       unsigned int flags);
 775static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 776static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 777static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 778static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 779static int unix_dgram_connect(struct socket *, struct sockaddr *,
 780			      int, int);
 781static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
 782static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
 783				  int);
 784
 785#ifdef CONFIG_PROC_FS
 786static int unix_count_nr_fds(struct sock *sk)
 787{
 788	struct sk_buff *skb;
 789	struct unix_sock *u;
 790	int nr_fds = 0;
 791
 792	spin_lock(&sk->sk_receive_queue.lock);
 793	skb = skb_peek(&sk->sk_receive_queue);
 794	while (skb) {
 795		u = unix_sk(skb->sk);
 796		nr_fds += atomic_read(&u->scm_stat.nr_fds);
 797		skb = skb_peek_next(skb, &sk->sk_receive_queue);
 798	}
 799	spin_unlock(&sk->sk_receive_queue.lock);
 800
 801	return nr_fds;
 802}
 803
 804static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 805{
 806	struct sock *sk = sock->sk;
 807	unsigned char s_state;
 808	struct unix_sock *u;
 809	int nr_fds = 0;
 810
 811	if (sk) {
 812		s_state = READ_ONCE(sk->sk_state);
 813		u = unix_sk(sk);
 814
 815		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
 816		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
 817		 * SOCK_DGRAM is ordinary. So, no lock is needed.
 818		 */
 819		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
 820			nr_fds = atomic_read(&u->scm_stat.nr_fds);
 821		else if (s_state == TCP_LISTEN)
 822			nr_fds = unix_count_nr_fds(sk);
 823
 824		seq_printf(m, "scm_fds: %u\n", nr_fds);
 825	}
 826}
 827#else
 828#define unix_show_fdinfo NULL
 829#endif
 830
 831static const struct proto_ops unix_stream_ops = {
 832	.family =	PF_UNIX,
 833	.owner =	THIS_MODULE,
 834	.release =	unix_release,
 835	.bind =		unix_bind,
 836	.connect =	unix_stream_connect,
 837	.socketpair =	unix_socketpair,
 838	.accept =	unix_accept,
 839	.getname =	unix_getname,
 840	.poll =		unix_poll,
 841	.ioctl =	unix_ioctl,
 842#ifdef CONFIG_COMPAT
 843	.compat_ioctl =	unix_compat_ioctl,
 844#endif
 845	.listen =	unix_listen,
 846	.shutdown =	unix_shutdown,
 847	.sendmsg =	unix_stream_sendmsg,
 848	.recvmsg =	unix_stream_recvmsg,
 849	.read_skb =	unix_stream_read_skb,
 850	.mmap =		sock_no_mmap,
 851	.splice_read =	unix_stream_splice_read,
 852	.set_peek_off =	sk_set_peek_off,
 853	.show_fdinfo =	unix_show_fdinfo,
 854};
 855
 856static const struct proto_ops unix_dgram_ops = {
 857	.family =	PF_UNIX,
 858	.owner =	THIS_MODULE,
 859	.release =	unix_release,
 860	.bind =		unix_bind,
 861	.connect =	unix_dgram_connect,
 862	.socketpair =	unix_socketpair,
 863	.accept =	sock_no_accept,
 864	.getname =	unix_getname,
 865	.poll =		unix_dgram_poll,
 866	.ioctl =	unix_ioctl,
 867#ifdef CONFIG_COMPAT
 868	.compat_ioctl =	unix_compat_ioctl,
 869#endif
 870	.listen =	sock_no_listen,
 871	.shutdown =	unix_shutdown,
 872	.sendmsg =	unix_dgram_sendmsg,
 873	.read_skb =	unix_read_skb,
 874	.recvmsg =	unix_dgram_recvmsg,
 875	.mmap =		sock_no_mmap,
 876	.set_peek_off =	sk_set_peek_off,
 877	.show_fdinfo =	unix_show_fdinfo,
 878};
 879
 880static const struct proto_ops unix_seqpacket_ops = {
 881	.family =	PF_UNIX,
 882	.owner =	THIS_MODULE,
 883	.release =	unix_release,
 884	.bind =		unix_bind,
 885	.connect =	unix_stream_connect,
 886	.socketpair =	unix_socketpair,
 887	.accept =	unix_accept,
 888	.getname =	unix_getname,
 889	.poll =		unix_dgram_poll,
 890	.ioctl =	unix_ioctl,
 891#ifdef CONFIG_COMPAT
 892	.compat_ioctl =	unix_compat_ioctl,
 893#endif
 894	.listen =	unix_listen,
 895	.shutdown =	unix_shutdown,
 896	.sendmsg =	unix_seqpacket_sendmsg,
 897	.recvmsg =	unix_seqpacket_recvmsg,
 898	.mmap =		sock_no_mmap,
 899	.set_peek_off =	sk_set_peek_off,
 900	.show_fdinfo =	unix_show_fdinfo,
 901};
 902
 903static void unix_close(struct sock *sk, long timeout)
 904{
 905	/* Nothing to do here, unix socket does not need a ->close().
 906	 * This is merely for sockmap.
 907	 */
 908}
 909
 910static void unix_unhash(struct sock *sk)
 911{
 912	/* Nothing to do here, unix socket does not need a ->unhash().
 913	 * This is merely for sockmap.
 914	 */
 915}
 916
 917static bool unix_bpf_bypass_getsockopt(int level, int optname)
 918{
 919	if (level == SOL_SOCKET) {
 920		switch (optname) {
 921		case SO_PEERPIDFD:
 922			return true;
 923		default:
 924			return false;
 925		}
 926	}
 927
 928	return false;
 929}
 930
 931struct proto unix_dgram_proto = {
 932	.name			= "UNIX",
 933	.owner			= THIS_MODULE,
 934	.obj_size		= sizeof(struct unix_sock),
 935	.close			= unix_close,
 936	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 937#ifdef CONFIG_BPF_SYSCALL
 938	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
 939#endif
 940};
 941
 942struct proto unix_stream_proto = {
 943	.name			= "UNIX-STREAM",
 944	.owner			= THIS_MODULE,
 945	.obj_size		= sizeof(struct unix_sock),
 946	.close			= unix_close,
 947	.unhash			= unix_unhash,
 948	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 949#ifdef CONFIG_BPF_SYSCALL
 950	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
 951#endif
 952};
 953
 954static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
 955{
 956	struct unix_sock *u;
 957	struct sock *sk;
 958	int err;
 959
 960	atomic_long_inc(&unix_nr_socks);
 961	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
 962		err = -ENFILE;
 963		goto err;
 964	}
 965
 966	if (type == SOCK_STREAM)
 967		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
 968	else /*dgram and  seqpacket */
 969		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
 970
 971	if (!sk) {
 972		err = -ENOMEM;
 973		goto err;
 974	}
 975
 976	sock_init_data(sock, sk);
 977
 978	sk->sk_hash		= unix_unbound_hash(sk);
 979	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
 980	sk->sk_write_space	= unix_write_space;
 981	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
 982	sk->sk_destruct		= unix_sock_destructor;
 983	u	  = unix_sk(sk);
 984	u->path.dentry = NULL;
 985	u->path.mnt = NULL;
 986	spin_lock_init(&u->lock);
 987	atomic_long_set(&u->inflight, 0);
 988	INIT_LIST_HEAD(&u->link);
 989	mutex_init(&u->iolock); /* single task reading lock */
 990	mutex_init(&u->bindlock); /* single task binding lock */
 991	init_waitqueue_head(&u->peer_wait);
 992	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 993	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
 994	unix_insert_unbound_socket(net, sk);
 995
 996	sock_prot_inuse_add(net, sk->sk_prot, 1);
 997
 998	return sk;
 999
1000err:
1001	atomic_long_dec(&unix_nr_socks);
1002	return ERR_PTR(err);
1003}
1004
1005static int unix_create(struct net *net, struct socket *sock, int protocol,
1006		       int kern)
1007{
1008	struct sock *sk;
1009
1010	if (protocol && protocol != PF_UNIX)
1011		return -EPROTONOSUPPORT;
1012
1013	sock->state = SS_UNCONNECTED;
1014
1015	switch (sock->type) {
1016	case SOCK_STREAM:
1017		sock->ops = &unix_stream_ops;
1018		break;
1019		/*
1020		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
1021		 *	nothing uses it.
1022		 */
1023	case SOCK_RAW:
1024		sock->type = SOCK_DGRAM;
1025		fallthrough;
1026	case SOCK_DGRAM:
1027		sock->ops = &unix_dgram_ops;
1028		break;
1029	case SOCK_SEQPACKET:
1030		sock->ops = &unix_seqpacket_ops;
1031		break;
1032	default:
1033		return -ESOCKTNOSUPPORT;
1034	}
1035
1036	sk = unix_create1(net, sock, kern, sock->type);
1037	if (IS_ERR(sk))
1038		return PTR_ERR(sk);
1039
1040	return 0;
1041}
1042
1043static int unix_release(struct socket *sock)
1044{
1045	struct sock *sk = sock->sk;
1046
1047	if (!sk)
1048		return 0;
1049
1050	sk->sk_prot->close(sk, 0);
1051	unix_release_sock(sk, 0);
1052	sock->sk = NULL;
1053
1054	return 0;
1055}
1056
1057static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1058				  int type)
1059{
1060	struct inode *inode;
1061	struct path path;
1062	struct sock *sk;
1063	int err;
1064
1065	unix_mkname_bsd(sunaddr, addr_len);
1066	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1067	if (err)
1068		goto fail;
1069
1070	err = path_permission(&path, MAY_WRITE);
1071	if (err)
1072		goto path_put;
1073
1074	err = -ECONNREFUSED;
1075	inode = d_backing_inode(path.dentry);
1076	if (!S_ISSOCK(inode->i_mode))
1077		goto path_put;
1078
1079	sk = unix_find_socket_byinode(inode);
1080	if (!sk)
1081		goto path_put;
1082
1083	err = -EPROTOTYPE;
1084	if (sk->sk_type == type)
1085		touch_atime(&path);
1086	else
1087		goto sock_put;
1088
1089	path_put(&path);
1090
1091	return sk;
1092
1093sock_put:
1094	sock_put(sk);
1095path_put:
1096	path_put(&path);
1097fail:
1098	return ERR_PTR(err);
1099}
1100
1101static struct sock *unix_find_abstract(struct net *net,
1102				       struct sockaddr_un *sunaddr,
1103				       int addr_len, int type)
1104{
1105	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1106	struct dentry *dentry;
1107	struct sock *sk;
1108
1109	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1110	if (!sk)
1111		return ERR_PTR(-ECONNREFUSED);
1112
1113	dentry = unix_sk(sk)->path.dentry;
1114	if (dentry)
1115		touch_atime(&unix_sk(sk)->path);
1116
1117	return sk;
1118}
1119
1120static struct sock *unix_find_other(struct net *net,
1121				    struct sockaddr_un *sunaddr,
1122				    int addr_len, int type)
1123{
1124	struct sock *sk;
1125
1126	if (sunaddr->sun_path[0])
1127		sk = unix_find_bsd(sunaddr, addr_len, type);
1128	else
1129		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1130
1131	return sk;
1132}
1133
1134static int unix_autobind(struct sock *sk)
1135{
1136	unsigned int new_hash, old_hash = sk->sk_hash;
1137	struct unix_sock *u = unix_sk(sk);
1138	struct net *net = sock_net(sk);
1139	struct unix_address *addr;
1140	u32 lastnum, ordernum;
1141	int err;
1142
1143	err = mutex_lock_interruptible(&u->bindlock);
1144	if (err)
1145		return err;
1146
1147	if (u->addr)
1148		goto out;
1149
1150	err = -ENOMEM;
1151	addr = kzalloc(sizeof(*addr) +
1152		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1153	if (!addr)
1154		goto out;
1155
1156	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1157	addr->name->sun_family = AF_UNIX;
1158	refcount_set(&addr->refcnt, 1);
1159
1160	ordernum = get_random_u32();
1161	lastnum = ordernum & 0xFFFFF;
1162retry:
1163	ordernum = (ordernum + 1) & 0xFFFFF;
1164	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
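    	/* The autobound name is the NUL byte left in place by kzalloc()
	 * followed by five hex digits, e.g. "\0" "00a3f", which is why
	 * addr->len was set to offsetof(struct sockaddr_un, sun_path) + 6
	 * above.
	 */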
1165
1166	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1167	unix_table_double_lock(net, old_hash, new_hash);
1168
1169	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1170		unix_table_double_unlock(net, old_hash, new_hash);
1171
1172		/* __unix_find_socket_byname() may take a long time if many names
1173		 * are already in use.
1174		 */
1175		cond_resched();
1176
1177		if (ordernum == lastnum) {
1178			/* Give up if all names seem to be in use. */
1179			err = -ENOSPC;
1180			unix_release_addr(addr);
1181			goto out;
1182		}
1183
1184		goto retry;
1185	}
1186
1187	__unix_set_addr_hash(net, sk, addr, new_hash);
1188	unix_table_double_unlock(net, old_hash, new_hash);
1189	err = 0;
1190
1191out:	mutex_unlock(&u->bindlock);
1192	return err;
1193}
1194
1195static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1196			 int addr_len)
1197{
1198	umode_t mode = S_IFSOCK |
1199	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1200	unsigned int new_hash, old_hash = sk->sk_hash;
1201	struct unix_sock *u = unix_sk(sk);
1202	struct net *net = sock_net(sk);
1203	struct mnt_idmap *idmap;
1204	struct unix_address *addr;
1205	struct dentry *dentry;
1206	struct path parent;
1207	int err;
1208
1209	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1210	addr = unix_create_addr(sunaddr, addr_len);
1211	if (!addr)
1212		return -ENOMEM;
1213
1214	/*
1215	 * Get the parent directory, calculate the hash for last
1216	 * component.
1217	 */
1218	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1219	if (IS_ERR(dentry)) {
1220		err = PTR_ERR(dentry);
1221		goto out;
1222	}
1223
1224	/*
1225	 * All right, let's create it.
1226	 */
1227	idmap = mnt_idmap(parent.mnt);
1228	err = security_path_mknod(&parent, dentry, mode, 0);
1229	if (!err)
1230		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1231	if (err)
1232		goto out_path;
1233	err = mutex_lock_interruptible(&u->bindlock);
1234	if (err)
1235		goto out_unlink;
1236	if (u->addr)
1237		goto out_unlock;
1238
1239	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1240	unix_table_double_lock(net, old_hash, new_hash);
1241	u->path.mnt = mntget(parent.mnt);
1242	u->path.dentry = dget(dentry);
1243	__unix_set_addr_hash(net, sk, addr, new_hash);
1244	unix_table_double_unlock(net, old_hash, new_hash);
1245	unix_insert_bsd_socket(sk);
1246	mutex_unlock(&u->bindlock);
1247	done_path_create(&parent, dentry);
1248	return 0;
1249
1250out_unlock:
1251	mutex_unlock(&u->bindlock);
1252	err = -EINVAL;
1253out_unlink:
1254	/* failed after successful mknod?  unlink what we'd created... */
1255	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1256out_path:
1257	done_path_create(&parent, dentry);
1258out:
1259	unix_release_addr(addr);
1260	return err == -EEXIST ? -EADDRINUSE : err;
1261}
1262
1263static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1264			      int addr_len)
1265{
1266	unsigned int new_hash, old_hash = sk->sk_hash;
1267	struct unix_sock *u = unix_sk(sk);
1268	struct net *net = sock_net(sk);
1269	struct unix_address *addr;
1270	int err;
1271
1272	addr = unix_create_addr(sunaddr, addr_len);
1273	if (!addr)
1274		return -ENOMEM;
1275
1276	err = mutex_lock_interruptible(&u->bindlock);
1277	if (err)
1278		goto out;
1279
1280	if (u->addr) {
1281		err = -EINVAL;
1282		goto out_mutex;
1283	}
1284
1285	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1286	unix_table_double_lock(net, old_hash, new_hash);
1287
1288	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1289		goto out_spin;
1290
1291	__unix_set_addr_hash(net, sk, addr, new_hash);
1292	unix_table_double_unlock(net, old_hash, new_hash);
1293	mutex_unlock(&u->bindlock);
1294	return 0;
1295
1296out_spin:
1297	unix_table_double_unlock(net, old_hash, new_hash);
1298	err = -EADDRINUSE;
1299out_mutex:
1300	mutex_unlock(&u->bindlock);
1301out:
1302	unix_release_addr(addr);
1303	return err;
1304}
1305
1306static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1307{
1308	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1309	struct sock *sk = sock->sk;
1310	int err;
1311
1312	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1313	    sunaddr->sun_family == AF_UNIX)
1314		return unix_autobind(sk);
1315
1316	err = unix_validate_addr(sunaddr, addr_len);
1317	if (err)
1318		return err;
1319
1320	if (sunaddr->sun_path[0])
1321		err = unix_bind_bsd(sk, sunaddr, addr_len);
1322	else
1323		err = unix_bind_abstract(sk, sunaddr, addr_len);
1324
1325	return err;
1326}
1327
1328static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1329{
1330	if (unlikely(sk1 == sk2) || !sk2) {
1331		unix_state_lock(sk1);
1332		return;
1333	}
1334	if (sk1 > sk2)
1335		swap(sk1, sk2);
1336
1337	unix_state_lock(sk1);
1338	unix_state_lock_nested(sk2, U_LOCK_SECOND);
1339}
1340
1341static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1342{
1343	if (unlikely(sk1 == sk2) || !sk2) {
1344		unix_state_unlock(sk1);
1345		return;
1346	}
1347	unix_state_unlock(sk1);
1348	unix_state_unlock(sk2);
1349}
1350
1351static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1352			      int alen, int flags)
1353{
1354	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1355	struct sock *sk = sock->sk;
1356	struct sock *other;
1357	int err;
1358
1359	err = -EINVAL;
1360	if (alen < offsetofend(struct sockaddr, sa_family))
1361		goto out;
1362
1363	if (addr->sa_family != AF_UNSPEC) {
1364		err = unix_validate_addr(sunaddr, alen);
1365		if (err)
1366			goto out;
1367
1368		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1369		if (err)
1370			goto out;
1371
1372		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1373		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1374		    !unix_sk(sk)->addr) {
1375			err = unix_autobind(sk);
1376			if (err)
1377				goto out;
1378		}
1379
1380restart:
1381		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1382		if (IS_ERR(other)) {
1383			err = PTR_ERR(other);
1384			goto out;
1385		}
1386
1387		unix_state_double_lock(sk, other);
1388
1389		/* Apparently VFS overslept socket death. Retry. */
1390		if (sock_flag(other, SOCK_DEAD)) {
1391			unix_state_double_unlock(sk, other);
1392			sock_put(other);
1393			goto restart;
1394		}
1395
1396		err = -EPERM;
1397		if (!unix_may_send(sk, other))
1398			goto out_unlock;
1399
1400		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1401		if (err)
1402			goto out_unlock;
1403
1404		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1405	} else {
1406		/*
1407		 *	1003.1g breaking connected state with AF_UNSPEC
1408		 */
1409		other = NULL;
1410		unix_state_double_lock(sk, other);
1411	}
1412
1413	/*
1414	 * If it was connected, reconnect.
1415	 */
1416	if (unix_peer(sk)) {
1417		struct sock *old_peer = unix_peer(sk);
1418
1419		unix_peer(sk) = other;
1420		if (!other)
1421			sk->sk_state = TCP_CLOSE;
1422		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1423
1424		unix_state_double_unlock(sk, other);
1425
1426		if (other != old_peer)
1427			unix_dgram_disconnected(sk, old_peer);
1428		sock_put(old_peer);
1429	} else {
1430		unix_peer(sk) = other;
1431		unix_state_double_unlock(sk, other);
1432	}
1433
1434	return 0;
1435
1436out_unlock:
1437	unix_state_double_unlock(sk, other);
1438	sock_put(other);
1439out:
1440	return err;
1441}
1442
1443static long unix_wait_for_peer(struct sock *other, long timeo)
1444	__releases(&unix_sk(other)->lock)
1445{
1446	struct unix_sock *u = unix_sk(other);
1447	int sched;
1448	DEFINE_WAIT(wait);
1449
1450	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1451
1452	sched = !sock_flag(other, SOCK_DEAD) &&
1453		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1454		unix_recvq_full_lockless(other);
1455
1456	unix_state_unlock(other);
1457
1458	if (sched)
1459		timeo = schedule_timeout(timeo);
1460
1461	finish_wait(&u->peer_wait, &wait);
1462	return timeo;
1463}
1464
1465static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1466			       int addr_len, int flags)
1467{
1468	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1469	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1470	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1471	struct net *net = sock_net(sk);
1472	struct sk_buff *skb = NULL;
1473	long timeo;
1474	int err;
1475	int st;
1476
1477	err = unix_validate_addr(sunaddr, addr_len);
1478	if (err)
1479		goto out;
1480
1481	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1482	if (err)
1483		goto out;
1484
1485	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1486	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1487		err = unix_autobind(sk);
1488		if (err)
1489			goto out;
1490	}
1491
1492	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1493
1494	/* First of all allocate resources.
1495	   If we do it after the state is locked,
1496	   we will have to recheck everything again in any case.
1497	 */
1498
1499	/* create new sock for complete connection */
1500	newsk = unix_create1(net, NULL, 0, sock->type);
1501	if (IS_ERR(newsk)) {
1502		err = PTR_ERR(newsk);
1503		newsk = NULL;
1504		goto out;
1505	}
1506
1507	err = -ENOMEM;
1508
1509	/* Allocate skb for sending to listening sock */
1510	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1511	if (skb == NULL)
1512		goto out;
1513
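    	/* The skb allocated above is the "embryo" carrier for the handshake:
	 * sock_wmalloc() set skb->sk to newsk, the skb is queued on the
	 * listener's receive queue on success, and unix_accept() later
	 * dequeues it to graft newsk onto the accepting socket.
	 */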
1514restart:
1515	/*  Find listening sock. */
1516	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1517	if (IS_ERR(other)) {
1518		err = PTR_ERR(other);
1519		other = NULL;
1520		goto out;
1521	}
1522
1523	/* Latch state of peer */
1524	unix_state_lock(other);
1525
1526	/* Apparently VFS overslept socket death. Retry. */
1527	if (sock_flag(other, SOCK_DEAD)) {
1528		unix_state_unlock(other);
1529		sock_put(other);
1530		goto restart;
1531	}
1532
1533	err = -ECONNREFUSED;
1534	if (other->sk_state != TCP_LISTEN)
1535		goto out_unlock;
1536	if (other->sk_shutdown & RCV_SHUTDOWN)
1537		goto out_unlock;
1538
1539	if (unix_recvq_full(other)) {
1540		err = -EAGAIN;
1541		if (!timeo)
1542			goto out_unlock;
1543
1544		timeo = unix_wait_for_peer(other, timeo);
1545
1546		err = sock_intr_errno(timeo);
1547		if (signal_pending(current))
1548			goto out;
1549		sock_put(other);
1550		goto restart;
1551	}
1552
1553	/* Latch our state.
1554
1555	   This is a tricky place. We need to grab our state lock and cannot
1556	   drop the lock on the peer. It is dangerous because deadlock is
1557	   possible. The connect-to-self case and simultaneous connect
1558	   attempts are eliminated by checking the socket state: other is
1559	   TCP_LISTEN, and if sk is TCP_LISTEN we check this before
1560	   attempting to grab the lock.
1561
1562	   Well, and we have to recheck the state after the socket is locked.
1563	 */
1564	st = sk->sk_state;
1565
1566	switch (st) {
1567	case TCP_CLOSE:
1568		/* This is ok... continue with connect */
1569		break;
1570	case TCP_ESTABLISHED:
1571		/* Socket is already connected */
1572		err = -EISCONN;
1573		goto out_unlock;
1574	default:
1575		err = -EINVAL;
1576		goto out_unlock;
1577	}
1578
1579	unix_state_lock_nested(sk, U_LOCK_SECOND);
1580
1581	if (sk->sk_state != st) {
1582		unix_state_unlock(sk);
1583		unix_state_unlock(other);
1584		sock_put(other);
1585		goto restart;
1586	}
1587
1588	err = security_unix_stream_connect(sk, other, newsk);
1589	if (err) {
1590		unix_state_unlock(sk);
1591		goto out_unlock;
1592	}
1593
1594	/* The way is open! Quickly set all the necessary fields... */
1595
1596	sock_hold(sk);
1597	unix_peer(newsk)	= sk;
1598	newsk->sk_state		= TCP_ESTABLISHED;
1599	newsk->sk_type		= sk->sk_type;
1600	init_peercred(newsk);
1601	newu = unix_sk(newsk);
1602	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1603	otheru = unix_sk(other);
1604
1605	/* copy address information from listening to new sock
1606	 *
1607	 * The contents of *(otheru->addr) and otheru->path
1608	 * are seen fully set up here, since we have found
1609	 * otheru in hash under its lock.  Insertion into the
1610	 * hash chain we'd found it in had been done in an
1611	 * earlier critical area protected by the chain's lock,
1612	 * the same one where we'd set *(otheru->addr) contents,
1613	 * as well as otheru->path and otheru->addr itself.
1614	 *
1615	 * Using smp_store_release() here to set newu->addr
1616	 * is enough to make those stores, as well as stores
1617	 * to newu->path visible to anyone who gets newu->addr
1618	 * by smp_load_acquire().  IOW, the same guarantees
1619	 * as for unix_sock instances bound in unix_bind() or
1620	 * in unix_autobind().
1621	 */
1622	if (otheru->path.dentry) {
1623		path_get(&otheru->path);
1624		newu->path = otheru->path;
1625	}
1626	refcount_inc(&otheru->addr->refcnt);
1627	smp_store_release(&newu->addr, otheru->addr);
1628
1629	/* Set credentials */
1630	copy_peercred(sk, other);
1631
1632	sock->state	= SS_CONNECTED;
1633	sk->sk_state	= TCP_ESTABLISHED;
1634	sock_hold(newsk);
1635
1636	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1637	unix_peer(sk)	= newsk;
1638
1639	unix_state_unlock(sk);
1640
1641	/* take ten and send info to listening sock */
1642	spin_lock(&other->sk_receive_queue.lock);
1643	__skb_queue_tail(&other->sk_receive_queue, skb);
1644	spin_unlock(&other->sk_receive_queue.lock);
1645	unix_state_unlock(other);
1646	other->sk_data_ready(other);
1647	sock_put(other);
1648	return 0;
1649
1650out_unlock:
1651	if (other)
1652		unix_state_unlock(other);
1653
1654out:
1655	kfree_skb(skb);
1656	if (newsk)
1657		unix_release_sock(newsk, 0);
1658	if (other)
1659		sock_put(other);
1660	return err;
1661}
1662
1663static int unix_socketpair(struct socket *socka, struct socket *sockb)
1664{
1665	struct sock *ska = socka->sk, *skb = sockb->sk;
1666
1667	/* Join our sockets back to back */
1668	sock_hold(ska);
1669	sock_hold(skb);
1670	unix_peer(ska) = skb;
1671	unix_peer(skb) = ska;
1672	init_peercred(ska);
1673	init_peercred(skb);
1674
1675	ska->sk_state = TCP_ESTABLISHED;
1676	skb->sk_state = TCP_ESTABLISHED;
1677	socka->state  = SS_CONNECTED;
1678	sockb->state  = SS_CONNECTED;
1679	return 0;
1680}
1681
1682static void unix_sock_inherit_flags(const struct socket *old,
1683				    struct socket *new)
1684{
1685	if (test_bit(SOCK_PASSCRED, &old->flags))
1686		set_bit(SOCK_PASSCRED, &new->flags);
1687	if (test_bit(SOCK_PASSPIDFD, &old->flags))
1688		set_bit(SOCK_PASSPIDFD, &new->flags);
1689	if (test_bit(SOCK_PASSSEC, &old->flags))
1690		set_bit(SOCK_PASSSEC, &new->flags);
1691}
1692
1693static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1694		       bool kern)
1695{
1696	struct sock *sk = sock->sk;
1697	struct sock *tsk;
1698	struct sk_buff *skb;
1699	int err;
1700
1701	err = -EOPNOTSUPP;
1702	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1703		goto out;
1704
1705	err = -EINVAL;
1706	if (sk->sk_state != TCP_LISTEN)
1707		goto out;
1708
1709	/* If socket state is TCP_LISTEN it cannot change (for now...),
1710	 * so that no locks are necessary.
1711	 */
1712
1713	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1714				&err);
1715	if (!skb) {
1716		/* This means receive shutdown. */
1717		if (err == 0)
1718			err = -EINVAL;
1719		goto out;
1720	}
1721
1722	tsk = skb->sk;
1723	skb_free_datagram(sk, skb);
1724	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1725
1726	/* attach accepted sock to socket */
1727	unix_state_lock(tsk);
1728	newsock->state = SS_CONNECTED;
1729	unix_sock_inherit_flags(sock, newsock);
1730	sock_graft(tsk, newsock);
1731	unix_state_unlock(tsk);
1732	return 0;
1733
1734out:
1735	return err;
1736}
1737
1738
1739static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1740{
1741	struct sock *sk = sock->sk;
1742	struct unix_address *addr;
1743	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1744	int err = 0;
1745
1746	if (peer) {
1747		sk = unix_peer_get(sk);
1748
1749		err = -ENOTCONN;
1750		if (!sk)
1751			goto out;
1752		err = 0;
1753	} else {
1754		sock_hold(sk);
1755	}
1756
1757	addr = smp_load_acquire(&unix_sk(sk)->addr);
1758	if (!addr) {
1759		sunaddr->sun_family = AF_UNIX;
1760		sunaddr->sun_path[0] = 0;
1761		err = offsetof(struct sockaddr_un, sun_path);
1762	} else {
1763		err = addr->len;
1764		memcpy(sunaddr, addr->name, addr->len);
1765
1766		if (peer)
1767			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1768					       CGROUP_UNIX_GETPEERNAME);
1769		else
1770			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1771					       CGROUP_UNIX_GETSOCKNAME);
1772	}
1773	sock_put(sk);
1774out:
1775	return err;
1776}
1777
1778static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1779{
1780	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1781
1782	/*
1783	 * Garbage collection of unix sockets starts by selecting a set of
1784	 * candidate sockets which have reference only from being in flight
1785	 * (total_refs == inflight_refs).  This condition is checked once during
1786	 * the candidate collection phase, and candidates are marked as such, so
1787	 * that non-candidates can later be ignored.  While inflight_refs is
1788	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1789	 * is an instantaneous decision.
1790	 *
1791	 * Once a candidate, however, the socket must not be reinstalled into a
1792	 * file descriptor while the garbage collection is in progress.
1793	 *
1794	 * If the above conditions are met, then the directed graph of
1795	 * candidates (*) does not change while unix_gc_lock is held.
1796	 *
1797	 * Any operation that changes the file count through file descriptors
1798	 * (dup, close, sendmsg) does not change the graph since candidates are
1799	 * not installed in fds.
1800	 *
1801	 * Dequeueing a candidate via recvmsg would install it into an fd, but
1802	 * that takes unix_gc_lock to decrement the inflight count, so it's
1803	 * serialized with garbage collection.
1804	 *
1805	 * MSG_PEEK is special in that it does not change the inflight count,
1806	 * yet does install the socket into an fd.  The following lock/unlock
1807	 * pair is to ensure serialization with garbage collection.  It must be
1808	 * done between incrementing the file count and installing the file into
1809	 * an fd.
1810	 *
1811	 * If garbage collection starts after the barrier provided by the
1812	 * lock/unlock, then it will see the elevated refcount and not mark this
1813	 * as a candidate.  If a garbage collection is already in progress
1814	 * before the file count was incremented, then the lock/unlock pair will
1815	 * ensure that garbage collection is finished before progressing to
1816	 * installing the fd.
1817	 *
1818	 * (*) A -> B where B is on the queue of A or B is on the queue of C
1819	 * which is on the queue of listening socket A.
1820	 */
1821	spin_lock(&unix_gc_lock);
1822	spin_unlock(&unix_gc_lock);
1823}
1824
1825static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1826{
1827	int err = 0;
1828
1829	UNIXCB(skb).pid  = get_pid(scm->pid);
1830	UNIXCB(skb).uid = scm->creds.uid;
1831	UNIXCB(skb).gid = scm->creds.gid;
1832	UNIXCB(skb).fp = NULL;
1833	unix_get_secdata(scm, skb);
1834	if (scm->fp && send_fds)
1835		err = unix_attach_fds(scm, skb);
1836
1837	skb->destructor = unix_destruct_scm;
1838	return err;
1839}
1840
1841static bool unix_passcred_enabled(const struct socket *sock,
1842				  const struct sock *other)
1843{
1844	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1845	       test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1846	       !other->sk_socket ||
1847	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1848	       test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1849}
1850
1851/*
1852 * Some apps rely on write() giving SCM_CREDENTIALS.
1853 * We include credentials if the source or destination socket
1854 * asserted SOCK_PASSCRED.
1855 */
1856static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1857			    const struct sock *other)
1858{
1859	if (UNIXCB(skb).pid)
1860		return;
1861	if (unix_passcred_enabled(sock, other)) {
1862		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1863		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1864	}
1865}
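    /*
     * Hedged userspace illustration: the receiving side opts in with
     *
     *	int on = 1;
     *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
     *
     * after which recvmsg() delivers an SCM_CREDENTIALS control message
     * (a struct ucred) built from the pid/uid/gid fields set above.
     */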
1866
1867static bool unix_skb_scm_eq(struct sk_buff *skb,
1868			    struct scm_cookie *scm)
1869{
1870	return UNIXCB(skb).pid == scm->pid &&
1871	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1872	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1873	       unix_secdata_eq(scm, skb);
1874}
1875
1876static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1877{
1878	struct scm_fp_list *fp = UNIXCB(skb).fp;
1879	struct unix_sock *u = unix_sk(sk);
1880
1881	if (unlikely(fp && fp->count))
1882		atomic_add(fp->count, &u->scm_stat.nr_fds);
1883}
1884
1885static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1886{
1887	struct scm_fp_list *fp = UNIXCB(skb).fp;
1888	struct unix_sock *u = unix_sk(sk);
1889
1890	if (unlikely(fp && fp->count))
1891		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1892}
1893
1894/*
1895 *	Send AF_UNIX data.
1896 */
1897
1898static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1899			      size_t len)
1900{
1901	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1902	struct sock *sk = sock->sk, *other = NULL;
1903	struct unix_sock *u = unix_sk(sk);
1904	struct scm_cookie scm;
1905	struct sk_buff *skb;
1906	int data_len = 0;
1907	int sk_locked;
1908	long timeo;
1909	int err;
1910
1911	wait_for_unix_gc();
1912	err = scm_send(sock, msg, &scm, false);
1913	if (err < 0)
1914		return err;
1915
1916	err = -EOPNOTSUPP;
1917	if (msg->msg_flags&MSG_OOB)
1918		goto out;
1919
1920	if (msg->msg_namelen) {
1921		err = unix_validate_addr(sunaddr, msg->msg_namelen);
1922		if (err)
1923			goto out;
1924
1925		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1926							    msg->msg_name,
1927							    &msg->msg_namelen,
1928							    NULL);
1929		if (err)
1930			goto out;
1931	} else {
1932		sunaddr = NULL;
1933		err = -ENOTCONN;
1934		other = unix_peer_get(sk);
1935		if (!other)
1936			goto out;
1937	}
1938
1939	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1940	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1941		err = unix_autobind(sk);
1942		if (err)
1943			goto out;
1944	}
1945
1946	err = -EMSGSIZE;
1947	if (len > sk->sk_sndbuf - 32)
1948		goto out;
1949
1950	if (len > SKB_MAX_ALLOC) {
1951		data_len = min_t(size_t,
1952				 len - SKB_MAX_ALLOC,
1953				 MAX_SKB_FRAGS * PAGE_SIZE);
1954		data_len = PAGE_ALIGN(data_len);
1955
1956		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1957	}
1958
1959	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1960				   msg->msg_flags & MSG_DONTWAIT, &err,
1961				   PAGE_ALLOC_COSTLY_ORDER);
1962	if (skb == NULL)
1963		goto out;
1964
1965	err = unix_scm_to_skb(&scm, skb, true);
1966	if (err < 0)
1967		goto out_free;
1968
1969	skb_put(skb, len - data_len);
1970	skb->data_len = data_len;
1971	skb->len = len;
1972	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1973	if (err)
1974		goto out_free;
1975
1976	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1977
1978restart:
1979	if (!other) {
1980		err = -ECONNRESET;
1981		if (sunaddr == NULL)
1982			goto out_free;
1983
1984		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
1985					sk->sk_type);
1986		if (IS_ERR(other)) {
1987			err = PTR_ERR(other);
1988			other = NULL;
1989			goto out_free;
1990		}
1991	}
1992
1993	if (sk_filter(other, skb) < 0) {
1994		/* Toss the packet but do not return any error to the sender */
1995		err = len;
1996		goto out_free;
1997	}
1998
1999	sk_locked = 0;
2000	unix_state_lock(other);
2001restart_locked:
2002	err = -EPERM;
2003	if (!unix_may_send(sk, other))
2004		goto out_unlock;
2005
2006	if (unlikely(sock_flag(other, SOCK_DEAD))) {
2007		/*
2008		 *	Check with POSIX 1003.1g - what should
2009		 *	a datagram send error return here?
2010		 */
2011		unix_state_unlock(other);
2012		sock_put(other);
2013
2014		if (!sk_locked)
2015			unix_state_lock(sk);
2016
2017		err = 0;
2018		if (sk->sk_type == SOCK_SEQPACKET) {
2019			/* We are here only when racing with unix_release_sock(),
2020			 * which is clearing @other.  Never change the state to
2021			 * TCP_CLOSE, unlike the SOCK_DGRAM case below.
2022			 */
2023			unix_state_unlock(sk);
2024			err = -EPIPE;
2025		} else if (unix_peer(sk) == other) {
2026			unix_peer(sk) = NULL;
2027			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2028
2029			sk->sk_state = TCP_CLOSE;
2030			unix_state_unlock(sk);
2031
2032			unix_dgram_disconnected(sk, other);
2033			sock_put(other);
2034			err = -ECONNREFUSED;
2035		} else {
2036			unix_state_unlock(sk);
2037		}
2038
2039		other = NULL;
2040		if (err)
2041			goto out_free;
2042		goto restart;
2043	}
2044
2045	err = -EPIPE;
2046	if (other->sk_shutdown & RCV_SHUTDOWN)
2047		goto out_unlock;
2048
2049	if (sk->sk_type != SOCK_SEQPACKET) {
2050		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2051		if (err)
2052			goto out_unlock;
2053	}
2054
2055	/* other == sk && unix_peer(other) != sk can happen if
2056	 * - unix_peer(sk) == NULL: the destination address is bound to sk
2057	 * - unix_peer(sk) == sk at time of get, but disconnected before lock
2058	 */
2059	if (other != sk &&
2060	    unlikely(unix_peer(other) != sk &&
2061	    unix_recvq_full_lockless(other))) {
2062		if (timeo) {
2063			timeo = unix_wait_for_peer(other, timeo);
2064
2065			err = sock_intr_errno(timeo);
2066			if (signal_pending(current))
2067				goto out_free;
2068
2069			goto restart;
2070		}
2071
2072		if (!sk_locked) {
2073			unix_state_unlock(other);
2074			unix_state_double_lock(sk, other);
2075		}
2076
2077		if (unix_peer(sk) != other ||
2078		    unix_dgram_peer_wake_me(sk, other)) {
2079			err = -EAGAIN;
2080			sk_locked = 1;
2081			goto out_unlock;
2082		}
2083
2084		if (!sk_locked) {
2085			sk_locked = 1;
2086			goto restart_locked;
2087		}
2088	}
2089
2090	if (unlikely(sk_locked))
2091		unix_state_unlock(sk);
2092
2093	if (sock_flag(other, SOCK_RCVTSTAMP))
2094		__net_timestamp(skb);
2095	maybe_add_creds(skb, sock, other);
2096	scm_stat_add(other, skb);
2097	skb_queue_tail(&other->sk_receive_queue, skb);
2098	unix_state_unlock(other);
2099	other->sk_data_ready(other);
2100	sock_put(other);
2101	scm_destroy(&scm);
2102	return len;
2103
2104out_unlock:
2105	if (sk_locked)
2106		unix_state_unlock(sk);
2107	unix_state_unlock(other);
2108out_free:
2109	kfree_skb(skb);
2110out:
2111	if (other)
2112		sock_put(other);
2113	scm_destroy(&scm);
2114	return err;
2115}
2116
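/* Editorial sketch (userspace, illustrative) of the SOCK_DEAD handling
 * above for a connected datagram pair; error handling is elided:
 *
 *	int sv[2];
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	close(sv[1]);
 *	send(sv[0], "x", 1, 0);   // -1, errno == ECONNREFUSED
 */
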
2117/* We use paged skbs for stream sockets, and limit occupancy to 32768
2118 * bytes, with a minimum of a full page.
2119 */
2120#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
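/* For example, with 4 KiB pages get_order(32768) == 3, so the limit is
 * 4096 << 3 == 32768 bytes; with 64 KiB pages the order is 0 and the
 * limit rounds up to a single 64 KiB page.
 */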
2121
2122#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2123static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2124		     struct scm_cookie *scm, bool fds_sent)
2125{
2126	struct unix_sock *ousk = unix_sk(other);
2127	struct sk_buff *skb;
2128	int err = 0;
2129
2130	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2131
2132	if (!skb)
2133		return err;
2134
2135	err = unix_scm_to_skb(scm, skb, !fds_sent);
2136	if (err < 0) {
2137		kfree_skb(skb);
2138		return err;
2139	}
2140	skb_put(skb, 1);
2141	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2142
2143	if (err) {
2144		kfree_skb(skb);
2145		return err;
2146	}
2147
2148	unix_state_lock(other);
2149
2150	if (sock_flag(other, SOCK_DEAD) ||
2151	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2152		unix_state_unlock(other);
2153		kfree_skb(skb);
2154		return -EPIPE;
2155	}
2156
2157	maybe_add_creds(skb, sock, other);
2158	skb_get(skb);
2159
2160	if (ousk->oob_skb)
2161		consume_skb(ousk->oob_skb);
2162
2163	WRITE_ONCE(ousk->oob_skb, skb);
2164
2165	scm_stat_add(other, skb);
2166	skb_queue_tail(&other->sk_receive_queue, skb);
2167	sk_send_sigurg(other);
2168	unix_state_unlock(other);
2169	other->sk_data_ready(other);
2170
2171	return err;
2172}
2173#endif
2174
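/* Editorial sketch (userspace, illustrative, CONFIG_AF_UNIX_OOB): the
 * last byte of an MSG_OOB send becomes the out-of-band byte queued by
 * queue_oob() above.  The buffer contents are assumptions:
 *
 *	send(sockfd, "ab", 2, MSG_OOB);      // 'b' is the OOB byte
 *	recv(sockfd, buf, sizeof(buf), 0);   // in-band data up to the mark: "a"
 *	recv(sockfd, buf, 1, MSG_OOB);       // fetches 'b'
 */
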
2175static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2176			       size_t len)
2177{
2178	struct sock *sk = sock->sk;
2179	struct sock *other = NULL;
2180	int err, size;
2181	struct sk_buff *skb;
2182	int sent = 0;
2183	struct scm_cookie scm;
2184	bool fds_sent = false;
2185	int data_len;
2186
2187	wait_for_unix_gc();
2188	err = scm_send(sock, msg, &scm, false);
2189	if (err < 0)
2190		return err;
2191
2192	err = -EOPNOTSUPP;
2193	if (msg->msg_flags & MSG_OOB) {
2194#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2195		if (len)
2196			len--;
2197		else
2198#endif
2199			goto out_err;
2200	}
2201
2202	if (msg->msg_namelen) {
2203		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2204		goto out_err;
2205	} else {
2206		err = -ENOTCONN;
2207		other = unix_peer(sk);
2208		if (!other)
2209			goto out_err;
2210	}
2211
2212	if (sk->sk_shutdown & SEND_SHUTDOWN)
2213		goto pipe_err;
2214
2215	while (sent < len) {
2216		size = len - sent;
2217
2218		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2219			skb = sock_alloc_send_pskb(sk, 0, 0,
2220						   msg->msg_flags & MSG_DONTWAIT,
2221						   &err, 0);
2222		} else {
2223			/* Keep two messages in the pipe so it schedules better */
2224			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2225
2226			/* allow fallback to order-0 allocations */
2227			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2228
2229			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2230
2231			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2232
2233			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2234						   msg->msg_flags & MSG_DONTWAIT, &err,
2235						   get_order(UNIX_SKB_FRAGS_SZ));
2236		}
2237		if (!skb)
2238			goto out_err;
2239
2240		/* Only send the fds in the first buffer */
2241		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2242		if (err < 0) {
2243			kfree_skb(skb);
2244			goto out_err;
2245		}
2246		fds_sent = true;
2247
2248		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2249			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2250						   sk->sk_allocation);
2251			if (err < 0) {
2252				kfree_skb(skb);
2253				goto out_err;
2254			}
2255			size = err;
2256			refcount_add(size, &sk->sk_wmem_alloc);
2257		} else {
2258			skb_put(skb, size - data_len);
2259			skb->data_len = data_len;
2260			skb->len = size;
2261			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2262			if (err) {
2263				kfree_skb(skb);
2264				goto out_err;
2265			}
2266		}
2267
2268		unix_state_lock(other);
2269
2270		if (sock_flag(other, SOCK_DEAD) ||
2271		    (other->sk_shutdown & RCV_SHUTDOWN))
2272			goto pipe_err_free;
2273
2274		maybe_add_creds(skb, sock, other);
2275		scm_stat_add(other, skb);
2276		skb_queue_tail(&other->sk_receive_queue, skb);
2277		unix_state_unlock(other);
2278		other->sk_data_ready(other);
2279		sent += size;
2280	}
2281
2282#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2283	if (msg->msg_flags & MSG_OOB) {
2284		err = queue_oob(sock, msg, other, &scm, fds_sent);
2285		if (err)
2286			goto out_err;
2287		sent++;
2288	}
2289#endif
2290
2291	scm_destroy(&scm);
2292
2293	return sent;
2294
2295pipe_err_free:
2296	unix_state_unlock(other);
2297	kfree_skb(skb);
2298pipe_err:
2299	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2300		send_sig(SIGPIPE, current, 0);
2301	err = -EPIPE;
2302out_err:
2303	scm_destroy(&scm);
2304	return sent ? : err;
2305}
2306
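/* Editorial sketch (userspace, illustrative) of the pipe_err path
 * above: sending on a stream socket whose peer has shut down raises
 * SIGPIPE unless the caller opts out per message:
 *
 *	send(sockfd, "x", 1, MSG_NOSIGNAL);  // -1, errno == EPIPE, no signal
 */
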
2307static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2308				  size_t len)
2309{
2310	int err;
2311	struct sock *sk = sock->sk;
2312
2313	err = sock_error(sk);
2314	if (err)
2315		return err;
2316
2317	if (sk->sk_state != TCP_ESTABLISHED)
2318		return -ENOTCONN;
2319
2320	if (msg->msg_namelen)
2321		msg->msg_namelen = 0;
2322
2323	return unix_dgram_sendmsg(sock, msg, len);
2324}
2325
2326static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2327				  size_t size, int flags)
2328{
2329	struct sock *sk = sock->sk;
2330
2331	if (sk->sk_state != TCP_ESTABLISHED)
2332		return -ENOTCONN;
2333
2334	return unix_dgram_recvmsg(sock, msg, size, flags);
2335}
2336
2337static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2338{
2339	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2340
2341	if (addr) {
2342		msg->msg_namelen = addr->len;
2343		memcpy(msg->msg_name, addr->name, addr->len);
2344	}
2345}
2346
2347int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2348			 int flags)
2349{
2350	struct scm_cookie scm;
2351	struct socket *sock = sk->sk_socket;
2352	struct unix_sock *u = unix_sk(sk);
2353	struct sk_buff *skb, *last;
2354	long timeo;
2355	int skip;
2356	int err;
2357
2358	err = -EOPNOTSUPP;
2359	if (flags&MSG_OOB)
2360		goto out;
2361
2362	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2363
2364	do {
2365		mutex_lock(&u->iolock);
2366
2367		skip = sk_peek_offset(sk, flags);
2368		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2369					      &skip, &err, &last);
2370		if (skb) {
2371			if (!(flags & MSG_PEEK))
2372				scm_stat_del(sk, skb);
2373			break;
2374		}
2375
2376		mutex_unlock(&u->iolock);
2377
2378		if (err != -EAGAIN)
2379			break;
2380	} while (timeo &&
2381		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2382					      &err, &timeo, last));
2383
2384	if (!skb) { /* implies iolock unlocked */
2385		unix_state_lock(sk);
2386		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2387		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2388		    (sk->sk_shutdown & RCV_SHUTDOWN))
2389			err = 0;
2390		unix_state_unlock(sk);
2391		goto out;
2392	}
2393
2394	if (wq_has_sleeper(&u->peer_wait))
2395		wake_up_interruptible_sync_poll(&u->peer_wait,
2396						EPOLLOUT | EPOLLWRNORM |
2397						EPOLLWRBAND);
2398
2399	if (msg->msg_name) {
2400		unix_copy_addr(msg, skb->sk);
2401
2402		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2403						      msg->msg_name,
2404						      &msg->msg_namelen);
2405	}
2406
2407	if (size > skb->len - skip)
2408		size = skb->len - skip;
2409	else if (size < skb->len - skip)
2410		msg->msg_flags |= MSG_TRUNC;
2411
2412	err = skb_copy_datagram_msg(skb, skip, msg, size);
2413	if (err)
2414		goto out_free;
2415
2416	if (sock_flag(sk, SOCK_RCVTSTAMP))
2417		__sock_recv_timestamp(msg, sk, skb);
2418
2419	memset(&scm, 0, sizeof(scm));
2420
2421	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2422	unix_set_secdata(&scm, skb);
2423
2424	if (!(flags & MSG_PEEK)) {
2425		if (UNIXCB(skb).fp)
2426			unix_detach_fds(&scm, skb);
2427
2428		sk_peek_offset_bwd(sk, skb->len);
2429	} else {
2430		/* It is questionable what to do on PEEK; we could:
2431		   - not return fds - good, but too simple 8)
2432		   - return fds, and not return them on read (old strategy,
2433		     apparently wrong)
2434		   - clone fds (chosen for now, as the most universal
2435		     solution)
2436
2437		   POSIX 1003.1g does not actually define this clearly
2438		   at all - but then POSIX 1003.1g doesn't define a lot
2439		   of things clearly!
2440
2441		*/
2442
2443		sk_peek_offset_fwd(sk, size);
2444
2445		if (UNIXCB(skb).fp)
2446			unix_peek_fds(&scm, skb);
2447	}
2448	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2449
2450	scm_recv_unix(sock, msg, &scm, flags);
2451
2452out_free:
2453	skb_free_datagram(sk, skb);
2454	mutex_unlock(&u->iolock);
2455out:
2456	return err;
2457}
2458
2459static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2460			      int flags)
2461{
2462	struct sock *sk = sock->sk;
2463
2464#ifdef CONFIG_BPF_SYSCALL
2465	const struct proto *prot = READ_ONCE(sk->sk_prot);
2466
2467	if (prot != &unix_dgram_proto)
2468		return prot->recvmsg(sk, msg, size, flags, NULL);
2469#endif
2470	return __unix_dgram_recvmsg(sk, msg, size, flags);
2471}
2472
2473static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2474{
2475	struct unix_sock *u = unix_sk(sk);
2476	struct sk_buff *skb;
2477	int err;
2478
2479	mutex_lock(&u->iolock);
2480	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2481	mutex_unlock(&u->iolock);
2482	if (!skb)
2483		return err;
2484
2485	return recv_actor(sk, skb);
2486}
2487
2488/*
2489 *	Sleep until more data has arrived, but check for races.
2490 */
2491static long unix_stream_data_wait(struct sock *sk, long timeo,
2492				  struct sk_buff *last, unsigned int last_len,
2493				  bool freezable)
2494{
2495	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2496	struct sk_buff *tail;
2497	DEFINE_WAIT(wait);
2498
2499	unix_state_lock(sk);
2500
2501	for (;;) {
2502		prepare_to_wait(sk_sleep(sk), &wait, state);
2503
2504		tail = skb_peek_tail(&sk->sk_receive_queue);
2505		if (tail != last ||
2506		    (tail && tail->len != last_len) ||
2507		    sk->sk_err ||
2508		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2509		    signal_pending(current) ||
2510		    !timeo)
2511			break;
2512
2513		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2514		unix_state_unlock(sk);
2515		timeo = schedule_timeout(timeo);
2516		unix_state_lock(sk);
2517
2518		if (sock_flag(sk, SOCK_DEAD))
2519			break;
2520
2521		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2522	}
2523
2524	finish_wait(sk_sleep(sk), &wait);
2525	unix_state_unlock(sk);
2526	return timeo;
2527}
2528
2529static unsigned int unix_skb_len(const struct sk_buff *skb)
2530{
2531	return skb->len - UNIXCB(skb).consumed;
2532}
2533
2534struct unix_stream_read_state {
2535	int (*recv_actor)(struct sk_buff *, int, int,
2536			  struct unix_stream_read_state *);
2537	struct socket *socket;
2538	struct msghdr *msg;
2539	struct pipe_inode_info *pipe;
2540	size_t size;
2541	int flags;
2542	unsigned int splice_flags;
2543};
2544
2545#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2546static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2547{
2548	struct socket *sock = state->socket;
2549	struct sock *sk = sock->sk;
2550	struct unix_sock *u = unix_sk(sk);
2551	int chunk = 1;
2552	struct sk_buff *oob_skb;
2553
2554	mutex_lock(&u->iolock);
2555	unix_state_lock(sk);
2556
2557	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2558		unix_state_unlock(sk);
2559		mutex_unlock(&u->iolock);
2560		return -EINVAL;
2561	}
2562
2563	oob_skb = u->oob_skb;
2564
2565	if (!(state->flags & MSG_PEEK))
2566		WRITE_ONCE(u->oob_skb, NULL);
2567	else
2568		skb_get(oob_skb);
2569	unix_state_unlock(sk);
2570
2571	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2572
2573	if (!(state->flags & MSG_PEEK))
2574		UNIXCB(oob_skb).consumed += 1;
2575
2576	consume_skb(oob_skb);
2577
2578	mutex_unlock(&u->iolock);
2579
2580	if (chunk < 0)
2581		return -EFAULT;
2582
2583	state->msg->msg_flags |= MSG_OOB;
2584	return 1;
2585}
2586
2587static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2588				  int flags, int copied)
2589{
2590	struct unix_sock *u = unix_sk(sk);
2591
2592	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2593		skb_unlink(skb, &sk->sk_receive_queue);
2594		consume_skb(skb);
2595		skb = NULL;
2596	} else {
2597		if (skb == u->oob_skb) {
2598			if (copied) {
2599				skb = NULL;
2600			} else if (sock_flag(sk, SOCK_URGINLINE)) {
2601				if (!(flags & MSG_PEEK)) {
2602					WRITE_ONCE(u->oob_skb, NULL);
2603					consume_skb(skb);
2604				}
2605			} else if (!(flags & MSG_PEEK)) {
2606				skb_unlink(skb, &sk->sk_receive_queue);
2607				consume_skb(skb);
2608				skb = skb_peek(&sk->sk_receive_queue);
2609			}
2610		}
2611	}
2612	return skb;
2613}
2614#endif
2615
2616static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2617{
2618	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2619		return -ENOTCONN;
2620
2621	return unix_read_skb(sk, recv_actor);
2622}
2623
2624static int unix_stream_read_generic(struct unix_stream_read_state *state,
2625				    bool freezable)
2626{
2627	struct scm_cookie scm;
2628	struct socket *sock = state->socket;
2629	struct sock *sk = sock->sk;
2630	struct unix_sock *u = unix_sk(sk);
2631	int copied = 0;
2632	int flags = state->flags;
2633	int noblock = flags & MSG_DONTWAIT;
2634	bool check_creds = false;
2635	int target;
2636	int err = 0;
2637	long timeo;
2638	int skip;
2639	size_t size = state->size;
2640	unsigned int last_len;
2641
2642	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2643		err = -EINVAL;
2644		goto out;
2645	}
2646
2647	if (unlikely(flags & MSG_OOB)) {
2648		err = -EOPNOTSUPP;
2649#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2650		err = unix_stream_recv_urg(state);
2651#endif
2652		goto out;
2653	}
2654
2655	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2656	timeo = sock_rcvtimeo(sk, noblock);
2657
2658	memset(&scm, 0, sizeof(scm));
2659
2660	/* Lock the socket to prevent queue disordering
2661	 * while we sleep in memcpy_to_msg()
2662	 */
2663	mutex_lock(&u->iolock);
2664
2665	skip = max(sk_peek_offset(sk, flags), 0);
2666
2667	do {
2668		int chunk;
2669		bool drop_skb;
2670		struct sk_buff *skb, *last;
2671
2672redo:
2673		unix_state_lock(sk);
2674		if (sock_flag(sk, SOCK_DEAD)) {
2675			err = -ECONNRESET;
2676			goto unlock;
2677		}
2678		last = skb = skb_peek(&sk->sk_receive_queue);
2679		last_len = last ? last->len : 0;
2680
2681#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2682		if (skb) {
2683			skb = manage_oob(skb, sk, flags, copied);
2684			if (!skb) {
2685				unix_state_unlock(sk);
2686				if (copied)
2687					break;
2688				goto redo;
2689			}
2690		}
2691#endif
2692again:
2693		if (skb == NULL) {
2694			if (copied >= target)
2695				goto unlock;
2696
2697			/*
2698			 *	POSIX 1003.1g mandates this order.
2699			 */
2700
2701			err = sock_error(sk);
2702			if (err)
2703				goto unlock;
2704			if (sk->sk_shutdown & RCV_SHUTDOWN)
2705				goto unlock;
2706
2707			unix_state_unlock(sk);
2708			if (!timeo) {
2709				err = -EAGAIN;
2710				break;
2711			}
2712
2713			mutex_unlock(&u->iolock);
2714
2715			timeo = unix_stream_data_wait(sk, timeo, last,
2716						      last_len, freezable);
2717
2718			if (signal_pending(current)) {
2719				err = sock_intr_errno(timeo);
2720				scm_destroy(&scm);
2721				goto out;
2722			}
2723
2724			mutex_lock(&u->iolock);
2725			goto redo;
2726unlock:
2727			unix_state_unlock(sk);
2728			break;
2729		}
2730
2731		while (skip >= unix_skb_len(skb)) {
2732			skip -= unix_skb_len(skb);
2733			last = skb;
2734			last_len = skb->len;
2735			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2736			if (!skb)
2737				goto again;
2738		}
2739
2740		unix_state_unlock(sk);
2741
2742		if (check_creds) {
2743			/* Never glue messages from different writers */
2744			if (!unix_skb_scm_eq(skb, &scm))
2745				break;
2746		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2747			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2748			/* Copy credentials */
2749			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2750			unix_set_secdata(&scm, skb);
2751			check_creds = true;
2752		}
2753
2754		/* Copy address just once */
2755		if (state->msg && state->msg->msg_name) {
2756			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2757					 state->msg->msg_name);
2758			unix_copy_addr(state->msg, skb->sk);
2759
2760			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2761							      state->msg->msg_name,
2762							      &state->msg->msg_namelen);
2763
2764			sunaddr = NULL;
2765		}
2766
2767		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2768		skb_get(skb);
2769		chunk = state->recv_actor(skb, skip, chunk, state);
2770		drop_skb = !unix_skb_len(skb);
2771		/* skb is only safe to use if !drop_skb */
2772		consume_skb(skb);
2773		if (chunk < 0) {
2774			if (copied == 0)
2775				copied = -EFAULT;
2776			break;
2777		}
2778		copied += chunk;
2779		size -= chunk;
2780
2781		if (drop_skb) {
2782			/* The skb was fully consumed by a concurrent reader;
2783			 * we should not expect anything more from it and
2784			 * must treat it as invalid - we can be sure it
2785			 * has been dropped from the socket queue.
2786			 *
2787			 * Report a short read instead.
2788			 */
2789			err = 0;
2790			break;
2791		}
2792
2793		/* Mark read part of skb as used */
2794		if (!(flags & MSG_PEEK)) {
2795			UNIXCB(skb).consumed += chunk;
2796
2797			sk_peek_offset_bwd(sk, chunk);
2798
2799			if (UNIXCB(skb).fp) {
2800				scm_stat_del(sk, skb);
2801				unix_detach_fds(&scm, skb);
2802			}
2803
2804			if (unix_skb_len(skb))
2805				break;
2806
2807			skb_unlink(skb, &sk->sk_receive_queue);
2808			consume_skb(skb);
2809
2810			if (scm.fp)
2811				break;
2812		} else {
2813			/* It is questionable, see the note in __unix_dgram_recvmsg().
2814			 */
2815			if (UNIXCB(skb).fp)
2816				unix_peek_fds(&scm, skb);
2817
2818			sk_peek_offset_fwd(sk, chunk);
2819
2820			if (UNIXCB(skb).fp)
2821				break;
2822
2823			skip = 0;
2824			last = skb;
2825			last_len = skb->len;
2826			unix_state_lock(sk);
2827			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2828			if (skb)
2829				goto again;
2830			unix_state_unlock(sk);
2831			break;
2832		}
2833	} while (size);
2834
2835	mutex_unlock(&u->iolock);
2836	if (state->msg)
2837		scm_recv_unix(sock, state->msg, &scm, flags);
2838	else
2839		scm_destroy(&scm);
2840out:
2841	return copied ? : err;
2842}
2843
2844static int unix_stream_read_actor(struct sk_buff *skb,
2845				  int skip, int chunk,
2846				  struct unix_stream_read_state *state)
2847{
2848	int ret;
2849
2850	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2851				    state->msg, chunk);
2852	return ret ?: chunk;
2853}
2854
2855int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2856			  size_t size, int flags)
2857{
2858	struct unix_stream_read_state state = {
2859		.recv_actor = unix_stream_read_actor,
2860		.socket = sk->sk_socket,
2861		.msg = msg,
2862		.size = size,
2863		.flags = flags
2864	};
2865
2866	return unix_stream_read_generic(&state, true);
2867}
2868
2869static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2870			       size_t size, int flags)
2871{
2872	struct unix_stream_read_state state = {
2873		.recv_actor = unix_stream_read_actor,
2874		.socket = sock,
2875		.msg = msg,
2876		.size = size,
2877		.flags = flags
2878	};
2879
2880#ifdef CONFIG_BPF_SYSCALL
2881	struct sock *sk = sock->sk;
2882	const struct proto *prot = READ_ONCE(sk->sk_prot);
2883
2884	if (prot != &unix_stream_proto)
2885		return prot->recvmsg(sk, msg, size, flags, NULL);
2886#endif
2887	return unix_stream_read_generic(&state, true);
2888}
2889
2890static int unix_stream_splice_actor(struct sk_buff *skb,
2891				    int skip, int chunk,
2892				    struct unix_stream_read_state *state)
2893{
2894	return skb_splice_bits(skb, state->socket->sk,
2895			       UNIXCB(skb).consumed + skip,
2896			       state->pipe, chunk, state->splice_flags);
2897}
2898
2899static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2900				       struct pipe_inode_info *pipe,
2901				       size_t size, unsigned int flags)
2902{
2903	struct unix_stream_read_state state = {
2904		.recv_actor = unix_stream_splice_actor,
2905		.socket = sock,
2906		.pipe = pipe,
2907		.size = size,
2908		.splice_flags = flags,
2909	};
2910
2911	if (unlikely(*ppos))
2912		return -ESPIPE;
2913
2914	if (sock->file->f_flags & O_NONBLOCK ||
2915	    flags & SPLICE_F_NONBLOCK)
2916		state.flags = MSG_DONTWAIT;
2917
2918	return unix_stream_read_generic(&state, false);
2919}
2920
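/* Editorial sketch (userspace, illustrative): splicing stream data
 * into a pipe without an intermediate userspace copy.  The length is
 * an assumption:
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	splice(sockfd, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK);
 */
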
2921static int unix_shutdown(struct socket *sock, int mode)
2922{
2923	struct sock *sk = sock->sk;
2924	struct sock *other;
2925
2926	if (mode < SHUT_RD || mode > SHUT_RDWR)
2927		return -EINVAL;
2928	/* This maps:
2929	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
2930	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
2931	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2932	 */
2933	++mode;
2934
2935	unix_state_lock(sk);
2936	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
2937	other = unix_peer(sk);
2938	if (other)
2939		sock_hold(other);
2940	unix_state_unlock(sk);
2941	sk->sk_state_change(sk);
2942
2943	if (other &&
2944		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2945
2946		int peer_mode = 0;
2947		const struct proto *prot = READ_ONCE(other->sk_prot);
2948
2949		if (prot->unhash)
2950			prot->unhash(other);
2951		if (mode&RCV_SHUTDOWN)
2952			peer_mode |= SEND_SHUTDOWN;
2953		if (mode&SEND_SHUTDOWN)
2954			peer_mode |= RCV_SHUTDOWN;
2955		unix_state_lock(other);
2956		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
2957		unix_state_unlock(other);
2958		other->sk_state_change(other);
2959		if (peer_mode == SHUTDOWN_MASK)
2960			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2961		else if (peer_mode & RCV_SHUTDOWN)
2962			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2963	}
2964	if (other)
2965		sock_put(other);
2966
2967	return 0;
2968}
2969
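/* Editorial sketch (userspace, illustrative) of the mode mapping
 * above: SHUT_WR on one end becomes RCV_SHUTDOWN on the peer, so the
 * peer reads EOF but can still send back.  sv[] from socketpair() and
 * buf are assumptions:
 *
 *	shutdown(sv[0], SHUT_WR);
 *	read(sv[1], buf, sizeof(buf));   // returns 0 (EOF)
 *	write(sv[1], "ok", 2);           // still succeeds
 */
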
2970long unix_inq_len(struct sock *sk)
2971{
2972	struct sk_buff *skb;
2973	long amount = 0;
2974
2975	if (sk->sk_state == TCP_LISTEN)
2976		return -EINVAL;
2977
2978	spin_lock(&sk->sk_receive_queue.lock);
2979	if (sk->sk_type == SOCK_STREAM ||
2980	    sk->sk_type == SOCK_SEQPACKET) {
2981		skb_queue_walk(&sk->sk_receive_queue, skb)
2982			amount += unix_skb_len(skb);
2983	} else {
2984		skb = skb_peek(&sk->sk_receive_queue);
2985		if (skb)
2986			amount = skb->len;
2987	}
2988	spin_unlock(&sk->sk_receive_queue.lock);
2989
2990	return amount;
2991}
2992EXPORT_SYMBOL_GPL(unix_inq_len);
2993
2994long unix_outq_len(struct sock *sk)
2995{
2996	return sk_wmem_alloc_get(sk);
2997}
2998EXPORT_SYMBOL_GPL(unix_outq_len);
2999
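/* Editorial sketch (userspace, illustrative): these two helpers back
 * the SIOCINQ and SIOCOUTQ ioctls handled in unix_ioctl() below:
 *
 *	int n;
 *	ioctl(sockfd, SIOCINQ, &n);    // unread bytes queued for us
 *	ioctl(sockfd, SIOCOUTQ, &n);   // bytes not yet consumed by the peer
 */
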
3000static int unix_open_file(struct sock *sk)
3001{
3002	struct path path;
3003	struct file *f;
3004	int fd;
3005
3006	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3007		return -EPERM;
3008
3009	if (!smp_load_acquire(&unix_sk(sk)->addr))
3010		return -ENOENT;
3011
3012	path = unix_sk(sk)->path;
3013	if (!path.dentry)
3014		return -ENOENT;
3015
3016	path_get(&path);
3017
3018	fd = get_unused_fd_flags(O_CLOEXEC);
3019	if (fd < 0)
3020		goto out;
3021
3022	f = dentry_open(&path, O_PATH, current_cred());
3023	if (IS_ERR(f)) {
3024		put_unused_fd(fd);
3025		fd = PTR_ERR(f);
3026		goto out;
3027	}
3028
3029	fd_install(fd, f);
3030out:
3031	path_put(&path);
3032
3033	return fd;
3034}
3035
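/* Editorial sketch (userspace, illustrative; needs CAP_NET_ADMIN):
 * SIOCUNIXFILE returns an O_PATH descriptor for the filesystem object
 * a bound socket sits on, usable with the *at() family:
 *
 *	struct stat st;
 *	int pathfd = ioctl(sockfd, SIOCUNIXFILE);
 *	fstatat(pathfd, "", &st, AT_EMPTY_PATH);
 */
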
3036static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3037{
3038	struct sock *sk = sock->sk;
3039	long amount = 0;
3040	int err;
3041
3042	switch (cmd) {
3043	case SIOCOUTQ:
3044		amount = unix_outq_len(sk);
3045		err = put_user(amount, (int __user *)arg);
3046		break;
3047	case SIOCINQ:
3048		amount = unix_inq_len(sk);
3049		if (amount < 0)
3050			err = amount;
3051		else
3052			err = put_user(amount, (int __user *)arg);
3053		break;
3054	case SIOCUNIXFILE:
3055		err = unix_open_file(sk);
3056		break;
3057#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3058	case SIOCATMARK:
3059		{
3060			struct sk_buff *skb;
3061			int answ = 0;
3062
3063			skb = skb_peek(&sk->sk_receive_queue);
3064			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3065				answ = 1;
3066			err = put_user(answ, (int __user *)arg);
3067		}
3068		break;
3069#endif
3070	default:
3071		err = -ENOIOCTLCMD;
3072		break;
3073	}
3074	return err;
3075}
3076
3077#ifdef CONFIG_COMPAT
3078static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3079{
3080	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3081}
3082#endif
3083
3084static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3085{
3086	struct sock *sk = sock->sk;
3087	__poll_t mask;
3088	u8 shutdown;
3089
3090	sock_poll_wait(file, sock, wait);
3091	mask = 0;
3092	shutdown = READ_ONCE(sk->sk_shutdown);
3093
3094	/* exceptional events? */
3095	if (READ_ONCE(sk->sk_err))
3096		mask |= EPOLLERR;
3097	if (shutdown == SHUTDOWN_MASK)
3098		mask |= EPOLLHUP;
3099	if (shutdown & RCV_SHUTDOWN)
3100		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3101
3102	/* readable? */
3103	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3104		mask |= EPOLLIN | EPOLLRDNORM;
3105	if (sk_is_readable(sk))
3106		mask |= EPOLLIN | EPOLLRDNORM;
3107#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3108	if (READ_ONCE(unix_sk(sk)->oob_skb))
3109		mask |= EPOLLPRI;
3110#endif
3111
3112	/* Connection-based sockets need to check for termination and startup */
3113	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3114	    sk->sk_state == TCP_CLOSE)
3115		mask |= EPOLLHUP;
3116
3117	/*
3118	 * We set writable also when the other side has shut down the
3119	 * connection. This prevents stuck sockets.
3120	 */
3121	if (unix_writable(sk))
3122		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3123
3124	return mask;
3125}
3126
3127static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3128				    poll_table *wait)
3129{
3130	struct sock *sk = sock->sk, *other;
3131	unsigned int writable;
3132	__poll_t mask;
3133	u8 shutdown;
3134
3135	sock_poll_wait(file, sock, wait);
3136	mask = 0;
3137	shutdown = READ_ONCE(sk->sk_shutdown);
3138
3139	/* exceptional events? */
3140	if (READ_ONCE(sk->sk_err) ||
3141	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3142		mask |= EPOLLERR |
3143			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3144
3145	if (shutdown & RCV_SHUTDOWN)
3146		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3147	if (shutdown == SHUTDOWN_MASK)
3148		mask |= EPOLLHUP;
3149
3150	/* readable? */
3151	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3152		mask |= EPOLLIN | EPOLLRDNORM;
3153	if (sk_is_readable(sk))
3154		mask |= EPOLLIN | EPOLLRDNORM;
3155
3156	/* Connection-based sockets need to check for termination and startup */
3157	if (sk->sk_type == SOCK_SEQPACKET) {
3158		if (sk->sk_state == TCP_CLOSE)
3159			mask |= EPOLLHUP;
3160		/* connection hasn't started yet? */
3161		if (sk->sk_state == TCP_SYN_SENT)
3162			return mask;
3163	}
3164
3165	/* No write status requested, avoid expensive OUT tests. */
3166	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3167		return mask;
3168
3169	writable = unix_writable(sk);
3170	if (writable) {
3171		unix_state_lock(sk);
3172
3173		other = unix_peer(sk);
3174		if (other && unix_peer(other) != sk &&
3175		    unix_recvq_full_lockless(other) &&
3176		    unix_dgram_peer_wake_me(sk, other))
3177			writable = 0;
3178
3179		unix_state_unlock(sk);
3180	}
3181
3182	if (writable)
3183		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3184	else
3185		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3186
3187	return mask;
3188}
3189
3190#ifdef CONFIG_PROC_FS
3191
3192#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3193
3194#define get_bucket(x) ((x) >> BUCKET_SPACE)
3195#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3196#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
3197
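/* For example, set_bucket_offset(2, 3) packs "bucket 2, third socket"
 * into a single loff_t, and get_bucket()/get_offset() unpack it again.
 * Offsets start counting at 1, which is why an exhausted bucket
 * advances with set_bucket_offset(++bucket, 1) below.
 */
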
3198static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3199{
3200	unsigned long offset = get_offset(*pos);
3201	unsigned long bucket = get_bucket(*pos);
3202	unsigned long count = 0;
3203	struct sock *sk;
3204
3205	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3206	     sk; sk = sk_next(sk)) {
3207		if (++count == offset)
3208			break;
3209	}
3210
3211	return sk;
3212}
3213
3214static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3215{
3216	unsigned long bucket = get_bucket(*pos);
3217	struct net *net = seq_file_net(seq);
3218	struct sock *sk;
3219
3220	while (bucket < UNIX_HASH_SIZE) {
3221		spin_lock(&net->unx.table.locks[bucket]);
3222
3223		sk = unix_from_bucket(seq, pos);
3224		if (sk)
3225			return sk;
3226
3227		spin_unlock(&net->unx.table.locks[bucket]);
3228
3229		*pos = set_bucket_offset(++bucket, 1);
3230	}
3231
3232	return NULL;
3233}
3234
3235static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3236				  loff_t *pos)
3237{
3238	unsigned long bucket = get_bucket(*pos);
3239
3240	sk = sk_next(sk);
3241	if (sk)
3242		return sk;
3243
3245	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3246
3247	*pos = set_bucket_offset(++bucket, 1);
3248
3249	return unix_get_first(seq, pos);
3250}
3251
3252static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3253{
3254	if (!*pos)
3255		return SEQ_START_TOKEN;
3256
3257	return unix_get_first(seq, pos);
3258}
3259
3260static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3261{
3262	++*pos;
3263
3264	if (v == SEQ_START_TOKEN)
3265		return unix_get_first(seq, pos);
3266
3267	return unix_get_next(seq, v, pos);
3268}
3269
3270static void unix_seq_stop(struct seq_file *seq, void *v)
3271{
3272	struct sock *sk = v;
3273
3274	if (sk)
3275		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3276}
3277
3278static int unix_seq_show(struct seq_file *seq, void *v)
3279{
3280
3281	if (v == SEQ_START_TOKEN)
3282		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3283			 "Inode Path\n");
3284	else {
3285		struct sock *s = v;
3286		struct unix_sock *u = unix_sk(s);
3287		unix_state_lock(s);
3288
3289		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3290			s,
3291			refcount_read(&s->sk_refcnt),
3292			0,
3293			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3294			s->sk_type,
3295			s->sk_socket ?
3296			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3297			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3298			sock_i_ino(s));
3299
3300		if (u->addr) {	/* under a hash table lock here */
3301			int i, len;
3302			seq_putc(seq, ' ');
3303
3304			i = 0;
3305			len = u->addr->len -
3306				offsetof(struct sockaddr_un, sun_path);
3307			if (u->addr->name->sun_path[0]) {
3308				len--;
3309			} else {
3310				seq_putc(seq, '@');
3311				i++;
3312			}
3313			for ( ; i < len; i++)
3314				seq_putc(seq, u->addr->name->sun_path[i] ?:
3315					 '@');
3316		}
3317		unix_state_unlock(s);
3318		seq_putc(seq, '\n');
3319	}
3320
3321	return 0;
3322}
3323
3324static const struct seq_operations unix_seq_ops = {
3325	.start  = unix_seq_start,
3326	.next   = unix_seq_next,
3327	.stop   = unix_seq_stop,
3328	.show   = unix_seq_show,
3329};
3330
3331#ifdef CONFIG_BPF_SYSCALL
3332struct bpf_unix_iter_state {
3333	struct seq_net_private p;
3334	unsigned int cur_sk;
3335	unsigned int end_sk;
3336	unsigned int max_sk;
3337	struct sock **batch;
3338	bool st_bucket_done;
3339};
3340
3341struct bpf_iter__unix {
3342	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3343	__bpf_md_ptr(struct unix_sock *, unix_sk);
3344	uid_t uid __aligned(8);
3345};
3346
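/* Editorial sketch (BPF program side, compiled separately; modeled on
 * the kernel selftests, so treat the details as assumptions):
 *
 *	SEC("iter/unix")
 *	int dump_unix(struct bpf_iter__unix *ctx)
 *	{
 *		struct unix_sock *unix_sk = ctx->unix_sk;
 *
 *		if (!unix_sk)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "uid=%u\n", ctx->uid);
 *		return 0;
 *	}
 */
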
3347static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3348			      struct unix_sock *unix_sk, uid_t uid)
3349{
3350	struct bpf_iter__unix ctx;
3351
3352	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3353	ctx.meta = meta;
3354	ctx.unix_sk = unix_sk;
3355	ctx.uid = uid;
3356	return bpf_iter_run_prog(prog, &ctx);
3357}
3358
3359static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3360
3361{
3362	struct bpf_unix_iter_state *iter = seq->private;
3363	unsigned int expected = 1;
3364	struct sock *sk;
3365
3366	sock_hold(start_sk);
3367	iter->batch[iter->end_sk++] = start_sk;
3368
3369	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3370		if (iter->end_sk < iter->max_sk) {
3371			sock_hold(sk);
3372			iter->batch[iter->end_sk++] = sk;
3373		}
3374
3375		expected++;
3376	}
3377
3378	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3379
3380	return expected;
3381}
3382
3383static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3384{
3385	while (iter->cur_sk < iter->end_sk)
3386		sock_put(iter->batch[iter->cur_sk++]);
3387}
3388
3389static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3390				       unsigned int new_batch_sz)
3391{
3392	struct sock **new_batch;
3393
3394	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3395			     GFP_USER | __GFP_NOWARN);
3396	if (!new_batch)
3397		return -ENOMEM;
3398
3399	bpf_iter_unix_put_batch(iter);
3400	kvfree(iter->batch);
3401	iter->batch = new_batch;
3402	iter->max_sk = new_batch_sz;
3403
3404	return 0;
3405}
3406
3407static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3408					loff_t *pos)
3409{
3410	struct bpf_unix_iter_state *iter = seq->private;
3411	unsigned int expected;
3412	bool resized = false;
3413	struct sock *sk;
3414
3415	if (iter->st_bucket_done)
3416		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3417
3418again:
3419	/* Get a new batch */
3420	iter->cur_sk = 0;
3421	iter->end_sk = 0;
3422
3423	sk = unix_get_first(seq, pos);
3424	if (!sk)
3425		return NULL; /* Done */
3426
3427	expected = bpf_iter_unix_hold_batch(seq, sk);
3428
3429	if (iter->end_sk == expected) {
3430		iter->st_bucket_done = true;
3431		return sk;
3432	}
3433
3434	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3435		resized = true;
3436		goto again;
3437	}
3438
3439	return sk;
3440}
3441
3442static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3443{
3444	if (!*pos)
3445		return SEQ_START_TOKEN;
3446
3447	/* bpf iter does not support lseek, so it always
3448	 * continues from where it was stop()-ped.
3449	 */
3450	return bpf_iter_unix_batch(seq, pos);
3451}
3452
3453static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3454{
3455	struct bpf_unix_iter_state *iter = seq->private;
3456	struct sock *sk;
3457
3458	/* Whenever seq_next() is called, the socket at iter->cur_sk
3459	 * is done with seq_show(), so advance to the next sk in
3460	 * the batch.
3461	 */
3462	if (iter->cur_sk < iter->end_sk)
3463		sock_put(iter->batch[iter->cur_sk++]);
3464
3465	++*pos;
3466
3467	if (iter->cur_sk < iter->end_sk)
3468		sk = iter->batch[iter->cur_sk];
3469	else
3470		sk = bpf_iter_unix_batch(seq, pos);
3471
3472	return sk;
3473}
3474
3475static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3476{
3477	struct bpf_iter_meta meta;
3478	struct bpf_prog *prog;
3479	struct sock *sk = v;
3480	uid_t uid;
3481	bool slow;
3482	int ret;
3483
3484	if (v == SEQ_START_TOKEN)
3485		return 0;
3486
3487	slow = lock_sock_fast(sk);
3488
3489	if (unlikely(sk_unhashed(sk))) {
3490		ret = SEQ_SKIP;
3491		goto unlock;
3492	}
3493
3494	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3495	meta.seq = seq;
3496	prog = bpf_iter_get_info(&meta, false);
3497	ret = unix_prog_seq_show(prog, &meta, v, uid);
3498unlock:
3499	unlock_sock_fast(sk, slow);
3500	return ret;
3501}
3502
3503static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3504{
3505	struct bpf_unix_iter_state *iter = seq->private;
3506	struct bpf_iter_meta meta;
3507	struct bpf_prog *prog;
3508
3509	if (!v) {
3510		meta.seq = seq;
3511		prog = bpf_iter_get_info(&meta, true);
3512		if (prog)
3513			(void)unix_prog_seq_show(prog, &meta, v, 0);
3514	}
3515
3516	if (iter->cur_sk < iter->end_sk)
3517		bpf_iter_unix_put_batch(iter);
3518}
3519
3520static const struct seq_operations bpf_iter_unix_seq_ops = {
3521	.start	= bpf_iter_unix_seq_start,
3522	.next	= bpf_iter_unix_seq_next,
3523	.stop	= bpf_iter_unix_seq_stop,
3524	.show	= bpf_iter_unix_seq_show,
3525};
3526#endif
3527#endif
3528
3529static const struct net_proto_family unix_family_ops = {
3530	.family = PF_UNIX,
3531	.create = unix_create,
3532	.owner	= THIS_MODULE,
3533};
3534
3535
3536static int __net_init unix_net_init(struct net *net)
3537{
3538	int i;
3539
3540	net->unx.sysctl_max_dgram_qlen = 10;
3541	if (unix_sysctl_register(net))
3542		goto out;
3543
3544#ifdef CONFIG_PROC_FS
3545	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3546			     sizeof(struct seq_net_private)))
3547		goto err_sysctl;
3548#endif
3549
3550	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3551					      sizeof(spinlock_t), GFP_KERNEL);
3552	if (!net->unx.table.locks)
3553		goto err_proc;
3554
3555	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3556						sizeof(struct hlist_head),
3557						GFP_KERNEL);
3558	if (!net->unx.table.buckets)
3559		goto free_locks;
3560
3561	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3562		spin_lock_init(&net->unx.table.locks[i]);
3563		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3564	}
3565
3566	return 0;
3567
3568free_locks:
3569	kvfree(net->unx.table.locks);
3570err_proc:
3571#ifdef CONFIG_PROC_FS
3572	remove_proc_entry("unix", net->proc_net);
3573err_sysctl:
3574#endif
3575	unix_sysctl_unregister(net);
3576out:
3577	return -ENOMEM;
3578}
3579
3580static void __net_exit unix_net_exit(struct net *net)
3581{
3582	kvfree(net->unx.table.buckets);
3583	kvfree(net->unx.table.locks);
3584	unix_sysctl_unregister(net);
3585	remove_proc_entry("unix", net->proc_net);
3586}
3587
3588static struct pernet_operations unix_net_ops = {
3589	.init = unix_net_init,
3590	.exit = unix_net_exit,
3591};
3592
3593#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3594DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3595		     struct unix_sock *unix_sk, uid_t uid)
3596
3597#define INIT_BATCH_SZ 16
3598
3599static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3600{
3601	struct bpf_unix_iter_state *iter = priv_data;
3602	int err;
3603
3604	err = bpf_iter_init_seq_net(priv_data, aux);
3605	if (err)
3606		return err;
3607
3608	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3609	if (err) {
3610		bpf_iter_fini_seq_net(priv_data);
3611		return err;
3612	}
3613
3614	return 0;
3615}
3616
3617static void bpf_iter_fini_unix(void *priv_data)
3618{
3619	struct bpf_unix_iter_state *iter = priv_data;
3620
3621	bpf_iter_fini_seq_net(priv_data);
3622	kvfree(iter->batch);
3623}
3624
3625static const struct bpf_iter_seq_info unix_seq_info = {
3626	.seq_ops		= &bpf_iter_unix_seq_ops,
3627	.init_seq_private	= bpf_iter_init_unix,
3628	.fini_seq_private	= bpf_iter_fini_unix,
3629	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3630};
3631
3632static const struct bpf_func_proto *
3633bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3634			     const struct bpf_prog *prog)
3635{
3636	switch (func_id) {
3637	case BPF_FUNC_setsockopt:
3638		return &bpf_sk_setsockopt_proto;
3639	case BPF_FUNC_getsockopt:
3640		return &bpf_sk_getsockopt_proto;
3641	default:
3642		return NULL;
3643	}
3644}
3645
3646static struct bpf_iter_reg unix_reg_info = {
3647	.target			= "unix",
3648	.ctx_arg_info_size	= 1,
3649	.ctx_arg_info		= {
3650		{ offsetof(struct bpf_iter__unix, unix_sk),
3651		  PTR_TO_BTF_ID_OR_NULL },
3652	},
3653	.get_func_proto         = bpf_iter_unix_get_func_proto,
3654	.seq_info		= &unix_seq_info,
3655};
3656
3657static void __init bpf_iter_register(void)
3658{
3659	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3660	if (bpf_iter_reg_target(&unix_reg_info))
3661		pr_warn("Warning: could not register bpf iterator unix\n");
3662}
3663#endif
3664
3665static int __init af_unix_init(void)
3666{
3667	int i, rc = -1;
3668
3669	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3670
3671	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3672		spin_lock_init(&bsd_socket_locks[i]);
3673		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3674	}
3675
3676	rc = proto_register(&unix_dgram_proto, 1);
3677	if (rc != 0) {
3678		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3679		goto out;
3680	}
3681
3682	rc = proto_register(&unix_stream_proto, 1);
3683	if (rc != 0) {
3684		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3685		proto_unregister(&unix_dgram_proto);
3686		goto out;
3687	}
3688
3689	sock_register(&unix_family_ops);
3690	register_pernet_subsys(&unix_net_ops);
3691	unix_bpf_build_proto();
3692
3693#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3694	bpf_iter_register();
3695#endif
3696
3697out:
3698	return rc;
3699}
3700
3701/* Later than subsys_initcall() because we depend on stuff initialised there */
3702fs_initcall(af_unix_init);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * NET4:	Implementation of BSD Unix domain sockets.
   4 *
   5 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
   6 *
   7 * Fixes:
   8 *		Linus Torvalds	:	Assorted bug cures.
   9 *		Niibe Yutaka	:	async I/O support.
  10 *		Carsten Paeth	:	PF_UNIX check, address fixes.
  11 *		Alan Cox	:	Limit size of allocated blocks.
  12 *		Alan Cox	:	Fixed the stupid socketpair bug.
  13 *		Alan Cox	:	BSD compatibility fine tuning.
  14 *		Alan Cox	:	Fixed a bug in connect when interrupted.
  15 *		Alan Cox	:	Sorted out a proper draft version of
  16 *					file descriptor passing hacked up from
  17 *					Mike Shaver's work.
  18 *		Marty Leisner	:	Fixes to fd passing
  19 *		Nick Nevin	:	recvmsg bugfix.
  20 *		Alan Cox	:	Started proper garbage collector
  21 *		Heiko EiBfeldt	:	Missing verify_area check
  22 *		Alan Cox	:	Started POSIXisms
  23 *		Andreas Schwab	:	Replace inode by dentry for proper
  24 *					reference counting
  25 *		Kirk Petersen	:	Made this a module
  26 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
  27 *					Lots of bug fixes.
  28 *	     Alexey Kuznetosv	:	Repaired (I hope) bugs introduces
  29 *					by above two patches.
  30 *	     Andrea Arcangeli	:	If possible we block in connect(2)
  31 *					if the max backlog of the listen socket
  32 *					is been reached. This won't break
  33 *					old apps and it will avoid huge amount
  34 *					of socks hashed (this for unix_gc()
  35 *					performances reasons).
  36 *					Security fix that limits the max
  37 *					number of socks to 2*max_files and
  38 *					the number of skb queueable in the
  39 *					dgram receiver.
  40 *		Artur Skawina   :	Hash function optimizations
  41 *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
  42 *	      Malcolm Beattie   :	Set peercred for socketpair
  43 *	     Michal Ostrowski   :       Module initialization cleanup.
  44 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
  45 *	     				the core infrastructure is doing that
  46 *	     				for all net proto families now (2.5.69+)
  47 *
  48 * Known differences from reference BSD that was tested:
  49 *
  50 *	[TO FIX]
  51 *	ECONNREFUSED is not returned from one end of a connected() socket to the
  52 *		other the moment one end closes.
  53 *	fstat() doesn't return st_dev=0, and give the blksize as high water mark
  54 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
  55 *	[NOT TO FIX]
  56 *	accept() returns a path name even if the connecting socket has closed
  57 *		in the meantime (BSD loses the path and gives up).
  58 *	accept() returns 0 length path for an unbound connector. BSD returns 16
  59 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  60 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
  61 *	BSD af_unix apparently has connect forgetting to block properly.
  62 *		(need to check this with the POSIX spec in detail)
  63 *
  64 * Differences from 2.0.0-11-... (ANK)
  65 *	Bug fixes and improvements.
  66 *		- client shutdown killed server socket.
  67 *		- removed all useless cli/sti pairs.
  68 *
  69 *	Semantic changes/extensions.
  70 *		- generic control message passing.
  71 *		- SCM_CREDENTIALS control message.
  72 *		- "Abstract" (not FS based) socket bindings.
  73 *		  Abstract names are sequences of bytes (not zero terminated)
  74 *		  started by 0, so that this name space does not intersect
  75 *		  with BSD names.
  76 */
  77
  78#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  79
  80#include <linux/module.h>
  81#include <linux/kernel.h>
  82#include <linux/signal.h>
  83#include <linux/sched/signal.h>
  84#include <linux/errno.h>
  85#include <linux/string.h>
  86#include <linux/stat.h>
  87#include <linux/dcache.h>
  88#include <linux/namei.h>
  89#include <linux/socket.h>
  90#include <linux/un.h>
  91#include <linux/fcntl.h>
  92#include <linux/filter.h>
  93#include <linux/termios.h>
  94#include <linux/sockios.h>
  95#include <linux/net.h>
  96#include <linux/in.h>
  97#include <linux/fs.h>
  98#include <linux/slab.h>
  99#include <linux/uaccess.h>
 100#include <linux/skbuff.h>
 101#include <linux/netdevice.h>
 102#include <net/net_namespace.h>
 103#include <net/sock.h>
 104#include <net/tcp_states.h>
 105#include <net/af_unix.h>
 106#include <linux/proc_fs.h>
 107#include <linux/seq_file.h>
 108#include <net/scm.h>
 109#include <linux/init.h>
 110#include <linux/poll.h>
 111#include <linux/rtnetlink.h>
 112#include <linux/mount.h>
 113#include <net/checksum.h>
 114#include <linux/security.h>
 115#include <linux/splice.h>
 116#include <linux/freezer.h>
 117#include <linux/file.h>
 118#include <linux/btf_ids.h>
 119#include <linux/bpf-cgroup.h>
 120
 
 
 121static atomic_long_t unix_nr_socks;
 122static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
 123static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
 124
 125/* SMP locking strategy:
 126 *    hash table is protected with spinlock.
 127 *    each socket state is protected by separate spinlock.
 128 */
 129
 130static unsigned int unix_unbound_hash(struct sock *sk)
 131{
 132	unsigned long hash = (unsigned long)sk;
 133
 134	hash ^= hash >> 16;
 135	hash ^= hash >> 8;
 136	hash ^= sk->sk_type;
 137
 138	return hash & UNIX_HASH_MOD;
 139}
 140
 141static unsigned int unix_bsd_hash(struct inode *i)
 142{
 143	return i->i_ino & UNIX_HASH_MOD;
 144}
 145
 146static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
 147				       int addr_len, int type)
 148{
 149	__wsum csum = csum_partial(sunaddr, addr_len, 0);
 150	unsigned int hash;
 151
 152	hash = (__force unsigned int)csum_fold(csum);
 153	hash ^= hash >> 8;
 154	hash ^= type;
 155
 156	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
 157}
 158
 159static void unix_table_double_lock(struct net *net,
 160				   unsigned int hash1, unsigned int hash2)
 161{
 162	if (hash1 == hash2) {
 163		spin_lock(&net->unx.table.locks[hash1]);
 164		return;
 165	}
 166
 167	if (hash1 > hash2)
 168		swap(hash1, hash2);
 169
 170	spin_lock(&net->unx.table.locks[hash1]);
 171	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
 172}
 173
 174static void unix_table_double_unlock(struct net *net,
 175				     unsigned int hash1, unsigned int hash2)
 176{
 177	if (hash1 == hash2) {
 178		spin_unlock(&net->unx.table.locks[hash1]);
 179		return;
 180	}
 181
 182	spin_unlock(&net->unx.table.locks[hash1]);
 183	spin_unlock(&net->unx.table.locks[hash2]);
 184}
 185
 186#ifdef CONFIG_SECURITY_NETWORK
 187static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 188{
 189	UNIXCB(skb).secid = scm->secid;
 190}
 191
 192static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 193{
 194	scm->secid = UNIXCB(skb).secid;
 195}
 196
 197static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 198{
 199	return (scm->secid == UNIXCB(skb).secid);
 200}
 201#else
 202static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 203{ }
 204
 205static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 206{ }
 207
 208static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 209{
 210	return true;
 211}
 212#endif /* CONFIG_SECURITY_NETWORK */
 213
 214static inline int unix_our_peer(struct sock *sk, struct sock *osk)
 215{
 216	return unix_peer(osk) == sk;
 217}
 218
 219static inline int unix_may_send(struct sock *sk, struct sock *osk)
 220{
 221	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 222}
 223
 224static inline int unix_recvq_full(const struct sock *sk)
 225{
 226	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 227}
 228
 229static inline int unix_recvq_full_lockless(const struct sock *sk)
 230{
 231	return skb_queue_len_lockless(&sk->sk_receive_queue) >
 232		READ_ONCE(sk->sk_max_ack_backlog);
 233}
 234
 235struct sock *unix_peer_get(struct sock *s)
 236{
 237	struct sock *peer;
 238
 239	unix_state_lock(s);
 240	peer = unix_peer(s);
 241	if (peer)
 242		sock_hold(peer);
 243	unix_state_unlock(s);
 244	return peer;
 245}
 246EXPORT_SYMBOL_GPL(unix_peer_get);
 247
 248static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
 249					     int addr_len)
 250{
 251	struct unix_address *addr;
 252
 253	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
 254	if (!addr)
 255		return NULL;
 256
 257	refcount_set(&addr->refcnt, 1);
 258	addr->len = addr_len;
 259	memcpy(addr->name, sunaddr, addr_len);
 260
 261	return addr;
 262}
 263
 264static inline void unix_release_addr(struct unix_address *addr)
 265{
 266	if (refcount_dec_and_test(&addr->refcnt))
 267		kfree(addr);
 268}
 269
 270/*
 271 *	Check unix socket name:
 272 *		- should be not zero length.
 273 *	        - if started by not zero, should be NULL terminated (FS object)
 274 *		- if started by zero, it is abstract name.
 275 */
 276
 277static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
 278{
 279	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
 280	    addr_len > sizeof(*sunaddr))
 281		return -EINVAL;
 282
 283	if (sunaddr->sun_family != AF_UNIX)
 284		return -EINVAL;
 285
 286	return 0;
 287}
 288
 289static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
 290{
 291	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
 292	short offset = offsetof(struct sockaddr_storage, __data);
 293
 294	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
 295
 296	/* This may look like an off by one error but it is a bit more
 297	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
 298	 * sun_path[108] doesn't as such exist.  However in kernel space
 299	 * we are guaranteed that it is a valid memory location in our
 300	 * kernel address buffer because syscall functions always pass
 301	 * a pointer of struct sockaddr_storage which has a bigger buffer
 302	 * than 108.  Also, we must terminate sun_path for strlen() in
 303	 * getname_kernel().
 304	 */
 305	addr->__data[addr_len - offset] = 0;
 306
 307	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, a 108-byte
 308	 * path will cause a panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
 309	 * know the actual buffer.
 310	 */
 311	return strlen(addr->__data) + offset + 1;
 312}
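
/* A consequence of the above, as a hedged userspace sketch: a bind() that
 * fills all 108 bytes of sun_path with no trailing NUL is still accepted,
 * because the kernel terminates its own sockaddr_storage copy:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	memset(a.sun_path, 'x', sizeof(a.sun_path));	// no NUL in sun_path
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));	// valid 108-byte path
 */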
 313
 314static void __unix_remove_socket(struct sock *sk)
 315{
 316	sk_del_node_init(sk);
 317}
 318
 319static void __unix_insert_socket(struct net *net, struct sock *sk)
 320{
 321	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 322	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
 323}
 324
 325static void __unix_set_addr_hash(struct net *net, struct sock *sk,
 326				 struct unix_address *addr, unsigned int hash)
 327{
 328	__unix_remove_socket(sk);
 329	smp_store_release(&unix_sk(sk)->addr, addr);
 330
 331	sk->sk_hash = hash;
 332	__unix_insert_socket(net, sk);
 333}
 334
 335static void unix_remove_socket(struct net *net, struct sock *sk)
 336{
 337	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 338	__unix_remove_socket(sk);
 339	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 340}
 341
 342static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
 343{
 344	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 345	__unix_insert_socket(net, sk);
 346	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 347}
 348
 349static void unix_insert_bsd_socket(struct sock *sk)
 350{
 351	spin_lock(&bsd_socket_locks[sk->sk_hash]);
 352	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
 353	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 354}
 355
 356static void unix_remove_bsd_socket(struct sock *sk)
 357{
 358	if (!hlist_unhashed(&sk->sk_bind_node)) {
 359		spin_lock(&bsd_socket_locks[sk->sk_hash]);
 360		__sk_del_bind_node(sk);
 361		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 362
 363		sk_node_init(&sk->sk_bind_node);
 364	}
 365}
 366
 367static struct sock *__unix_find_socket_byname(struct net *net,
 368					      struct sockaddr_un *sunname,
 369					      int len, unsigned int hash)
 370{
 371	struct sock *s;
 372
 373	sk_for_each(s, &net->unx.table.buckets[hash]) {
 374		struct unix_sock *u = unix_sk(s);
 375
 376		if (u->addr->len == len &&
 377		    !memcmp(u->addr->name, sunname, len))
 378			return s;
 379	}
 380	return NULL;
 381}
 382
 383static inline struct sock *unix_find_socket_byname(struct net *net,
 384						   struct sockaddr_un *sunname,
 385						   int len, unsigned int hash)
 386{
 387	struct sock *s;
 388
 389	spin_lock(&net->unx.table.locks[hash]);
 390	s = __unix_find_socket_byname(net, sunname, len, hash);
 391	if (s)
 392		sock_hold(s);
 393	spin_unlock(&net->unx.table.locks[hash]);
 394	return s;
 395}
 396
 397static struct sock *unix_find_socket_byinode(struct inode *i)
 398{
 399	unsigned int hash = unix_bsd_hash(i);
 400	struct sock *s;
 401
 402	spin_lock(&bsd_socket_locks[hash]);
 403	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
 404		struct dentry *dentry = unix_sk(s)->path.dentry;
 405
 406		if (dentry && d_backing_inode(dentry) == i) {
 407			sock_hold(s);
 408			spin_unlock(&bsd_socket_locks[hash]);
 409			return s;
 410		}
 411	}
 412	spin_unlock(&bsd_socket_locks[hash]);
 413	return NULL;
 414}
 415
 416/* Support code for asymmetrically connected dgram sockets
 417 *
 418 * If a datagram socket is connected to a socket not itself connected
 419	 * to the first socket (e.g., /dev/log), clients may only enqueue more
 420 * messages if the present receive queue of the server socket is not
 421 * "too large". This means there's a second writeability condition
 422 * poll and sendmsg need to test. The dgram recv code will do a wake
 423 * up on the peer_wait wait queue of a socket upon reception of a
 424 * datagram which needs to be propagated to sleeping would-be writers
 425 * since these might not have sent anything so far. This can't be
 426 * accomplished via poll_wait because the lifetime of the server
 427 * socket might be less than that of its clients if these break their
 428 * association with it or if the server socket is closed while clients
 429 * are still connected to it and there's no way to inform "a polling
 430	 * implementation" that it should let go of a certain wait queue.
 431 *
 432 * In order to propagate a wake up, a wait_queue_entry_t of the client
 433 * socket is enqueued on the peer_wait queue of the server socket
 434 * whose wake function does a wake_up on the ordinary client socket
 435 * wait queue. This connection is established whenever a write (or
 436	 * poll for write) hits the flow control condition and is broken when
 437	 * the association to the server socket is dissolved or after a wake up
 438 * was relayed.
 439 */
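
/* A sketch of the scenario above from userspace (the abstract name "srv"
 * is an assumption):
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	socklen_t alen = offsetof(struct sockaddr_un, sun_path) + 1 + 3;
 *
 *	memcpy(a.sun_path + 1, "srv", 3);		// abstract "srv"
 *
 *	int s = socket(AF_UNIX, SOCK_DGRAM, 0);		// server, e.g. a logger
 *	bind(s, (struct sockaddr *)&a, alen);
 *
 *	int c = socket(AF_UNIX, SOCK_DGRAM, 0);		// client
 *	connect(c, (struct sockaddr *)&a, alen);	// s is not connected back
 *
 *	// Once s's receive queue is "too large", send(c, ...) blocks (or
 *	// poll() stops reporting EPOLLOUT); the relay below is what wakes
 *	// c up again after s drains its queue with recv().
 */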
 440
 441static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 442				      void *key)
 443{
 444	struct unix_sock *u;
 445	wait_queue_head_t *u_sleep;
 446
 447	u = container_of(q, struct unix_sock, peer_wake);
 448
 449	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 450			    q);
 451	u->peer_wake.private = NULL;
 452
 453	/* relaying can only happen while the wq still exists */
 454	u_sleep = sk_sleep(&u->sk);
 455	if (u_sleep)
 456		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
 457
 458	return 0;
 459}
 460
 461static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 462{
 463	struct unix_sock *u, *u_other;
 464	int rc;
 465
 466	u = unix_sk(sk);
 467	u_other = unix_sk(other);
 468	rc = 0;
 469	spin_lock(&u_other->peer_wait.lock);
 470
 471	if (!u->peer_wake.private) {
 472		u->peer_wake.private = other;
 473		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 474
 475		rc = 1;
 476	}
 477
 478	spin_unlock(&u_other->peer_wait.lock);
 479	return rc;
 480}
 481
 482static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 483					    struct sock *other)
 484{
 485	struct unix_sock *u, *u_other;
 486
 487	u = unix_sk(sk);
 488	u_other = unix_sk(other);
 489	spin_lock(&u_other->peer_wait.lock);
 490
 491	if (u->peer_wake.private == other) {
 492		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 493		u->peer_wake.private = NULL;
 494	}
 495
 496	spin_unlock(&u_other->peer_wait.lock);
 497}
 498
 499static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 500						   struct sock *other)
 501{
 502	unix_dgram_peer_wake_disconnect(sk, other);
 503	wake_up_interruptible_poll(sk_sleep(sk),
 504				   EPOLLOUT |
 505				   EPOLLWRNORM |
 506				   EPOLLWRBAND);
 507}
 508
 509/* preconditions:
 510 *	- unix_peer(sk) == other
 511 *	- association is stable
 512 */
 513static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 514{
 515	int connected;
 516
 517	connected = unix_dgram_peer_wake_connect(sk, other);
 518
 519	/* If other is SOCK_DEAD, we want to make sure we signal
 520	 * POLLOUT, such that a subsequent write() can get a
 521	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
 522	 * to other and it's full, we will hang waiting for POLLOUT.
 523	 */
 524	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
 525		return 1;
 526
 527	if (connected)
 528		unix_dgram_peer_wake_disconnect(sk, other);
 529
 530	return 0;
 531}
 532
 533static int unix_writable(const struct sock *sk)
 534{
 535	return sk->sk_state != TCP_LISTEN &&
 536	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 537}
 538
 539static void unix_write_space(struct sock *sk)
 540{
 541	struct socket_wq *wq;
 542
 543	rcu_read_lock();
 544	if (unix_writable(sk)) {
 545		wq = rcu_dereference(sk->sk_wq);
 546		if (skwq_has_sleeper(wq))
 547			wake_up_interruptible_sync_poll(&wq->wait,
 548				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 549		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 550	}
 551	rcu_read_unlock();
 552}
 553
 554	/* When a dgram socket disconnects (or changes its peer), we clear its receive
 555	 * queue of packets that arrived from the previous peer. First, this allows us
 556	 * to do flow control based only on wmem_alloc; second, a sk connected to a
 557	 * peer may receive messages only from that peer. */
 558static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 559{
 560	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 561		skb_queue_purge(&sk->sk_receive_queue);
 562		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 563
 564		/* If one link of a bidirectional dgram pipe is disconnected,
 565		 * we signal an error. Messages are lost. Do not do this
 566		 * when the peer was not connected to us.
 567		 */
 568		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
 569			WRITE_ONCE(other->sk_err, ECONNRESET);
 570			sk_error_report(other);
 571		}
 572	}
 573	other->sk_state = TCP_CLOSE;
 574}
 575
 576static void unix_sock_destructor(struct sock *sk)
 577{
 578	struct unix_sock *u = unix_sk(sk);
 579
 580	skb_queue_purge(&sk->sk_receive_queue);
 581
 582	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
 583	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 584	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
 585	if (!sock_flag(sk, SOCK_DEAD)) {
 586		pr_info("Attempt to release alive unix socket: %p\n", sk);
 587		return;
 588	}
 589
 590	if (u->addr)
 591		unix_release_addr(u->addr);
 592
 593	atomic_long_dec(&unix_nr_socks);
 594	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 595#ifdef UNIX_REFCNT_DEBUG
 596	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
 597		atomic_long_read(&unix_nr_socks));
 598#endif
 599}
 600
 601static void unix_release_sock(struct sock *sk, int embrion)
 602{
 603	struct unix_sock *u = unix_sk(sk);
 604	struct sock *skpair;
 605	struct sk_buff *skb;
 606	struct path path;
 607	int state;
 608
 609	unix_remove_socket(sock_net(sk), sk);
 610	unix_remove_bsd_socket(sk);
 611
 612	/* Clear state */
 613	unix_state_lock(sk);
 614	sock_orphan(sk);
 615	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 616	path	     = u->path;
 617	u->path.dentry = NULL;
 618	u->path.mnt = NULL;
 619	state = sk->sk_state;
 620	sk->sk_state = TCP_CLOSE;
 621
 622	skpair = unix_peer(sk);
 623	unix_peer(sk) = NULL;
 624
 625	unix_state_unlock(sk);
 626
 627#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 628	if (u->oob_skb) {
 629		kfree_skb(u->oob_skb);
 630		u->oob_skb = NULL;
 631	}
 632#endif
 633
 634	wake_up_interruptible_all(&u->peer_wait);
 635
 636	if (skpair != NULL) {
 637		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 638			unix_state_lock(skpair);
 639			/* No more writes */
 640			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
 641			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
 642				WRITE_ONCE(skpair->sk_err, ECONNRESET);
 643			unix_state_unlock(skpair);
 644			skpair->sk_state_change(skpair);
 645			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 646		}
 647
 648		unix_dgram_peer_wake_disconnect(sk, skpair);
 649		sock_put(skpair); /* It may now die */
 650	}
 651
 652	/* Try to flush out this socket. Throw out the buffers at least. */
 653
 654	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 655		if (state == TCP_LISTEN)
 656			unix_release_sock(skb->sk, 1);
 657		/* passed fds are erased in the kfree_skb hook	      */
 658		UNIXCB(skb).consumed = skb->len;
 659		kfree_skb(skb);
 660	}
 661
 662	if (path.dentry)
 663		path_put(&path);
 664
 665	sock_put(sk);
 666
 667	/* ---- Socket is dead now and most probably destroyed ---- */
 668
 669	/*
 670	 * Fixme: BSD difference: In BSD all sockets connected to us get
 671	 *	  ECONNRESET and we die on the spot. In Linux we behave
 672	 *	  like files and pipes do and wait for the last
 673	 *	  dereference.
 674	 *
 675	 * Can't we simply set sock->err?
 676	 *
 677	 *	  What does the above comment talk about? --ANK(980817)
 678	 */
 679
 680	if (READ_ONCE(unix_tot_inflight))
 681		unix_gc();		/* Garbage collect fds */
 682}
 683
 684static void init_peercred(struct sock *sk)
 685{
 686	const struct cred *old_cred;
 687	struct pid *old_pid;
 688
 689	spin_lock(&sk->sk_peer_lock);
 690	old_pid = sk->sk_peer_pid;
 691	old_cred = sk->sk_peer_cred;
 692	sk->sk_peer_pid  = get_pid(task_tgid(current));
 693	sk->sk_peer_cred = get_current_cred();
 694	spin_unlock(&sk->sk_peer_lock);
 695
 696	put_pid(old_pid);
 697	put_cred(old_cred);
 698}
 699
 700static void copy_peercred(struct sock *sk, struct sock *peersk)
 701{
 702	const struct cred *old_cred;
 703	struct pid *old_pid;
 704
 705	if (sk < peersk) {
 706		spin_lock(&sk->sk_peer_lock);
 707		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 708	} else {
 709		spin_lock(&peersk->sk_peer_lock);
 710		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 711	}
 712	old_pid = sk->sk_peer_pid;
 713	old_cred = sk->sk_peer_cred;
 714	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
 715	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 716
 717	spin_unlock(&sk->sk_peer_lock);
 718	spin_unlock(&peersk->sk_peer_lock);
 719
 720	put_pid(old_pid);
 721	put_cred(old_cred);
 722}
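
/* The pointer comparison above imposes a global lock order on the pair:
 * whichever sock has the lower address is locked first, regardless of
 * which one is "sk".  Two sockets copying credentials from each other
 * concurrently therefore cannot deadlock in an ABBA pattern.
 */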
 723
 724static int unix_listen(struct socket *sock, int backlog)
 725{
 726	int err;
 727	struct sock *sk = sock->sk;
 728	struct unix_sock *u = unix_sk(sk);
 729
 730	err = -EOPNOTSUPP;
 731	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 732		goto out;	/* Only stream/seqpacket sockets accept */
 733	err = -EINVAL;
 734	if (!READ_ONCE(u->addr))
 735		goto out;	/* No listens on an unbound socket */
 736	unix_state_lock(sk);
 737	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
 738		goto out_unlock;
 739	if (backlog > sk->sk_max_ack_backlog)
 740		wake_up_interruptible_all(&u->peer_wait);
 741	sk->sk_max_ack_backlog	= backlog;
 742	sk->sk_state		= TCP_LISTEN;
 743	/* set credentials so connect can copy them */
 744	init_peercred(sk);
 745	err = 0;
 746
 747out_unlock:
 748	unix_state_unlock(sk);
 749out:
 750	return err;
 751}
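
/* The credentials captured by init_peercred() above are what a connected
 * client later reads back.  A hedged userspace sketch (struct ucred needs
 * _GNU_SOURCE with glibc):
 *
 *	struct ucred uc;
 *	socklen_t len = sizeof(uc);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &uc, &len);
 *	// uc.pid/uc.uid/uc.gid describe the process that called listen()
 */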
 752
 753static int unix_release(struct socket *);
 754static int unix_bind(struct socket *, struct sockaddr *, int);
 755static int unix_stream_connect(struct socket *, struct sockaddr *,
 756			       int addr_len, int flags);
 757static int unix_socketpair(struct socket *, struct socket *);
 758static int unix_accept(struct socket *, struct socket *, int, bool);
 759static int unix_getname(struct socket *, struct sockaddr *, int);
 760static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
 761static __poll_t unix_dgram_poll(struct file *, struct socket *,
 762				    poll_table *);
 763static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 764#ifdef CONFIG_COMPAT
 765static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 766#endif
 767static int unix_shutdown(struct socket *, int);
 768static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 769static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
 770static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
 771				       struct pipe_inode_info *, size_t size,
 772				       unsigned int flags);
 773static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 774static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 775static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 776static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 777static int unix_dgram_connect(struct socket *, struct sockaddr *,
 778			      int, int);
 779static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
 780static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
 781				  int);
 782
 783#ifdef CONFIG_PROC_FS
 784static int unix_count_nr_fds(struct sock *sk)
 785{
 786	struct sk_buff *skb;
 787	struct unix_sock *u;
 788	int nr_fds = 0;
 789
 790	spin_lock(&sk->sk_receive_queue.lock);
 791	skb = skb_peek(&sk->sk_receive_queue);
 792	while (skb) {
 793		u = unix_sk(skb->sk);
 794		nr_fds += atomic_read(&u->scm_stat.nr_fds);
 795		skb = skb_peek_next(skb, &sk->sk_receive_queue);
 796	}
 797	spin_unlock(&sk->sk_receive_queue.lock);
 798
 799	return nr_fds;
 800}
 801
 802static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 803{
 804	struct sock *sk = sock->sk;
 805	unsigned char s_state;
 806	struct unix_sock *u;
 807	int nr_fds = 0;
 808
 809	if (sk) {
 810		s_state = READ_ONCE(sk->sk_state);
 811		u = unix_sk(sk);
 812
 813		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
 814		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
 815		 * SOCK_DGRAM is ordinary. So, no lock is needed.
 816		 */
 817		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
 818			nr_fds = atomic_read(&u->scm_stat.nr_fds);
 819		else if (s_state == TCP_LISTEN)
 820			nr_fds = unix_count_nr_fds(sk);
 821
 822		seq_printf(m, "scm_fds: %u\n", nr_fds);
 823	}
 824}
 825#else
 826#define unix_show_fdinfo NULL
 827#endif
 828
 829static const struct proto_ops unix_stream_ops = {
 830	.family =	PF_UNIX,
 831	.owner =	THIS_MODULE,
 832	.release =	unix_release,
 833	.bind =		unix_bind,
 834	.connect =	unix_stream_connect,
 835	.socketpair =	unix_socketpair,
 836	.accept =	unix_accept,
 837	.getname =	unix_getname,
 838	.poll =		unix_poll,
 839	.ioctl =	unix_ioctl,
 840#ifdef CONFIG_COMPAT
 841	.compat_ioctl =	unix_compat_ioctl,
 842#endif
 843	.listen =	unix_listen,
 844	.shutdown =	unix_shutdown,
 845	.sendmsg =	unix_stream_sendmsg,
 846	.recvmsg =	unix_stream_recvmsg,
 847	.read_skb =	unix_stream_read_skb,
 848	.mmap =		sock_no_mmap,
 849	.splice_read =	unix_stream_splice_read,
 850	.set_peek_off =	sk_set_peek_off,
 851	.show_fdinfo =	unix_show_fdinfo,
 852};
 853
 854static const struct proto_ops unix_dgram_ops = {
 855	.family =	PF_UNIX,
 856	.owner =	THIS_MODULE,
 857	.release =	unix_release,
 858	.bind =		unix_bind,
 859	.connect =	unix_dgram_connect,
 860	.socketpair =	unix_socketpair,
 861	.accept =	sock_no_accept,
 862	.getname =	unix_getname,
 863	.poll =		unix_dgram_poll,
 864	.ioctl =	unix_ioctl,
 865#ifdef CONFIG_COMPAT
 866	.compat_ioctl =	unix_compat_ioctl,
 867#endif
 868	.listen =	sock_no_listen,
 869	.shutdown =	unix_shutdown,
 870	.sendmsg =	unix_dgram_sendmsg,
 871	.read_skb =	unix_read_skb,
 872	.recvmsg =	unix_dgram_recvmsg,
 873	.mmap =		sock_no_mmap,
 874	.set_peek_off =	sk_set_peek_off,
 875	.show_fdinfo =	unix_show_fdinfo,
 876};
 877
 878static const struct proto_ops unix_seqpacket_ops = {
 879	.family =	PF_UNIX,
 880	.owner =	THIS_MODULE,
 881	.release =	unix_release,
 882	.bind =		unix_bind,
 883	.connect =	unix_stream_connect,
 884	.socketpair =	unix_socketpair,
 885	.accept =	unix_accept,
 886	.getname =	unix_getname,
 887	.poll =		unix_dgram_poll,
 888	.ioctl =	unix_ioctl,
 889#ifdef CONFIG_COMPAT
 890	.compat_ioctl =	unix_compat_ioctl,
 891#endif
 892	.listen =	unix_listen,
 893	.shutdown =	unix_shutdown,
 894	.sendmsg =	unix_seqpacket_sendmsg,
 895	.recvmsg =	unix_seqpacket_recvmsg,
 896	.mmap =		sock_no_mmap,
 897	.set_peek_off =	sk_set_peek_off,
 898	.show_fdinfo =	unix_show_fdinfo,
 899};
 900
 901static void unix_close(struct sock *sk, long timeout)
 902{
 903	/* Nothing to do here, unix socket does not need a ->close().
 904	 * This is merely for sockmap.
 905	 */
 906}
 907
 908static void unix_unhash(struct sock *sk)
 909{
 910	/* Nothing to do here, unix socket does not need a ->unhash().
 911	 * This is merely for sockmap.
 912	 */
 913}
 914
 915static bool unix_bpf_bypass_getsockopt(int level, int optname)
 916{
 917	if (level == SOL_SOCKET) {
 918		switch (optname) {
 919		case SO_PEERPIDFD:
 920			return true;
 921		default:
 922			return false;
 923		}
 924	}
 925
 926	return false;
 927}
 928
 929struct proto unix_dgram_proto = {
 930	.name			= "UNIX",
 931	.owner			= THIS_MODULE,
 932	.obj_size		= sizeof(struct unix_sock),
 933	.close			= unix_close,
 934	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 935#ifdef CONFIG_BPF_SYSCALL
 936	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
 937#endif
 938};
 939
 940struct proto unix_stream_proto = {
 941	.name			= "UNIX-STREAM",
 942	.owner			= THIS_MODULE,
 943	.obj_size		= sizeof(struct unix_sock),
 944	.close			= unix_close,
 945	.unhash			= unix_unhash,
 946	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 947#ifdef CONFIG_BPF_SYSCALL
 948	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
 949#endif
 950};
 951
 952static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
 953{
 954	struct unix_sock *u;
 955	struct sock *sk;
 956	int err;
 957
 958	atomic_long_inc(&unix_nr_socks);
 959	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
 960		err = -ENFILE;
 961		goto err;
 962	}
 963
 964	if (type == SOCK_STREAM)
 965		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
 966		else /* dgram and seqpacket */
 967		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
 968
 969	if (!sk) {
 970		err = -ENOMEM;
 971		goto err;
 972	}
 973
 974	sock_init_data(sock, sk);
 975
 976	sk->sk_hash		= unix_unbound_hash(sk);
 977	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
 978	sk->sk_write_space	= unix_write_space;
 979	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
 980	sk->sk_destruct		= unix_sock_destructor;
 981	u = unix_sk(sk);
 982	u->inflight = 0;
 983	u->path.dentry = NULL;
 984	u->path.mnt = NULL;
 985	spin_lock_init(&u->lock);
 986	INIT_LIST_HEAD(&u->link);
 987	mutex_init(&u->iolock); /* single task reading lock */
 988	mutex_init(&u->bindlock); /* single task binding lock */
 989	init_waitqueue_head(&u->peer_wait);
 990	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 991	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
 992	unix_insert_unbound_socket(net, sk);
 993
 994	sock_prot_inuse_add(net, sk->sk_prot, 1);
 995
 996	return sk;
 997
 998err:
 999	atomic_long_dec(&unix_nr_socks);
1000	return ERR_PTR(err);
1001}
1002
1003static int unix_create(struct net *net, struct socket *sock, int protocol,
1004		       int kern)
1005{
1006	struct sock *sk;
1007
1008	if (protocol && protocol != PF_UNIX)
1009		return -EPROTONOSUPPORT;
1010
1011	sock->state = SS_UNCONNECTED;
1012
1013	switch (sock->type) {
1014	case SOCK_STREAM:
1015		sock->ops = &unix_stream_ops;
1016		break;
1017		/*
1018		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
1019		 *	nothing uses it.
1020		 */
1021	case SOCK_RAW:
1022		sock->type = SOCK_DGRAM;
1023		fallthrough;
1024	case SOCK_DGRAM:
1025		sock->ops = &unix_dgram_ops;
1026		break;
1027	case SOCK_SEQPACKET:
1028		sock->ops = &unix_seqpacket_ops;
1029		break;
1030	default:
1031		return -ESOCKTNOSUPPORT;
1032	}
1033
1034	sk = unix_create1(net, sock, kern, sock->type);
1035	if (IS_ERR(sk))
1036		return PTR_ERR(sk);
1037
1038	return 0;
1039}
1040
1041static int unix_release(struct socket *sock)
1042{
1043	struct sock *sk = sock->sk;
1044
1045	if (!sk)
1046		return 0;
1047
1048	sk->sk_prot->close(sk, 0);
1049	unix_release_sock(sk, 0);
1050	sock->sk = NULL;
1051
1052	return 0;
1053}
1054
1055static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1056				  int type)
1057{
1058	struct inode *inode;
1059	struct path path;
1060	struct sock *sk;
1061	int err;
1062
1063	unix_mkname_bsd(sunaddr, addr_len);
1064	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1065	if (err)
1066		goto fail;
1067
1068	err = path_permission(&path, MAY_WRITE);
1069	if (err)
1070		goto path_put;
1071
1072	err = -ECONNREFUSED;
1073	inode = d_backing_inode(path.dentry);
1074	if (!S_ISSOCK(inode->i_mode))
1075		goto path_put;
1076
1077	sk = unix_find_socket_byinode(inode);
1078	if (!sk)
1079		goto path_put;
1080
1081	err = -EPROTOTYPE;
1082	if (sk->sk_type == type)
1083		touch_atime(&path);
1084	else
1085		goto sock_put;
1086
1087	path_put(&path);
1088
1089	return sk;
1090
1091sock_put:
1092	sock_put(sk);
1093path_put:
1094	path_put(&path);
1095fail:
1096	return ERR_PTR(err);
1097}
1098
1099static struct sock *unix_find_abstract(struct net *net,
1100				       struct sockaddr_un *sunaddr,
1101				       int addr_len, int type)
1102{
1103	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1104	struct dentry *dentry;
1105	struct sock *sk;
1106
1107	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1108	if (!sk)
1109		return ERR_PTR(-ECONNREFUSED);
1110
1111	dentry = unix_sk(sk)->path.dentry;
1112	if (dentry)
1113		touch_atime(&unix_sk(sk)->path);
1114
1115	return sk;
1116}
1117
1118static struct sock *unix_find_other(struct net *net,
1119				    struct sockaddr_un *sunaddr,
1120				    int addr_len, int type)
1121{
1122	struct sock *sk;
1123
1124	if (sunaddr->sun_path[0])
1125		sk = unix_find_bsd(sunaddr, addr_len, type);
1126	else
1127		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1128
1129	return sk;
1130}
1131
1132static int unix_autobind(struct sock *sk)
1133{
1134	struct unix_sock *u = unix_sk(sk);
1135	unsigned int new_hash, old_hash;
1136	struct net *net = sock_net(sk);
1137	struct unix_address *addr;
1138	u32 lastnum, ordernum;
1139	int err;
1140
1141	err = mutex_lock_interruptible(&u->bindlock);
1142	if (err)
1143		return err;
1144
1145	if (u->addr)
1146		goto out;
1147
1148	err = -ENOMEM;
1149	addr = kzalloc(sizeof(*addr) +
1150		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1151	if (!addr)
1152		goto out;
1153
1154	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1155	addr->name->sun_family = AF_UNIX;
1156	refcount_set(&addr->refcnt, 1);
1157
1158	old_hash = sk->sk_hash;
1159	ordernum = get_random_u32();
1160	lastnum = ordernum & 0xFFFFF;
1161retry:
1162	ordernum = (ordernum + 1) & 0xFFFFF;
1163	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1164
1165	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1166	unix_table_double_lock(net, old_hash, new_hash);
1167
1168	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1169		unix_table_double_unlock(net, old_hash, new_hash);
1170
1171		/* __unix_find_socket_byname() may take a long time if many names
1172		 * are already in use.
1173		 */
1174		cond_resched();
1175
1176		if (ordernum == lastnum) {
1177			/* Give up if all names seem to be in use. */
1178			err = -ENOSPC;
1179			unix_release_addr(addr);
1180			goto out;
1181		}
1182
1183		goto retry;
1184	}
1185
1186	__unix_set_addr_hash(net, sk, addr, new_hash);
1187	unix_table_double_unlock(net, old_hash, new_hash);
1188	err = 0;
1189
1190out:	mutex_unlock(&u->bindlock);
1191	return err;
1192}
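
/* What the autobind above looks like from userspace, as a sketch:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	socklen_t len = sizeof(a);
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(sa_family_t));
 *	getsockname(fd, (struct sockaddr *)&a, &len);
 *	// len == offsetof(struct sockaddr_un, sun_path) + 6, and sun_path
 *	// holds a leading NUL plus five random hex digits, e.g. "\0a3f01"
 */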
1193
1194static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1195			 int addr_len)
1196{
1197	umode_t mode = S_IFSOCK |
1198	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1199	struct unix_sock *u = unix_sk(sk);
1200	unsigned int new_hash, old_hash;
1201	struct net *net = sock_net(sk);
1202	struct mnt_idmap *idmap;
1203	struct unix_address *addr;
1204	struct dentry *dentry;
1205	struct path parent;
1206	int err;
1207
1208	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1209	addr = unix_create_addr(sunaddr, addr_len);
1210	if (!addr)
1211		return -ENOMEM;
1212
1213	/*
1214	 * Get the parent directory and calculate the hash for the last
1215	 * component.
1216	 */
1217	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1218	if (IS_ERR(dentry)) {
1219		err = PTR_ERR(dentry);
1220		goto out;
1221	}
1222
1223	/*
1224	 * All right, let's create it.
1225	 */
1226	idmap = mnt_idmap(parent.mnt);
1227	err = security_path_mknod(&parent, dentry, mode, 0);
1228	if (!err)
1229		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1230	if (err)
1231		goto out_path;
1232	err = mutex_lock_interruptible(&u->bindlock);
1233	if (err)
1234		goto out_unlink;
1235	if (u->addr)
1236		goto out_unlock;
1237
1238	old_hash = sk->sk_hash;
1239	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1240	unix_table_double_lock(net, old_hash, new_hash);
1241	u->path.mnt = mntget(parent.mnt);
1242	u->path.dentry = dget(dentry);
1243	__unix_set_addr_hash(net, sk, addr, new_hash);
1244	unix_table_double_unlock(net, old_hash, new_hash);
1245	unix_insert_bsd_socket(sk);
1246	mutex_unlock(&u->bindlock);
1247	done_path_create(&parent, dentry);
1248	return 0;
1249
1250out_unlock:
1251	mutex_unlock(&u->bindlock);
1252	err = -EINVAL;
1253out_unlink:
1254	/* failed after successful mknod?  unlink what we'd created... */
1255	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1256out_path:
1257	done_path_create(&parent, dentry);
1258out:
1259	unix_release_addr(addr);
1260	return err == -EEXIST ? -EADDRINUSE : err;
1261}
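
/* Note on the -EADDRINUSE mapping above: binding to a path that already
 * exists fails even when no live socket is bound there anymore, so servers
 * conventionally remove a stale path first.  Sketch (path assumed):
 *
 *	unlink("/tmp/u.sock");				// ignore ENOENT
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));	// then bind
 */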
1262
1263static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1264			      int addr_len)
1265{
1266	struct unix_sock *u = unix_sk(sk);
1267	unsigned int new_hash, old_hash;
1268	struct net *net = sock_net(sk);
1269	struct unix_address *addr;
1270	int err;
1271
1272	addr = unix_create_addr(sunaddr, addr_len);
1273	if (!addr)
1274		return -ENOMEM;
1275
1276	err = mutex_lock_interruptible(&u->bindlock);
1277	if (err)
1278		goto out;
1279
1280	if (u->addr) {
1281		err = -EINVAL;
1282		goto out_mutex;
1283	}
1284
1285	old_hash = sk->sk_hash;
1286	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1287	unix_table_double_lock(net, old_hash, new_hash);
1288
1289	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1290		goto out_spin;
1291
1292	__unix_set_addr_hash(net, sk, addr, new_hash);
1293	unix_table_double_unlock(net, old_hash, new_hash);
1294	mutex_unlock(&u->bindlock);
1295	return 0;
1296
1297out_spin:
1298	unix_table_double_unlock(net, old_hash, new_hash);
1299	err = -EADDRINUSE;
1300out_mutex:
1301	mutex_unlock(&u->bindlock);
1302out:
1303	unix_release_addr(addr);
1304	return err;
1305}
1306
1307static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1308{
1309	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1310	struct sock *sk = sock->sk;
1311	int err;
1312
1313	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1314	    sunaddr->sun_family == AF_UNIX)
1315		return unix_autobind(sk);
1316
1317	err = unix_validate_addr(sunaddr, addr_len);
1318	if (err)
1319		return err;
1320
1321	if (sunaddr->sun_path[0])
1322		err = unix_bind_bsd(sk, sunaddr, addr_len);
1323	else
1324		err = unix_bind_abstract(sk, sunaddr, addr_len);
1325
1326	return err;
1327}
1328
1329static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1330{
1331	if (unlikely(sk1 == sk2) || !sk2) {
1332		unix_state_lock(sk1);
1333		return;
1334	}
1335	if (sk1 > sk2)
1336		swap(sk1, sk2);
1337
1338	unix_state_lock(sk1);
1339	unix_state_lock_nested(sk2, U_LOCK_SECOND);
1340}
1341
1342static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1343{
1344	if (unlikely(sk1 == sk2) || !sk2) {
1345		unix_state_unlock(sk1);
1346		return;
1347	}
1348	unix_state_unlock(sk1);
1349	unix_state_unlock(sk2);
1350}
1351
1352static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1353			      int alen, int flags)
1354{
1355	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1356	struct sock *sk = sock->sk;
1357	struct sock *other;
1358	int err;
1359
1360	err = -EINVAL;
1361	if (alen < offsetofend(struct sockaddr, sa_family))
1362		goto out;
1363
1364	if (addr->sa_family != AF_UNSPEC) {
1365		err = unix_validate_addr(sunaddr, alen);
1366		if (err)
1367			goto out;
1368
1369		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1370		if (err)
1371			goto out;
1372
1373		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1374		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1375		    !READ_ONCE(unix_sk(sk)->addr)) {
1376			err = unix_autobind(sk);
1377			if (err)
1378				goto out;
1379		}
1380
1381restart:
1382		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1383		if (IS_ERR(other)) {
1384			err = PTR_ERR(other);
1385			goto out;
1386		}
1387
1388		unix_state_double_lock(sk, other);
1389
1390		/* Apparently VFS overslept socket death. Retry. */
1391		if (sock_flag(other, SOCK_DEAD)) {
1392			unix_state_double_unlock(sk, other);
1393			sock_put(other);
1394			goto restart;
1395		}
1396
1397		err = -EPERM;
1398		if (!unix_may_send(sk, other))
1399			goto out_unlock;
1400
1401		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1402		if (err)
1403			goto out_unlock;
1404
1405		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1406	} else {
1407		/*
1408		 *	1003.1g breaking connected state with AF_UNSPEC
1409		 */
1410		other = NULL;
1411		unix_state_double_lock(sk, other);
1412	}
1413
1414	/*
1415	 * If it was connected, reconnect.
1416	 */
1417	if (unix_peer(sk)) {
1418		struct sock *old_peer = unix_peer(sk);
1419
1420		unix_peer(sk) = other;
1421		if (!other)
1422			sk->sk_state = TCP_CLOSE;
1423		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1424
1425		unix_state_double_unlock(sk, other);
1426
1427		if (other != old_peer)
1428			unix_dgram_disconnected(sk, old_peer);
1429		sock_put(old_peer);
1430	} else {
1431		unix_peer(sk) = other;
1432		unix_state_double_unlock(sk, other);
1433	}
1434
1435	return 0;
1436
1437out_unlock:
1438	unix_state_double_unlock(sk, other);
1439	sock_put(other);
1440out:
1441	return err;
1442}
1443
1444static long unix_wait_for_peer(struct sock *other, long timeo)
1445	__releases(&unix_sk(other)->lock)
1446{
1447	struct unix_sock *u = unix_sk(other);
1448	int sched;
1449	DEFINE_WAIT(wait);
1450
1451	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1452
1453	sched = !sock_flag(other, SOCK_DEAD) &&
1454		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1455		unix_recvq_full_lockless(other);
1456
1457	unix_state_unlock(other);
1458
1459	if (sched)
1460		timeo = schedule_timeout(timeo);
1461
1462	finish_wait(&u->peer_wait, &wait);
1463	return timeo;
1464}
1465
1466static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1467			       int addr_len, int flags)
1468{
1469	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1470	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1471	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1472	struct net *net = sock_net(sk);
1473	struct sk_buff *skb = NULL;
1474	long timeo;
1475	int err;
1476	int st;
1477
1478	err = unix_validate_addr(sunaddr, addr_len);
1479	if (err)
1480		goto out;
1481
1482	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1483	if (err)
1484		goto out;
1485
1486	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1487	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1488	    !READ_ONCE(u->addr)) {
1489		err = unix_autobind(sk);
1490		if (err)
1491			goto out;
1492	}
1493
1494	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1495
1496	/* First of all allocate resources.
1497	   If we do it after the state is locked,
1498	   we will have to recheck everything again in any case.
1499	 */
1500
1501	/* create new sock for complete connection */
1502	newsk = unix_create1(net, NULL, 0, sock->type);
1503	if (IS_ERR(newsk)) {
1504		err = PTR_ERR(newsk);
1505		newsk = NULL;
1506		goto out;
1507	}
1508
1509	err = -ENOMEM;
1510
1511	/* Allocate skb for sending to listening sock */
1512	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1513	if (skb == NULL)
1514		goto out;
1515
1516restart:
1517	/*  Find listening sock. */
1518	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1519	if (IS_ERR(other)) {
1520		err = PTR_ERR(other);
1521		other = NULL;
1522		goto out;
1523	}
1524
1525	/* Latch state of peer */
1526	unix_state_lock(other);
1527
1528	/* Apparently VFS overslept socket death. Retry. */
1529	if (sock_flag(other, SOCK_DEAD)) {
1530		unix_state_unlock(other);
1531		sock_put(other);
1532		goto restart;
1533	}
1534
1535	err = -ECONNREFUSED;
1536	if (other->sk_state != TCP_LISTEN)
1537		goto out_unlock;
1538	if (other->sk_shutdown & RCV_SHUTDOWN)
1539		goto out_unlock;
1540
1541	if (unix_recvq_full(other)) {
1542		err = -EAGAIN;
1543		if (!timeo)
1544			goto out_unlock;
1545
1546		timeo = unix_wait_for_peer(other, timeo);
1547
1548		err = sock_intr_errno(timeo);
1549		if (signal_pending(current))
1550			goto out;
1551		sock_put(other);
1552		goto restart;
1553	}
1554
1555	/* Latch our state.
1556
1557	   This is a tricky place. We need to grab our state lock and cannot
1558	   drop the lock on the peer. It is dangerous because a deadlock is
1559	   possible. The connect-to-self case and a simultaneous
1560	   attempt to connect are eliminated by checking the socket
1561	   state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1562	   check this before attempting to grab the lock.
1563
1564	   And we have to recheck the state after the socket is locked.
1565	 */
1566	st = sk->sk_state;
1567
1568	switch (st) {
1569	case TCP_CLOSE:
1570		/* This is ok... continue with connect */
1571		break;
1572	case TCP_ESTABLISHED:
1573		/* Socket is already connected */
1574		err = -EISCONN;
1575		goto out_unlock;
1576	default:
1577		err = -EINVAL;
1578		goto out_unlock;
1579	}
1580
1581	unix_state_lock_nested(sk, U_LOCK_SECOND);
1582
1583	if (sk->sk_state != st) {
1584		unix_state_unlock(sk);
1585		unix_state_unlock(other);
1586		sock_put(other);
1587		goto restart;
1588	}
1589
1590	err = security_unix_stream_connect(sk, other, newsk);
1591	if (err) {
1592		unix_state_unlock(sk);
1593		goto out_unlock;
1594	}
1595
1596	/* The way is open! Quickly set all the necessary fields... */
1597
1598	sock_hold(sk);
1599	unix_peer(newsk)	= sk;
1600	newsk->sk_state		= TCP_ESTABLISHED;
1601	newsk->sk_type		= sk->sk_type;
1602	init_peercred(newsk);
1603	newu = unix_sk(newsk);
1604	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1605	otheru = unix_sk(other);
1606
1607	/* copy address information from listening to new sock
1608	 *
1609	 * The contents of *(otheru->addr) and otheru->path
1610	 * are seen fully set up here, since we have found
1611	 * otheru in hash under its lock.  Insertion into the
1612	 * hash chain we'd found it in had been done in an
1613	 * earlier critical section protected by the chain's lock,
1614	 * the same one where we'd set *(otheru->addr) contents,
1615	 * as well as otheru->path and otheru->addr itself.
1616	 *
1617	 * Using smp_store_release() here to set newu->addr
1618	 * is enough to make those stores, as well as stores
1619	 * to newu->path visible to anyone who gets newu->addr
1620	 * by smp_load_acquire().  IOW, the same guarantees
1621	 * as for unix_sock instances bound in unix_bind() or
1622	 * in unix_autobind().
1623	 */
1624	if (otheru->path.dentry) {
1625		path_get(&otheru->path);
1626		newu->path = otheru->path;
1627	}
1628	refcount_inc(&otheru->addr->refcnt);
1629	smp_store_release(&newu->addr, otheru->addr);
1630
1631	/* Set credentials */
1632	copy_peercred(sk, other);
1633
1634	sock->state	= SS_CONNECTED;
1635	sk->sk_state	= TCP_ESTABLISHED;
1636	sock_hold(newsk);
1637
1638	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1639	unix_peer(sk)	= newsk;
1640
1641	unix_state_unlock(sk);
1642
1643	/* queue the skb and tell the listening sock about the new connection */
1644	spin_lock(&other->sk_receive_queue.lock);
1645	__skb_queue_tail(&other->sk_receive_queue, skb);
1646	spin_unlock(&other->sk_receive_queue.lock);
1647	unix_state_unlock(other);
1648	other->sk_data_ready(other);
1649	sock_put(other);
1650	return 0;
1651
1652out_unlock:
1653	if (other)
1654		unix_state_unlock(other);
1655
1656out:
1657	kfree_skb(skb);
1658	if (newsk)
1659		unix_release_sock(newsk, 0);
1660	if (other)
1661		sock_put(other);
1662	return err;
1663}
1664
1665static int unix_socketpair(struct socket *socka, struct socket *sockb)
1666{
1667	struct sock *ska = socka->sk, *skb = sockb->sk;
1668
1669	/* Join our sockets back to back */
1670	sock_hold(ska);
1671	sock_hold(skb);
1672	unix_peer(ska) = skb;
1673	unix_peer(skb) = ska;
1674	init_peercred(ska);
1675	init_peercred(skb);
1676
1677	ska->sk_state = TCP_ESTABLISHED;
1678	skb->sk_state = TCP_ESTABLISHED;
1679	socka->state  = SS_CONNECTED;
1680	sockb->state  = SS_CONNECTED;
1681	return 0;
1682}
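
/* The back-to-back join above is what socketpair(2) exposes:
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "x", 1);	// readable on sv[1]
 *		// both ends are TCP_ESTABLISHED with each other as peer
 *	}
 */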
1683
1684static void unix_sock_inherit_flags(const struct socket *old,
1685				    struct socket *new)
1686{
1687	if (test_bit(SOCK_PASSCRED, &old->flags))
1688		set_bit(SOCK_PASSCRED, &new->flags);
1689	if (test_bit(SOCK_PASSPIDFD, &old->flags))
1690		set_bit(SOCK_PASSPIDFD, &new->flags);
1691	if (test_bit(SOCK_PASSSEC, &old->flags))
1692		set_bit(SOCK_PASSSEC, &new->flags);
1693}
1694
1695static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1696		       bool kern)
1697{
1698	struct sock *sk = sock->sk;
1699	struct sock *tsk;
1700	struct sk_buff *skb;
1701	int err;
1702
1703	err = -EOPNOTSUPP;
1704	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1705		goto out;
1706
1707	err = -EINVAL;
1708	if (sk->sk_state != TCP_LISTEN)
1709		goto out;
1710
1711	/* If socket state is TCP_LISTEN it cannot change (for now...),
1712	 * so no locks are necessary.
1713	 */
1714
1715	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1716				&err);
1717	if (!skb) {
1718		/* This means receive shutdown. */
1719		if (err == 0)
1720			err = -EINVAL;
1721		goto out;
1722	}
1723
1724	tsk = skb->sk;
1725	skb_free_datagram(sk, skb);
1726	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1727
1728	/* attach accepted sock to socket */
1729	unix_state_lock(tsk);
1730	newsock->state = SS_CONNECTED;
1731	unix_sock_inherit_flags(sock, newsock);
1732	sock_graft(tsk, newsock);
1733	unix_state_unlock(tsk);
1734	return 0;
1735
1736out:
1737	return err;
1738}
1739
1740
1741static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1742{
1743	struct sock *sk = sock->sk;
1744	struct unix_address *addr;
1745	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1746	int err = 0;
1747
1748	if (peer) {
1749		sk = unix_peer_get(sk);
1750
1751		err = -ENOTCONN;
1752		if (!sk)
1753			goto out;
1754		err = 0;
1755	} else {
1756		sock_hold(sk);
1757	}
1758
1759	addr = smp_load_acquire(&unix_sk(sk)->addr);
1760	if (!addr) {
1761		sunaddr->sun_family = AF_UNIX;
1762		sunaddr->sun_path[0] = 0;
1763		err = offsetof(struct sockaddr_un, sun_path);
1764	} else {
1765		err = addr->len;
1766		memcpy(sunaddr, addr->name, addr->len);
1767
1768		if (peer)
1769			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1770					       CGROUP_UNIX_GETPEERNAME);
1771		else
1772			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1773					       CGROUP_UNIX_GETSOCKNAME);
1774	}
1775	sock_put(sk);
1776out:
1777	return err;
1778}
1779
1780/* The "user->unix_inflight" variable is protected by the garbage
1781 * collection lock, and we just read it locklessly here. If you go
1782 * over the limit, there might be a tiny race in actually noticing
1783 * it across threads. Tough.
1784 */
1785static inline bool too_many_unix_fds(struct task_struct *p)
1786{
1787	struct user_struct *user = current_user();
1788
1789	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1790		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1791	return false;
1792}
1793
1794static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1795{
1796	int i;
1797
1798	if (too_many_unix_fds(current))
1799		return -ETOOMANYREFS;
1800
1801	/* Need to duplicate file references for the sake of garbage
1802	 * collection.  Otherwise a socket in the fps might become a
1803	 * candidate for GC while the skb is not yet queued.
1804	 */
1805	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1806	if (!UNIXCB(skb).fp)
1807		return -ENOMEM;
1808
1809	for (i = scm->fp->count - 1; i >= 0; i--)
1810		unix_inflight(scm->fp->user, scm->fp->fp[i]);
1811
1812	return 0;
1813}
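
/* The scm->fp list attached above originates from an SCM_RIGHTS control
 * message.  A minimal userspace sketch passing one descriptor ("sock" and
 * "fd_to_pass" are assumptions):
 *
 *	char data = 'x';
 *	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *	char buf[CMSG_SPACE(sizeof(int))] = { 0 };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = buf, .msg_controllen = sizeof(buf),
 *	};
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
 *
 *	c->cmsg_level = SOL_SOCKET;
 *	c->cmsg_type  = SCM_RIGHTS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
 *	sendmsg(sock, &mh, 0);
 */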
1814
1815static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1816{
1817	int i;
1818
1819	scm->fp = UNIXCB(skb).fp;
1820	UNIXCB(skb).fp = NULL;
1821
1822	for (i = scm->fp->count - 1; i >= 0; i--)
1823		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
1824}
1825
1826static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1827{
1828	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1829
1830	/*
1831	 * Garbage collection of unix sockets starts by selecting a set of
1832	 * candidate sockets which have reference only from being in flight
1833	 * (total_refs == inflight_refs).  This condition is checked once during
1834	 * the candidate collection phase, and candidates are marked as such, so
1835	 * that non-candidates can later be ignored.  While inflight_refs is
1836	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1837	 * is an instantaneous decision.
1838	 *
1839	 * Once a candidate, however, the socket must not be reinstalled into a
1840	 * file descriptor while the garbage collection is in progress.
1841	 *
1842	 * If the above conditions are met, then the directed graph of
1843	 * candidates (*) does not change while unix_gc_lock is held.
1844	 *
1845	 * Any operation that changes the file count through file descriptors
1846	 * (dup, close, sendmsg) does not change the graph since candidates are
1847	 * not installed in fds.
1848	 *
1849	 * Dequeueing a candidate via recvmsg would install it into an fd, but
1850	 * that takes unix_gc_lock to decrement the inflight count, so it's
1851	 * serialized with garbage collection.
1852	 *
1853	 * MSG_PEEK is special in that it does not change the inflight count,
1854	 * yet does install the socket into an fd.  The following lock/unlock
1855	 * pair is to ensure serialization with garbage collection.  It must be
1856	 * done between incrementing the file count and installing the file into
1857	 * an fd.
1858	 *
1859	 * If garbage collection starts after the barrier provided by the
1860	 * lock/unlock, then it will see the elevated refcount and not mark this
1861	 * as a candidate.  If a garbage collection is already in progress
1862	 * before the file count was incremented, then the lock/unlock pair will
1863	 * ensure that garbage collection is finished before progressing to
1864	 * installing the fd.
1865	 *
1866	 * (*) A -> B where B is on the queue of A or B is on the queue of C
1867	 * which is on the queue of listening socket A.
1868	 */
1869	spin_lock(&unix_gc_lock);
1870	spin_unlock(&unix_gc_lock);
1871}
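
/* A visible consequence of the MSG_PEEK handling above: each peek at an
 * SCM_RIGHTS message installs a fresh descriptor for the same file, which
 * the caller must close.  Sketch:
 *
 *	recvmsg(sock, &mh, MSG_PEEK);	// installs fd A
 *	recvmsg(sock, &mh, MSG_PEEK);	// installs fd B (same file, new fd)
 *	recvmsg(sock, &mh, 0);		// installs fd C and dequeues the skb
 */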
1872
1873static void unix_destruct_scm(struct sk_buff *skb)
1874{
1875	struct scm_cookie scm;
1876
1877	memset(&scm, 0, sizeof(scm));
1878	scm.pid  = UNIXCB(skb).pid;
1879	if (UNIXCB(skb).fp)
1880		unix_detach_fds(&scm, skb);
1881
1882	/* Alas, it calls VFS */
1883	/* So fscking what? fput() has been SMP-safe since last summer */
1884	scm_destroy(&scm);
1885	sock_wfree(skb);
1886}
1887
1888static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1889{
1890	int err = 0;
1891
1892	UNIXCB(skb).pid  = get_pid(scm->pid);
1893	UNIXCB(skb).uid = scm->creds.uid;
1894	UNIXCB(skb).gid = scm->creds.gid;
1895	UNIXCB(skb).fp = NULL;
1896	unix_get_secdata(scm, skb);
1897	if (scm->fp && send_fds)
1898		err = unix_attach_fds(scm, skb);
1899
1900	skb->destructor = unix_destruct_scm;
1901	return err;
1902}
1903
1904static bool unix_passcred_enabled(const struct socket *sock,
1905				  const struct sock *other)
1906{
1907	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1908	       test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1909	       !other->sk_socket ||
1910	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1911	       test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1912}
1913
1914/*
1915	 * Some apps rely on write() giving SCM_CREDENTIALS.
1916	 * We include credentials if the source or destination socket
1917	 * asserted SOCK_PASSCRED or SOCK_PASSPIDFD.
1918 */
1919static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1920			    const struct sock *other)
1921{
1922	if (UNIXCB(skb).pid)
1923		return;
1924	if (unix_passcred_enabled(sock, other)) {
1925		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1926		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1927	}
1928}
1929
1930static bool unix_skb_scm_eq(struct sk_buff *skb,
1931			    struct scm_cookie *scm)
1932{
1933	return UNIXCB(skb).pid == scm->pid &&
1934	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1935	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1936	       unix_secdata_eq(scm, skb);
1937}
1938
1939static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1940{
1941	struct scm_fp_list *fp = UNIXCB(skb).fp;
1942	struct unix_sock *u = unix_sk(sk);
1943
1944	if (unlikely(fp && fp->count))
1945		atomic_add(fp->count, &u->scm_stat.nr_fds);
1946}
1947
1948static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1949{
1950	struct scm_fp_list *fp = UNIXCB(skb).fp;
1951	struct unix_sock *u = unix_sk(sk);
1952
1953	if (unlikely(fp && fp->count))
1954		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1955}
1956
1957/*
1958 *	Send AF_UNIX data.
1959 */
1960
1961static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1962			      size_t len)
1963{
1964	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1965	struct sock *sk = sock->sk, *other = NULL;
1966	struct unix_sock *u = unix_sk(sk);
1967	struct scm_cookie scm;
1968	struct sk_buff *skb;
1969	int data_len = 0;
1970	int sk_locked;
1971	long timeo;
1972	int err;
1973
1974	err = scm_send(sock, msg, &scm, false);
1975	if (err < 0)
1976		return err;
1977
1978	wait_for_unix_gc(scm.fp);
1979
1980	err = -EOPNOTSUPP;
1981	if (msg->msg_flags&MSG_OOB)
1982		goto out;
1983
1984	if (msg->msg_namelen) {
1985		err = unix_validate_addr(sunaddr, msg->msg_namelen);
1986		if (err)
1987			goto out;
1988
1989		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1990							    msg->msg_name,
1991							    &msg->msg_namelen,
1992							    NULL);
1993		if (err)
1994			goto out;
1995	} else {
1996		sunaddr = NULL;
1997		err = -ENOTCONN;
1998		other = unix_peer_get(sk);
1999		if (!other)
2000			goto out;
2001	}
2002
2003	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
2004	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
2005	    !READ_ONCE(u->addr)) {
2006		err = unix_autobind(sk);
2007		if (err)
2008			goto out;
2009	}
2010
2011	err = -EMSGSIZE;
2012	if (len > sk->sk_sndbuf - 32)
2013		goto out;
2014
2015	if (len > SKB_MAX_ALLOC) {
2016		data_len = min_t(size_t,
2017				 len - SKB_MAX_ALLOC,
2018				 MAX_SKB_FRAGS * PAGE_SIZE);
2019		data_len = PAGE_ALIGN(data_len);
2020
2021		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2022	}
2023
2024	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2025				   msg->msg_flags & MSG_DONTWAIT, &err,
2026				   PAGE_ALLOC_COSTLY_ORDER);
2027	if (skb == NULL)
2028		goto out;
2029
2030	err = unix_scm_to_skb(&scm, skb, true);
2031	if (err < 0)
2032		goto out_free;
2033
2034	skb_put(skb, len - data_len);
2035	skb->data_len = data_len;
2036	skb->len = len;
2037	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2038	if (err)
2039		goto out_free;
2040
2041	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2042
2043restart:
2044	if (!other) {
2045		err = -ECONNRESET;
2046		if (sunaddr == NULL)
2047			goto out_free;
2048
2049		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
2050					sk->sk_type);
2051		if (IS_ERR(other)) {
2052			err = PTR_ERR(other);
2053			other = NULL;
2054			goto out_free;
2055		}
2056	}
2057
2058	if (sk_filter(other, skb) < 0) {
2059		/* Toss the packet but do not return any error to the sender */
2060		err = len;
2061		goto out_free;
2062	}
2063
2064	sk_locked = 0;
2065	unix_state_lock(other);
2066restart_locked:
2067	err = -EPERM;
2068	if (!unix_may_send(sk, other))
2069		goto out_unlock;
2070
2071	if (unlikely(sock_flag(other, SOCK_DEAD))) {
2072		/*
2073		 *	Check with 1003.1g - what should
2074		 *	a datagram error return here?
2075		 */
2076		unix_state_unlock(other);
2077		sock_put(other);
2078
2079		if (!sk_locked)
2080			unix_state_lock(sk);
2081
2082		err = 0;
2083		if (sk->sk_type == SOCK_SEQPACKET) {
2084			/* We are here only when racing with unix_release_sock()
2085			 * is clearing @other. Never change state to TCP_CLOSE
2086			 * unlike SOCK_DGRAM wants.
2087			 */
2088			unix_state_unlock(sk);
2089			err = -EPIPE;
2090		} else if (unix_peer(sk) == other) {
2091			unix_peer(sk) = NULL;
2092			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2093
2094			sk->sk_state = TCP_CLOSE;
2095			unix_state_unlock(sk);
2096
2097			unix_dgram_disconnected(sk, other);
2098			sock_put(other);
2099			err = -ECONNREFUSED;
2100		} else {
2101			unix_state_unlock(sk);
2102		}
2103
2104		other = NULL;
2105		if (err)
2106			goto out_free;
2107		goto restart;
2108	}
2109
2110	err = -EPIPE;
2111	if (other->sk_shutdown & RCV_SHUTDOWN)
2112		goto out_unlock;
2113
2114	if (sk->sk_type != SOCK_SEQPACKET) {
2115		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2116		if (err)
2117			goto out_unlock;
2118	}
2119
2120	/* other == sk && unix_peer(other) != sk if
2121	 * - unix_peer(sk) == NULL, destination address bound to sk
2122	 * - unix_peer(sk) == sk at the time of the lookup but disconnected before the lock
2123	 */
2124	if (other != sk &&
2125	    unlikely(unix_peer(other) != sk &&
2126	    unix_recvq_full_lockless(other))) {
2127		if (timeo) {
2128			timeo = unix_wait_for_peer(other, timeo);
2129
2130			err = sock_intr_errno(timeo);
2131			if (signal_pending(current))
2132				goto out_free;
2133
2134			goto restart;
2135		}
2136
2137		if (!sk_locked) {
2138			unix_state_unlock(other);
2139			unix_state_double_lock(sk, other);
2140		}
2141
2142		if (unix_peer(sk) != other ||
2143		    unix_dgram_peer_wake_me(sk, other)) {
2144			err = -EAGAIN;
2145			sk_locked = 1;
2146			goto out_unlock;
2147		}
2148
2149		if (!sk_locked) {
2150			sk_locked = 1;
2151			goto restart_locked;
2152		}
2153	}
2154
2155	if (unlikely(sk_locked))
2156		unix_state_unlock(sk);
2157
2158	if (sock_flag(other, SOCK_RCVTSTAMP))
2159		__net_timestamp(skb);
2160	maybe_add_creds(skb, sock, other);
2161	scm_stat_add(other, skb);
2162	skb_queue_tail(&other->sk_receive_queue, skb);
2163	unix_state_unlock(other);
2164	other->sk_data_ready(other);
2165	sock_put(other);
2166	scm_destroy(&scm);
2167	return len;
2168
2169out_unlock:
2170	if (sk_locked)
2171		unix_state_unlock(sk);
2172	unix_state_unlock(other);
2173out_free:
2174	kfree_skb(skb);
2175out:
2176	if (other)
2177		sock_put(other);
2178	scm_destroy(&scm);
2179	return err;
2180}
2181
2182/* We use paged skbs for stream sockets, and limit occupancy to 32768
2183	 * bytes, with a minimum of a full page.
2184 */
2185#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
2186
2187#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2188static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2189		     struct scm_cookie *scm, bool fds_sent)
2190{
2191	struct unix_sock *ousk = unix_sk(other);
2192	struct sk_buff *skb;
2193	int err = 0;
2194
2195	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2196
2197	if (!skb)
2198		return err;
2199
2200	err = unix_scm_to_skb(scm, skb, !fds_sent);
2201	if (err < 0) {
2202		kfree_skb(skb);
2203		return err;
2204	}
2205	skb_put(skb, 1);
2206	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2207
2208	if (err) {
2209		kfree_skb(skb);
2210		return err;
2211	}
2212
2213	unix_state_lock(other);
2214
2215	if (sock_flag(other, SOCK_DEAD) ||
2216	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2217		unix_state_unlock(other);
2218		kfree_skb(skb);
2219		return -EPIPE;
2220	}
2221
2222	maybe_add_creds(skb, sock, other);
2223	skb_get(skb);
2224
2225	scm_stat_add(other, skb);
2226
2227	spin_lock(&other->sk_receive_queue.lock);
2228	if (ousk->oob_skb)
2229		consume_skb(ousk->oob_skb);
2230	WRITE_ONCE(ousk->oob_skb, skb);
2231	__skb_queue_tail(&other->sk_receive_queue, skb);
2232	spin_unlock(&other->sk_receive_queue.lock);
2233
2234	sk_send_sigurg(other);
2235	unix_state_unlock(other);
2236	other->sk_data_ready(other);
2237
2238	return err;
2239}
2240#endif
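
/* With the queueing above, only the most recently sent OOB byte remains
 * special; an unread predecessor loses its oob reference (consume_skb())
 * and is then read as ordinary stream data.  Sketch:
 *
 *	send(c, "a", 1, MSG_OOB);
 *	send(c, "b", 1, MSG_OOB);
 *	recv(s, &ch, 1, MSG_OOB);	// ch == 'b'
 *	recv(s, &ch, 1, 0);		// ch == 'a', now inline
 */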
2241
2242static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2243			       size_t len)
2244{
2245	struct sock *sk = sock->sk;
2246	struct sock *other = NULL;
2247	int err, size;
2248	struct sk_buff *skb;
2249	int sent = 0;
2250	struct scm_cookie scm;
2251	bool fds_sent = false;
2252	int data_len;
2253
2254	err = scm_send(sock, msg, &scm, false);
2255	if (err < 0)
2256		return err;
2257
2258	wait_for_unix_gc(scm.fp);
2259
2260	err = -EOPNOTSUPP;
2261	if (msg->msg_flags & MSG_OOB) {
2262#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2263		if (len)
2264			len--;
2265		else
2266#endif
2267			goto out_err;
2268	}
2269
2270	if (msg->msg_namelen) {
2271		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2272		goto out_err;
2273	} else {
2274		err = -ENOTCONN;
2275		other = unix_peer(sk);
2276		if (!other)
2277			goto out_err;
2278	}
2279
2280	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2281		goto pipe_err;
2282
2283	while (sent < len) {
2284		size = len - sent;
2285
2286		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2287			skb = sock_alloc_send_pskb(sk, 0, 0,
2288						   msg->msg_flags & MSG_DONTWAIT,
2289						   &err, 0);
2290		} else {
2291			/* Keep two messages in the pipe so it schedules better */
2292			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2293
2294			/* allow fallback to order-0 allocations */
2295			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2296
2297			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2298
2299			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2300
2301			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2302						   msg->msg_flags & MSG_DONTWAIT, &err,
2303						   get_order(UNIX_SKB_FRAGS_SZ));
2304		}
2305		if (!skb)
2306			goto out_err;
2307
2308		/* Only send the fds in the first buffer */
2309		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2310		if (err < 0) {
2311			kfree_skb(skb);
2312			goto out_err;
2313		}
2314		fds_sent = true;
2315
2316		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2317			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2318						   sk->sk_allocation);
2319			if (err < 0) {
2320				kfree_skb(skb);
2321				goto out_err;
2322			}
2323			size = err;
2324			refcount_add(size, &sk->sk_wmem_alloc);
2325		} else {
2326			skb_put(skb, size - data_len);
2327			skb->data_len = data_len;
2328			skb->len = size;
2329			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2330			if (err) {
2331				kfree_skb(skb);
2332				goto out_err;
2333			}
2334		}
2335
2336		unix_state_lock(other);
2337
2338		if (sock_flag(other, SOCK_DEAD) ||
2339		    (other->sk_shutdown & RCV_SHUTDOWN))
2340			goto pipe_err_free;
2341
2342		maybe_add_creds(skb, sock, other);
2343		scm_stat_add(other, skb);
2344		skb_queue_tail(&other->sk_receive_queue, skb);
2345		unix_state_unlock(other);
2346		other->sk_data_ready(other);
2347		sent += size;
2348	}
2349
2350#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2351	if (msg->msg_flags & MSG_OOB) {
2352		err = queue_oob(sock, msg, other, &scm, fds_sent);
2353		if (err)
2354			goto out_err;
2355		sent++;
2356	}
2357#endif
2358
2359	scm_destroy(&scm);
2360
2361	return sent;
2362
2363pipe_err_free:
2364	unix_state_unlock(other);
2365	kfree_skb(skb);
2366pipe_err:
2367	if (sent == 0 && !(msg->msg_flags & MSG_NOSIGNAL))
2368		send_sig(SIGPIPE, current, 0);
2369	err = -EPIPE;
2370out_err:
2371	scm_destroy(&scm);
2372	return sent ? : err;
2373}
2374
2375static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2376				  size_t len)
2377{
2378	int err;
2379	struct sock *sk = sock->sk;
2380
2381	err = sock_error(sk);
2382	if (err)
2383		return err;
2384
2385	if (sk->sk_state != TCP_ESTABLISHED)
2386		return -ENOTCONN;
2387
2388	if (msg->msg_namelen)
2389		msg->msg_namelen = 0;
2390
2391	return unix_dgram_sendmsg(sock, msg, len);
2392}
2393
2394static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2395				  size_t size, int flags)
2396{
2397	struct sock *sk = sock->sk;
2398
2399	if (sk->sk_state != TCP_ESTABLISHED)
2400		return -ENOTCONN;
2401
2402	return unix_dgram_recvmsg(sock, msg, size, flags);
2403}
2404
2405static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2406{
2407	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2408
2409	if (addr) {
2410		msg->msg_namelen = addr->len;
2411		memcpy(msg->msg_name, addr->name, addr->len);
2412	}
2413}
2414
2415int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2416			 int flags)
2417{
2418	struct scm_cookie scm;
2419	struct socket *sock = sk->sk_socket;
2420	struct unix_sock *u = unix_sk(sk);
2421	struct sk_buff *skb, *last;
2422	long timeo;
2423	int skip;
2424	int err;
2425
2426	err = -EOPNOTSUPP;
2427	if (flags & MSG_OOB)
2428		goto out;
2429
2430	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2431
2432	do {
2433		mutex_lock(&u->iolock);
2434
2435		skip = sk_peek_offset(sk, flags);
2436		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2437					      &skip, &err, &last);
2438		if (skb) {
2439			if (!(flags & MSG_PEEK))
2440				scm_stat_del(sk, skb);
2441			break;
2442		}
2443
2444		mutex_unlock(&u->iolock);
2445
2446		if (err != -EAGAIN)
2447			break;
2448	} while (timeo &&
2449		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2450					      &err, &timeo, last));
2451
2452	if (!skb) { /* implies iolock unlocked */
2453		unix_state_lock(sk);
2454		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2455		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2456		    (sk->sk_shutdown & RCV_SHUTDOWN))
2457			err = 0;
2458		unix_state_unlock(sk);
2459		goto out;
2460	}
2461
2462	if (wq_has_sleeper(&u->peer_wait))
2463		wake_up_interruptible_sync_poll(&u->peer_wait,
2464						EPOLLOUT | EPOLLWRNORM |
2465						EPOLLWRBAND);
2466
2467	if (msg->msg_name) {
2468		unix_copy_addr(msg, skb->sk);
2469
2470		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2471						      msg->msg_name,
2472						      &msg->msg_namelen);
2473	}
2474
2475	if (size > skb->len - skip)
2476		size = skb->len - skip;
2477	else if (size < skb->len - skip)
2478		msg->msg_flags |= MSG_TRUNC;
2479
2480	err = skb_copy_datagram_msg(skb, skip, msg, size);
2481	if (err)
2482		goto out_free;
2483
2484	if (sock_flag(sk, SOCK_RCVTSTAMP))
2485		__sock_recv_timestamp(msg, sk, skb);
2486
2487	memset(&scm, 0, sizeof(scm));
2488
2489	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2490	unix_set_secdata(&scm, skb);
2491
2492	if (!(flags & MSG_PEEK)) {
2493		if (UNIXCB(skb).fp)
2494			unix_detach_fds(&scm, skb);
2495
2496		sk_peek_offset_bwd(sk, skb->len);
2497	} else {
2498		/* It is questionable what to do with fds on PEEK. We could:
2499		   - not return fds - good, but too simple 8)
2500		   - return fds, and not return them on read (old strategy,
2501		     apparently wrong)
2502		   - clone fds (chosen for now as the most universal
2503		     solution)
2504
2505		   POSIX 1003.1g does not actually define this clearly
2506		   at all. POSIX 1003.1g doesn't define a lot of things
2507		   clearly, however!
2508
2509		*/
2510
2511		sk_peek_offset_fwd(sk, size);
2512
2513		if (UNIXCB(skb).fp)
2514			unix_peek_fds(&scm, skb);
2515	}
2516	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2517
2518	scm_recv_unix(sock, msg, &scm, flags);
2519
2520out_free:
2521	skb_free_datagram(sk, skb);
2522	mutex_unlock(&u->iolock);
2523out:
2524	return err;
2525}
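/* PEEK + SCM_RIGHTS note (hypothetical userspace view): since fds are
 * cloned on MSG_PEEK above, a peeking recvmsg() installs one set of
 * descriptors and the later real read installs another, so a peeking
 * caller must close both sets:
 *
 *	recvmsg(fd, &msg, MSG_PEEK);	(fds, if any, cloned here)
 *	recvmsg(fd, &msg, 0);		(and cloned again here)
 */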
2526
2527static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2528			      int flags)
2529{
2530	struct sock *sk = sock->sk;
2531
2532#ifdef CONFIG_BPF_SYSCALL
2533	const struct proto *prot = READ_ONCE(sk->sk_prot);
2534
2535	if (prot != &unix_dgram_proto)
2536		return prot->recvmsg(sk, msg, size, flags, NULL);
2537#endif
2538	return __unix_dgram_recvmsg(sk, msg, size, flags);
2539}
2540
2541static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2542{
2543	struct unix_sock *u = unix_sk(sk);
2544	struct sk_buff *skb;
2545	int err;
2546
2547	mutex_lock(&u->iolock);
2548	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2549	mutex_unlock(&u->iolock);
2550	if (!skb)
2551		return err;
2552
2553	return recv_actor(sk, skb);
2554}
2555
2556/*
2557 *	Sleep until more data has arrived, but check for races.
2558 */
2559static long unix_stream_data_wait(struct sock *sk, long timeo,
2560				  struct sk_buff *last, unsigned int last_len,
2561				  bool freezable)
2562{
2563	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2564	struct sk_buff *tail;
2565	DEFINE_WAIT(wait);
2566
2567	unix_state_lock(sk);
2568
2569	for (;;) {
2570		prepare_to_wait(sk_sleep(sk), &wait, state);
2571
2572		tail = skb_peek_tail(&sk->sk_receive_queue);
2573		if (tail != last ||
2574		    (tail && tail->len != last_len) ||
2575		    sk->sk_err ||
2576		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2577		    signal_pending(current) ||
2578		    !timeo)
2579			break;
2580
2581		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2582		unix_state_unlock(sk);
2583		timeo = schedule_timeout(timeo);
2584		unix_state_lock(sk);
2585
2586		if (sock_flag(sk, SOCK_DEAD))
2587			break;
2588
2589		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2590	}
2591
2592	finish_wait(sk_sleep(sk), &wait);
2593	unix_state_unlock(sk);
2594	return timeo;
2595}
2596
2597static unsigned int unix_skb_len(const struct sk_buff *skb)
2598{
2599	return skb->len - UNIXCB(skb).consumed;
2600}
2601
2602struct unix_stream_read_state {
2603	int (*recv_actor)(struct sk_buff *, int, int,
2604			  struct unix_stream_read_state *);
2605	struct socket *socket;
2606	struct msghdr *msg;
2607	struct pipe_inode_info *pipe;
2608	size_t size;
2609	int flags;
2610	unsigned int splice_flags;
2611};
2612
2613#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2614static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2615{
2616	struct socket *sock = state->socket;
2617	struct sock *sk = sock->sk;
2618	struct unix_sock *u = unix_sk(sk);
2619	int chunk = 1;
2620	struct sk_buff *oob_skb;
2621
2622	mutex_lock(&u->iolock);
2623	unix_state_lock(sk);
2624	spin_lock(&sk->sk_receive_queue.lock);
2625
2626	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2627		spin_unlock(&sk->sk_receive_queue.lock);
2628		unix_state_unlock(sk);
2629		mutex_unlock(&u->iolock);
2630		return -EINVAL;
2631	}
2632
2633	oob_skb = u->oob_skb;
2634
2635	if (!(state->flags & MSG_PEEK))
2636		WRITE_ONCE(u->oob_skb, NULL);
2637	else
2638		skb_get(oob_skb);
2639
2640	spin_unlock(&sk->sk_receive_queue.lock);
2641	unix_state_unlock(sk);
2642
2643	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2644
2645	if (!(state->flags & MSG_PEEK))
2646		UNIXCB(oob_skb).consumed += 1;
2647
2648	consume_skb(oob_skb);
2649
2650	mutex_unlock(&u->iolock);
2651
2652	if (chunk < 0)
2653		return -EFAULT;
2654
2655	state->msg->msg_flags |= MSG_OOB;
2656	return 1;
2657}
2658
2659static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2660				  int flags, int copied)
2661{
2662	struct unix_sock *u = unix_sk(sk);
2663
2664	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2665		skb_unlink(skb, &sk->sk_receive_queue);
2666		consume_skb(skb);
2667		skb = NULL;
2668	} else {
2669		struct sk_buff *unlinked_skb = NULL;
2670
2671		spin_lock(&sk->sk_receive_queue.lock);
2672
2673		if (skb == u->oob_skb) {
2674			if (copied) {
2675				skb = NULL;
2676			} else if (sock_flag(sk, SOCK_URGINLINE)) {
2677				if (!(flags & MSG_PEEK)) {
2678					WRITE_ONCE(u->oob_skb, NULL);
2679					consume_skb(skb);
2680				}
2681			} else if (flags & MSG_PEEK) {
2682				skb = NULL;
2683			} else {
2684				__skb_unlink(skb, &sk->sk_receive_queue);
2685				WRITE_ONCE(u->oob_skb, NULL);
2686				unlinked_skb = skb;
2687				skb = skb_peek(&sk->sk_receive_queue);
2688			}
2689		}
2690
2691		spin_unlock(&sk->sk_receive_queue.lock);
2692
2693		if (unlinked_skb) {
2694			WARN_ON_ONCE(skb_unref(unlinked_skb));
2695			kfree_skb(unlinked_skb);
2696		}
2697	}
2698	return skb;
2699}
2700#endif
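/* Receiver-side usage sketch (hypothetical userspace peer): recv() with
 * MSG_OOB pulls the queued OOB byte; with SO_OOBINLINE set the byte is
 * instead delivered in-band and unix_stream_recv_urg() above fails with
 * EINVAL.
 *
 *	char c;
 *
 *	if (recv(fd, &c, 1, MSG_OOB) < 0)
 *		perror("recv");
 */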
2701
2702static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2703{
2704	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2705		return -ENOTCONN;
2706
2707	return unix_read_skb(sk, recv_actor);
2708}
2709
2710static int unix_stream_read_generic(struct unix_stream_read_state *state,
2711				    bool freezable)
2712{
2713	struct scm_cookie scm;
2714	struct socket *sock = state->socket;
2715	struct sock *sk = sock->sk;
2716	struct unix_sock *u = unix_sk(sk);
2717	int copied = 0;
2718	int flags = state->flags;
2719	int noblock = flags & MSG_DONTWAIT;
2720	bool check_creds = false;
2721	int target;
2722	int err = 0;
2723	long timeo;
2724	int skip;
2725	size_t size = state->size;
2726	unsigned int last_len;
2727
2728	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2729		err = -EINVAL;
2730		goto out;
2731	}
2732
2733	if (unlikely(flags & MSG_OOB)) {
2734		err = -EOPNOTSUPP;
2735#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2736		err = unix_stream_recv_urg(state);
2737#endif
2738		goto out;
2739	}
2740
2741	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2742	timeo = sock_rcvtimeo(sk, noblock);
2743
2744	memset(&scm, 0, sizeof(scm));
2745
2746	/* Lock the socket to prevent queue disordering
2747	 * while we sleep in memcpy_to_msg().
2748	 */
2749	mutex_lock(&u->iolock);
2750
2751	skip = max(sk_peek_offset(sk, flags), 0);
2752
2753	do {
2754		int chunk;
2755		bool drop_skb;
2756		struct sk_buff *skb, *last;
2757
2758redo:
2759		unix_state_lock(sk);
2760		if (sock_flag(sk, SOCK_DEAD)) {
2761			err = -ECONNRESET;
2762			goto unlock;
2763		}
2764		last = skb = skb_peek(&sk->sk_receive_queue);
2765		last_len = last ? last->len : 0;
2766
2767again:
2768#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2769		if (skb) {
2770			skb = manage_oob(skb, sk, flags, copied);
2771			if (!skb && copied) {
2772				unix_state_unlock(sk);
2773				break;
 
 
2774			}
2775		}
2776#endif
 
2777		if (skb == NULL) {
2778			if (copied >= target)
2779				goto unlock;
2780
2781			/*
2782			 *	POSIX 1003.1g mandates this order.
2783			 */
2784
2785			err = sock_error(sk);
2786			if (err)
2787				goto unlock;
2788			if (sk->sk_shutdown & RCV_SHUTDOWN)
2789				goto unlock;
2790
2791			unix_state_unlock(sk);
2792			if (!timeo) {
2793				err = -EAGAIN;
2794				break;
2795			}
2796
2797			mutex_unlock(&u->iolock);
2798
2799			timeo = unix_stream_data_wait(sk, timeo, last,
2800						      last_len, freezable);
2801
2802			if (signal_pending(current)) {
2803				err = sock_intr_errno(timeo);
2804				scm_destroy(&scm);
2805				goto out;
2806			}
2807
2808			mutex_lock(&u->iolock);
2809			goto redo;
2810unlock:
2811			unix_state_unlock(sk);
2812			break;
2813		}
2814
2815		while (skip >= unix_skb_len(skb)) {
2816			skip -= unix_skb_len(skb);
2817			last = skb;
2818			last_len = skb->len;
2819			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2820			if (!skb)
2821				goto again;
2822		}
2823
2824		unix_state_unlock(sk);
2825
2826		if (check_creds) {
2827			/* Never glue messages from different writers */
2828			if (!unix_skb_scm_eq(skb, &scm))
2829				break;
2830		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2831			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2832			/* Copy credentials */
2833			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2834			unix_set_secdata(&scm, skb);
2835			check_creds = true;
2836		}
2837
2838		/* Copy address just once */
2839		if (state->msg && state->msg->msg_name) {
2840			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2841					 state->msg->msg_name);
2842			unix_copy_addr(state->msg, skb->sk);
2843
2844			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2845							      state->msg->msg_name,
2846							      &state->msg->msg_namelen);
2847
2848			sunaddr = NULL;
2849		}
2850
2851		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2852		skb_get(skb);
2853		chunk = state->recv_actor(skb, skip, chunk, state);
2854		drop_skb = !unix_skb_len(skb);
2855		/* skb is only safe to use if !drop_skb */
2856		consume_skb(skb);
2857		if (chunk < 0) {
2858			if (copied == 0)
2859				copied = -EFAULT;
2860			break;
2861		}
2862		copied += chunk;
2863		size -= chunk;
2864
2865		if (drop_skb) {
2866			/* The skb was touched by a concurrent reader;
2867			 * we should not expect anything from this skb
2868			 * anymore and must assume it invalid - we can
2869			 * be sure it was dropped from the socket queue.
2870			 *
2871			 * Let's report a short read.
2872			 */
2873			err = 0;
2874			break;
2875		}
2876
2877		/* Mark read part of skb as used */
2878		if (!(flags & MSG_PEEK)) {
2879			UNIXCB(skb).consumed += chunk;
2880
2881			sk_peek_offset_bwd(sk, chunk);
2882
2883			if (UNIXCB(skb).fp) {
2884				scm_stat_del(sk, skb);
2885				unix_detach_fds(&scm, skb);
2886			}
2887
2888			if (unix_skb_len(skb))
2889				break;
2890
2891			skb_unlink(skb, &sk->sk_receive_queue);
2892			consume_skb(skb);
2893
2894			if (scm.fp)
2895				break;
2896		} else {
2897			/* It is questionable, see note in unix_dgram_recvmsg.
2898			 */
2899			if (UNIXCB(skb).fp)
2900				unix_peek_fds(&scm, skb);
2901
2902			sk_peek_offset_fwd(sk, chunk);
2903
2904			if (UNIXCB(skb).fp)
2905				break;
2906
2907			skip = 0;
2908			last = skb;
2909			last_len = skb->len;
2910			unix_state_lock(sk);
2911			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2912			if (skb)
2913				goto again;
2914			unix_state_unlock(sk);
2915			break;
2916		}
2917	} while (size);
2918
2919	mutex_unlock(&u->iolock);
2920	if (state->msg)
2921		scm_recv_unix(sock, state->msg, &scm, flags);
2922	else
2923		scm_destroy(&scm);
2924out:
2925	return copied ? : err;
2926}
2927
2928static int unix_stream_read_actor(struct sk_buff *skb,
2929				  int skip, int chunk,
2930				  struct unix_stream_read_state *state)
2931{
2932	int ret;
2933
2934	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2935				    state->msg, chunk);
2936	return ret ?: chunk;
2937}
2938
2939int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2940			  size_t size, int flags)
2941{
2942	struct unix_stream_read_state state = {
2943		.recv_actor = unix_stream_read_actor,
2944		.socket = sk->sk_socket,
2945		.msg = msg,
2946		.size = size,
2947		.flags = flags
2948	};
2949
2950	return unix_stream_read_generic(&state, true);
2951}
2952
2953static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2954			       size_t size, int flags)
2955{
2956	struct unix_stream_read_state state = {
2957		.recv_actor = unix_stream_read_actor,
2958		.socket = sock,
2959		.msg = msg,
2960		.size = size,
2961		.flags = flags
2962	};
2963
2964#ifdef CONFIG_BPF_SYSCALL
2965	struct sock *sk = sock->sk;
2966	const struct proto *prot = READ_ONCE(sk->sk_prot);
2967
2968	if (prot != &unix_stream_proto)
2969		return prot->recvmsg(sk, msg, size, flags, NULL);
2970#endif
2971	return unix_stream_read_generic(&state, true);
2972}
2973
2974static int unix_stream_splice_actor(struct sk_buff *skb,
2975				    int skip, int chunk,
2976				    struct unix_stream_read_state *state)
2977{
2978	return skb_splice_bits(skb, state->socket->sk,
2979			       UNIXCB(skb).consumed + skip,
2980			       state->pipe, chunk, state->splice_flags);
2981}
2982
2983static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2984				       struct pipe_inode_info *pipe,
2985				       size_t size, unsigned int flags)
2986{
2987	struct unix_stream_read_state state = {
2988		.recv_actor = unix_stream_splice_actor,
2989		.socket = sock,
2990		.pipe = pipe,
2991		.size = size,
2992		.splice_flags = flags,
2993	};
2994
2995	if (unlikely(*ppos))
2996		return -ESPIPE;
2997
2998	if (sock->file->f_flags & O_NONBLOCK ||
2999	    flags & SPLICE_F_NONBLOCK)
3000		state.flags = MSG_DONTWAIT;
3001
3002	return unix_stream_read_generic(&state, false);
3003}
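/* Usage sketch (hypothetical userspace caller): move queued stream data
 * into a pipe without copying through userspace; SPLICE_F_NONBLOCK maps
 * to MSG_DONTWAIT above.
 *
 *	ssize_t n = splice(sfd, NULL, pipefd[1], NULL, 4096,
 *			   SPLICE_F_NONBLOCK);
 */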
3004
3005static int unix_shutdown(struct socket *sock, int mode)
3006{
3007	struct sock *sk = sock->sk;
3008	struct sock *other;
3009
3010	if (mode < SHUT_RD || mode > SHUT_RDWR)
3011		return -EINVAL;
3012	/* This maps:
3013	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
3014	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
3015	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3016	 */
3017	++mode;
3018
3019	unix_state_lock(sk);
3020	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3021	other = unix_peer(sk);
3022	if (other)
3023		sock_hold(other);
3024	unix_state_unlock(sk);
3025	sk->sk_state_change(sk);
3026
3027	if (other &&
3028		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3029
3030		int peer_mode = 0;
3031		const struct proto *prot = READ_ONCE(other->sk_prot);
3032
3033		if (prot->unhash)
3034			prot->unhash(other);
3035		if (mode & RCV_SHUTDOWN)
3036			peer_mode |= SEND_SHUTDOWN;
3037		if (mode & SEND_SHUTDOWN)
3038			peer_mode |= RCV_SHUTDOWN;
3039		unix_state_lock(other);
3040		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3041		unix_state_unlock(other);
3042		other->sk_state_change(other);
3043		if (peer_mode == SHUTDOWN_MASK)
3044			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3045		else if (peer_mode & RCV_SHUTDOWN)
3046			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3047	}
3048	if (other)
3049		sock_put(other);
3050
3051	return 0;
3052}
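/* Usage sketch (hypothetical userspace caller): a SHUT_WR half-close
 * maps to SEND_SHUTDOWN here and RCV_SHUTDOWN on the peer, so the peer
 * reads EOF while this end may still receive.
 *
 *	shutdown(fd, SHUT_WR);
 */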
3053
3054long unix_inq_len(struct sock *sk)
3055{
3056	struct sk_buff *skb;
3057	long amount = 0;
3058
3059	if (sk->sk_state == TCP_LISTEN)
3060		return -EINVAL;
3061
3062	spin_lock(&sk->sk_receive_queue.lock);
3063	if (sk->sk_type == SOCK_STREAM ||
3064	    sk->sk_type == SOCK_SEQPACKET) {
3065		skb_queue_walk(&sk->sk_receive_queue, skb)
3066			amount += unix_skb_len(skb);
3067	} else {
3068		skb = skb_peek(&sk->sk_receive_queue);
3069		if (skb)
3070			amount = skb->len;
3071	}
3072	spin_unlock(&sk->sk_receive_queue.lock);
3073
3074	return amount;
3075}
3076EXPORT_SYMBOL_GPL(unix_inq_len);
3077
3078long unix_outq_len(struct sock *sk)
3079{
3080	return sk_wmem_alloc_get(sk);
3081}
3082EXPORT_SYMBOL_GPL(unix_outq_len);
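/* Usage sketch (hypothetical userspace caller) for the two counters
 * above: SIOCINQ reports readable bytes via unix_inq_len() and
 * SIOCOUTQ reports bytes still queued for the peer via unix_outq_len().
 *
 *	int inq, outq;
 *
 *	ioctl(fd, SIOCINQ, &inq);
 *	ioctl(fd, SIOCOUTQ, &outq);
 */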
3083
3084static int unix_open_file(struct sock *sk)
3085{
3086	struct path path;
3087	struct file *f;
3088	int fd;
3089
3090	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3091		return -EPERM;
3092
3093	if (!smp_load_acquire(&unix_sk(sk)->addr))
3094		return -ENOENT;
3095
3096	path = unix_sk(sk)->path;
3097	if (!path.dentry)
3098		return -ENOENT;
3099
3100	path_get(&path);
3101
3102	fd = get_unused_fd_flags(O_CLOEXEC);
3103	if (fd < 0)
3104		goto out;
3105
3106	f = dentry_open(&path, O_PATH, current_cred());
3107	if (IS_ERR(f)) {
3108		put_unused_fd(fd);
3109		fd = PTR_ERR(f);
3110		goto out;
3111	}
3112
3113	fd_install(fd, f);
3114out:
3115	path_put(&path);
3116
3117	return fd;
3118}
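/* Usage sketch (hypothetical CAP_NET_ADMIN caller): SIOCUNIXFILE yields
 * a new O_PATH descriptor for the filesystem object the socket is bound
 * to, e.g. for fstat() or re-resolution via /proc/self/fd.
 *
 *	int pfd = ioctl(sfd, SIOCUNIXFILE);
 */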
3119
3120static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3121{
3122	struct sock *sk = sock->sk;
3123	long amount = 0;
3124	int err;
3125
3126	switch (cmd) {
3127	case SIOCOUTQ:
3128		amount = unix_outq_len(sk);
3129		err = put_user(amount, (int __user *)arg);
3130		break;
3131	case SIOCINQ:
3132		amount = unix_inq_len(sk);
3133		if (amount < 0)
3134			err = amount;
3135		else
3136			err = put_user(amount, (int __user *)arg);
3137		break;
3138	case SIOCUNIXFILE:
3139		err = unix_open_file(sk);
3140		break;
3141#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3142	case SIOCATMARK:
3143		{
3144			struct sk_buff *skb;
3145			int answ = 0;
3146
3147			skb = skb_peek(&sk->sk_receive_queue);
3148			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3149				answ = 1;
3150			err = put_user(answ, (int __user *)arg);
3151		}
3152		break;
3153#endif
3154	default:
3155		err = -ENOIOCTLCMD;
3156		break;
3157	}
3158	return err;
3159}
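/* Usage sketch (hypothetical OOB-aware reader): SIOCATMARK reports
 * whether the next read would start at the queued OOB byte.
 *
 *	int at_mark = 0;
 *
 *	ioctl(fd, SIOCATMARK, &at_mark);
 */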
3160
3161#ifdef CONFIG_COMPAT
3162static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3163{
3164	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3165}
3166#endif
3167
3168static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3169{
3170	struct sock *sk = sock->sk;
3171	__poll_t mask;
3172	u8 shutdown;
3173
3174	sock_poll_wait(file, sock, wait);
3175	mask = 0;
3176	shutdown = READ_ONCE(sk->sk_shutdown);
3177
3178	/* exceptional events? */
3179	if (READ_ONCE(sk->sk_err))
3180		mask |= EPOLLERR;
3181	if (shutdown == SHUTDOWN_MASK)
3182		mask |= EPOLLHUP;
3183	if (shutdown & RCV_SHUTDOWN)
3184		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3185
3186	/* readable? */
3187	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3188		mask |= EPOLLIN | EPOLLRDNORM;
3189	if (sk_is_readable(sk))
3190		mask |= EPOLLIN | EPOLLRDNORM;
3191#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3192	if (READ_ONCE(unix_sk(sk)->oob_skb))
3193		mask |= EPOLLPRI;
3194#endif
3195
3196	/* Connection-based sockets need to check for termination and startup */
3197	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3198	    sk->sk_state == TCP_CLOSE)
3199		mask |= EPOLLHUP;
3200
3201	/*
3202	 * We set writable also when the other side has shut down the
3203	 * connection. This prevents stuck sockets.
3204	 */
3205	if (unix_writable(sk))
3206		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3207
3208	return mask;
3209}
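/* Usage sketch (hypothetical poller; POLLRDHUP needs _GNU_SOURCE): a
 * peer shutdown surfaces as EPOLLRDHUP above, distinct from plain
 * readability.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
 *
 *	poll(&pfd, 1, -1);
 */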
3210
3211static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3212				    poll_table *wait)
3213{
3214	struct sock *sk = sock->sk, *other;
3215	unsigned int writable;
3216	__poll_t mask;
3217	u8 shutdown;
3218
3219	sock_poll_wait(file, sock, wait);
3220	mask = 0;
3221	shutdown = READ_ONCE(sk->sk_shutdown);
3222
3223	/* exceptional events? */
3224	if (READ_ONCE(sk->sk_err) ||
3225	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3226		mask |= EPOLLERR |
3227			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3228
3229	if (shutdown & RCV_SHUTDOWN)
3230		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3231	if (shutdown == SHUTDOWN_MASK)
3232		mask |= EPOLLHUP;
3233
3234	/* readable? */
3235	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3236		mask |= EPOLLIN | EPOLLRDNORM;
3237	if (sk_is_readable(sk))
3238		mask |= EPOLLIN | EPOLLRDNORM;
3239
3240	/* Connection-based sockets need to check for termination and startup */
3241	if (sk->sk_type == SOCK_SEQPACKET) {
3242		if (sk->sk_state == TCP_CLOSE)
3243			mask |= EPOLLHUP;
3244		/* connection hasn't started yet? */
3245		if (sk->sk_state == TCP_SYN_SENT)
3246			return mask;
3247	}
3248
3249	/* No write status requested, avoid expensive OUT tests. */
3250	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3251		return mask;
3252
3253	writable = unix_writable(sk);
3254	if (writable) {
3255		unix_state_lock(sk);
3256
3257		other = unix_peer(sk);
3258		if (other && unix_peer(other) != sk &&
3259		    unix_recvq_full_lockless(other) &&
3260		    unix_dgram_peer_wake_me(sk, other))
3261			writable = 0;
3262
3263		unix_state_unlock(sk);
3264	}
3265
3266	if (writable)
3267		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3268	else
3269		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3270
3271	return mask;
3272}
3273
3274#ifdef CONFIG_PROC_FS
3275
3276#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3277
3278#define get_bucket(x) ((x) >> BUCKET_SPACE)
3279#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3280#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
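/* Illustration (assuming BITS_PER_LONG == 64 and UNIX_HASH_BITS == 9):
 * BUCKET_SPACE is 64 - 10 - 1 = 53, so set_bucket_offset(2, 5) packs
 * bucket 2 and offset 5 into *pos as (2UL << 53) | 5, which
 * get_bucket() and get_offset() then unpack.
 */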
3281
3282static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3283{
3284	unsigned long offset = get_offset(*pos);
3285	unsigned long bucket = get_bucket(*pos);
3286	unsigned long count = 0;
3287	struct sock *sk;
3288
3289	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3290	     sk; sk = sk_next(sk)) {
3291		if (++count == offset)
3292			break;
3293	}
3294
3295	return sk;
3296}
3297
3298static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3299{
3300	unsigned long bucket = get_bucket(*pos);
3301	struct net *net = seq_file_net(seq);
3302	struct sock *sk;
3303
3304	while (bucket < UNIX_HASH_SIZE) {
3305		spin_lock(&net->unx.table.locks[bucket]);
3306
3307		sk = unix_from_bucket(seq, pos);
3308		if (sk)
3309			return sk;
3310
3311		spin_unlock(&net->unx.table.locks[bucket]);
3312
3313		*pos = set_bucket_offset(++bucket, 1);
3314	}
3315
3316	return NULL;
3317}
3318
3319static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3320				  loff_t *pos)
3321{
3322	unsigned long bucket = get_bucket(*pos);
3323
3324	sk = sk_next(sk);
3325	if (sk)
3326		return sk;
3327
3329	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3330
3331	*pos = set_bucket_offset(++bucket, 1);
3332
3333	return unix_get_first(seq, pos);
3334}
3335
3336static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3337{
3338	if (!*pos)
3339		return SEQ_START_TOKEN;
3340
3341	return unix_get_first(seq, pos);
3342}
3343
3344static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3345{
3346	++*pos;
3347
3348	if (v == SEQ_START_TOKEN)
3349		return unix_get_first(seq, pos);
3350
3351	return unix_get_next(seq, v, pos);
3352}
3353
3354static void unix_seq_stop(struct seq_file *seq, void *v)
3355{
3356	struct sock *sk = v;
3357
3358	if (sk)
3359		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3360}
3361
3362static int unix_seq_show(struct seq_file *seq, void *v)
3363{
3365	if (v == SEQ_START_TOKEN)
3366		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3367			 "Inode Path\n");
3368	else {
3369		struct sock *s = v;
3370		struct unix_sock *u = unix_sk(s);
3371		unix_state_lock(s);
3372
3373		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3374			s,
3375			refcount_read(&s->sk_refcnt),
3376			0,
3377			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3378			s->sk_type,
3379			s->sk_socket ?
3380			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3381			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3382			sock_i_ino(s));
3383
3384		if (u->addr) {	/* under a hash table lock here */
3385			int i, len;
3386			seq_putc(seq, ' ');
3387
3388			i = 0;
3389			len = u->addr->len -
3390				offsetof(struct sockaddr_un, sun_path);
3391			if (u->addr->name->sun_path[0]) {
3392				len--;
3393			} else {
3394				seq_putc(seq, '@');
3395				i++;
3396			}
3397			for ( ; i < len; i++)
3398				seq_putc(seq, u->addr->name->sun_path[i] ?:
3399					 '@');
3400		}
3401		unix_state_unlock(s);
3402		seq_putc(seq, '\n');
3403	}
3404
3405	return 0;
3406}
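/* Reading note: in /proc/net/unix, an abstract bind of "\0name" prints
 * as "@name" (embedded NULs also print as '@'), while filesystem binds
 * print their path verbatim after the inode column.
 */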
3407
3408static const struct seq_operations unix_seq_ops = {
3409	.start  = unix_seq_start,
3410	.next   = unix_seq_next,
3411	.stop   = unix_seq_stop,
3412	.show   = unix_seq_show,
3413};
3414
3415#ifdef CONFIG_BPF_SYSCALL
3416struct bpf_unix_iter_state {
3417	struct seq_net_private p;
3418	unsigned int cur_sk;
3419	unsigned int end_sk;
3420	unsigned int max_sk;
3421	struct sock **batch;
3422	bool st_bucket_done;
3423};
3424
3425struct bpf_iter__unix {
3426	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3427	__bpf_md_ptr(struct unix_sock *, unix_sk);
3428	uid_t uid __aligned(8);
3429};
3430
3431static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3432			      struct unix_sock *unix_sk, uid_t uid)
3433{
3434	struct bpf_iter__unix ctx;
3435
3436	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3437	ctx.meta = meta;
3438	ctx.unix_sk = unix_sk;
3439	ctx.uid = uid;
3440	return bpf_iter_run_prog(prog, &ctx);
3441}
3442
3443static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3445{
3446	struct bpf_unix_iter_state *iter = seq->private;
3447	unsigned int expected = 1;
3448	struct sock *sk;
3449
3450	sock_hold(start_sk);
3451	iter->batch[iter->end_sk++] = start_sk;
3452
3453	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3454		if (iter->end_sk < iter->max_sk) {
3455			sock_hold(sk);
3456			iter->batch[iter->end_sk++] = sk;
3457		}
3458
3459		expected++;
3460	}
3461
3462	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3463
3464	return expected;
3465}
3466
3467static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3468{
3469	while (iter->cur_sk < iter->end_sk)
3470		sock_put(iter->batch[iter->cur_sk++]);
3471}
3472
3473static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3474				       unsigned int new_batch_sz)
3475{
3476	struct sock **new_batch;
3477
3478	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3479			     GFP_USER | __GFP_NOWARN);
3480	if (!new_batch)
3481		return -ENOMEM;
3482
3483	bpf_iter_unix_put_batch(iter);
3484	kvfree(iter->batch);
3485	iter->batch = new_batch;
3486	iter->max_sk = new_batch_sz;
3487
3488	return 0;
3489}
3490
3491static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3492					loff_t *pos)
3493{
3494	struct bpf_unix_iter_state *iter = seq->private;
3495	unsigned int expected;
3496	bool resized = false;
3497	struct sock *sk;
3498
3499	if (iter->st_bucket_done)
3500		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3501
3502again:
3503	/* Get a new batch */
3504	iter->cur_sk = 0;
3505	iter->end_sk = 0;
3506
3507	sk = unix_get_first(seq, pos);
3508	if (!sk)
3509		return NULL; /* Done */
3510
3511	expected = bpf_iter_unix_hold_batch(seq, sk);
3512
3513	if (iter->end_sk == expected) {
3514		iter->st_bucket_done = true;
3515		return sk;
3516	}
3517
3518	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3519		resized = true;
3520		goto again;
3521	}
3522
3523	return sk;
3524}
3525
3526static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3527{
3528	if (!*pos)
3529		return SEQ_START_TOKEN;
3530
3531	/* bpf iter does not support lseek, so it always
3532	 * continues from where it was stop()-ped.
3533	 */
3534	return bpf_iter_unix_batch(seq, pos);
3535}
3536
3537static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3538{
3539	struct bpf_unix_iter_state *iter = seq->private;
3540	struct sock *sk;
3541
3542	/* Whenever seq_next() is called, the iter->cur_sk is
3543	 * done with seq_show(), so advance to the next sk in
3544	 * the batch.
3545	 */
3546	if (iter->cur_sk < iter->end_sk)
3547		sock_put(iter->batch[iter->cur_sk++]);
3548
3549	++*pos;
3550
3551	if (iter->cur_sk < iter->end_sk)
3552		sk = iter->batch[iter->cur_sk];
3553	else
3554		sk = bpf_iter_unix_batch(seq, pos);
3555
3556	return sk;
3557}
3558
3559static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3560{
3561	struct bpf_iter_meta meta;
3562	struct bpf_prog *prog;
3563	struct sock *sk = v;
3564	uid_t uid;
3565	bool slow;
3566	int ret;
3567
3568	if (v == SEQ_START_TOKEN)
3569		return 0;
3570
3571	slow = lock_sock_fast(sk);
3572
3573	if (unlikely(sk_unhashed(sk))) {
3574		ret = SEQ_SKIP;
3575		goto unlock;
3576	}
3577
3578	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3579	meta.seq = seq;
3580	prog = bpf_iter_get_info(&meta, false);
3581	ret = unix_prog_seq_show(prog, &meta, v, uid);
3582unlock:
3583	unlock_sock_fast(sk, slow);
3584	return ret;
3585}
3586
3587static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3588{
3589	struct bpf_unix_iter_state *iter = seq->private;
3590	struct bpf_iter_meta meta;
3591	struct bpf_prog *prog;
3592
3593	if (!v) {
3594		meta.seq = seq;
3595		prog = bpf_iter_get_info(&meta, true);
3596		if (prog)
3597			(void)unix_prog_seq_show(prog, &meta, v, 0);
3598	}
3599
3600	if (iter->cur_sk < iter->end_sk)
3601		bpf_iter_unix_put_batch(iter);
3602}
3603
3604static const struct seq_operations bpf_iter_unix_seq_ops = {
3605	.start	= bpf_iter_unix_seq_start,
3606	.next	= bpf_iter_unix_seq_next,
3607	.stop	= bpf_iter_unix_seq_stop,
3608	.show	= bpf_iter_unix_seq_show,
3609};
3610#endif
3611#endif
3612
3613static const struct net_proto_family unix_family_ops = {
3614	.family = PF_UNIX,
3615	.create = unix_create,
3616	.owner	= THIS_MODULE,
3617};
3618
3620static int __net_init unix_net_init(struct net *net)
3621{
3622	int i;
3623
3624	net->unx.sysctl_max_dgram_qlen = 10;
3625	if (unix_sysctl_register(net))
3626		goto out;
3627
3628#ifdef CONFIG_PROC_FS
3629	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3630			     sizeof(struct seq_net_private)))
3631		goto err_sysctl;
3632#endif
3633
3634	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3635					      sizeof(spinlock_t), GFP_KERNEL);
3636	if (!net->unx.table.locks)
3637		goto err_proc;
3638
3639	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3640						sizeof(struct hlist_head),
3641						GFP_KERNEL);
3642	if (!net->unx.table.buckets)
3643		goto free_locks;
3644
3645	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3646		spin_lock_init(&net->unx.table.locks[i]);
3647		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3648	}
3649
3650	return 0;
3651
3652free_locks:
3653	kvfree(net->unx.table.locks);
3654err_proc:
3655#ifdef CONFIG_PROC_FS
3656	remove_proc_entry("unix", net->proc_net);
3657err_sysctl:
3658#endif
3659	unix_sysctl_unregister(net);
3660out:
3661	return -ENOMEM;
3662}
3663
3664static void __net_exit unix_net_exit(struct net *net)
3665{
3666	kvfree(net->unx.table.buckets);
3667	kvfree(net->unx.table.locks);
3668	unix_sysctl_unregister(net);
3669	remove_proc_entry("unix", net->proc_net);
3670}
3671
3672static struct pernet_operations unix_net_ops = {
3673	.init = unix_net_init,
3674	.exit = unix_net_exit,
3675};
3676
3677#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3678DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3679		     struct unix_sock *unix_sk, uid_t uid)
3680
3681#define INIT_BATCH_SZ 16
3682
3683static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3684{
3685	struct bpf_unix_iter_state *iter = priv_data;
3686	int err;
3687
3688	err = bpf_iter_init_seq_net(priv_data, aux);
3689	if (err)
3690		return err;
3691
3692	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3693	if (err) {
3694		bpf_iter_fini_seq_net(priv_data);
3695		return err;
3696	}
3697
3698	return 0;
3699}
3700
3701static void bpf_iter_fini_unix(void *priv_data)
3702{
3703	struct bpf_unix_iter_state *iter = priv_data;
3704
3705	bpf_iter_fini_seq_net(priv_data);
3706	kvfree(iter->batch);
3707}
3708
3709static const struct bpf_iter_seq_info unix_seq_info = {
3710	.seq_ops		= &bpf_iter_unix_seq_ops,
3711	.init_seq_private	= bpf_iter_init_unix,
3712	.fini_seq_private	= bpf_iter_fini_unix,
3713	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3714};
3715
3716static const struct bpf_func_proto *
3717bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3718			     const struct bpf_prog *prog)
3719{
3720	switch (func_id) {
3721	case BPF_FUNC_setsockopt:
3722		return &bpf_sk_setsockopt_proto;
3723	case BPF_FUNC_getsockopt:
3724		return &bpf_sk_getsockopt_proto;
3725	default:
3726		return NULL;
3727	}
3728}
3729
3730static struct bpf_iter_reg unix_reg_info = {
3731	.target			= "unix",
3732	.ctx_arg_info_size	= 1,
3733	.ctx_arg_info		= {
3734		{ offsetof(struct bpf_iter__unix, unix_sk),
3735		  PTR_TO_BTF_ID_OR_NULL },
3736	},
3737	.get_func_proto         = bpf_iter_unix_get_func_proto,
3738	.seq_info		= &unix_seq_info,
3739};
3740
3741static void __init bpf_iter_register(void)
3742{
3743	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3744	if (bpf_iter_reg_target(&unix_reg_info))
3745		pr_warn("Warning: could not register bpf iterator unix\n");
3746}
3747#endif
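/* Usage sketch (hypothetical object and pin paths): a bpf_iter program
 * attached to the "unix" target can be pinned and then read like the
 * procfs file:
 *
 *	bpftool iter pin iter_unix.bpf.o /sys/fs/bpf/unix
 *	cat /sys/fs/bpf/unix
 */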
3748
3749static int __init af_unix_init(void)
3750{
3751	int i, rc = -1;
3752
3753	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3754
3755	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3756		spin_lock_init(&bsd_socket_locks[i]);
3757		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3758	}
3759
3760	rc = proto_register(&unix_dgram_proto, 1);
3761	if (rc != 0) {
3762		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3763		goto out;
3764	}
3765
3766	rc = proto_register(&unix_stream_proto, 1);
3767	if (rc != 0) {
3768		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3769		proto_unregister(&unix_dgram_proto);
3770		goto out;
3771	}
3772
3773	sock_register(&unix_family_ops);
3774	register_pernet_subsys(&unix_net_ops);
3775	unix_bpf_build_proto();
3776
3777#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3778	bpf_iter_register();
3779#endif
3780
3781out:
3782	return rc;
3783}
3784
3785/* Later than subsys_initcall() because we depend on stuff initialised there */
3786fs_initcall(af_unix_init);