   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * NET4:	Implementation of BSD Unix domain sockets.
   4 *
   5 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
   6 *
   7 * Fixes:
   8 *		Linus Torvalds	:	Assorted bug cures.
   9 *		Niibe Yutaka	:	async I/O support.
  10 *		Carsten Paeth	:	PF_UNIX check, address fixes.
  11 *		Alan Cox	:	Limit size of allocated blocks.
  12 *		Alan Cox	:	Fixed the stupid socketpair bug.
  13 *		Alan Cox	:	BSD compatibility fine tuning.
  14 *		Alan Cox	:	Fixed a bug in connect when interrupted.
  15 *		Alan Cox	:	Sorted out a proper draft version of
  16 *					file descriptor passing hacked up from
  17 *					Mike Shaver's work.
  18 *		Marty Leisner	:	Fixes to fd passing
  19 *		Nick Nevin	:	recvmsg bugfix.
  20 *		Alan Cox	:	Started proper garbage collector
  21 *		Heiko Eißfeldt	:	Missing verify_area check
  22 *		Alan Cox	:	Started POSIXisms
  23 *		Andreas Schwab	:	Replace inode by dentry for proper
  24 *					reference counting
  25 *		Kirk Petersen	:	Made this a module
  26 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
  27 *					Lots of bug fixes.
  28 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
  29 *					by the above two patches.
  30 *	     Andrea Arcangeli	:	If possible we block in connect(2)
  31 *					if the max backlog of the listen socket
  32 *					has been reached. This won't break
  33 *					old apps and it will avoid a huge
  34 *					amount of hashed socks (this is for
  35 *					unix_gc() performance reasons).
  36 *					Security fix that limits the max
  37 *					number of socks to 2*max_files and
  38 *					the number of skb queueable in the
  39 *					dgram receiver.
  40 *		Artur Skawina   :	Hash function optimizations
  41 *	     Alexey Kuznetsov   :	Full scale SMP. Lots of bugs are introduced 8)
  42 *	      Malcolm Beattie   :	Set peercred for socketpair
  43 *	     Michal Ostrowski   :       Module initialization cleanup.
  44 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
  45 *	     				the core infrastructure is doing that
  46 *	     				for all net proto families now (2.5.69+)
  47 *
  48 * Known differences from reference BSD that was tested:
  49 *
  50 *	[TO FIX]
  51 *	ECONNREFUSED is not returned from one end of a connected() socket to the
  52 *		other the moment one end closes.
  53 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
  54 *		and a fake inode identifier (nor the BSD first-socket-fstat-twice bug).
  55 *	[NOT TO FIX]
  56 *	accept() returns a path name even if the connecting socket has closed
  57 *		in the meantime (BSD loses the path and gives up).
  58 *	accept() returns 0 length path for an unbound connector. BSD returns 16
  59 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  60 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
  61 *	BSD af_unix apparently has connect forgetting to block properly.
  62 *		(need to check this with the POSIX spec in detail)
  63 *
  64 * Differences from 2.0.0-11-... (ANK)
  65 *	Bug fixes and improvements.
  66 *		- client shutdown killed server socket.
  67 *		- removed all useless cli/sti pairs.
  68 *
  69 *	Semantic changes/extensions.
  70 *		- generic control message passing.
  71 *		- SCM_CREDENTIALS control message.
  72 *		- "Abstract" (not FS based) socket bindings.
  73 *		  Abstract names are sequences of bytes (not zero terminated)
  74 *		  starting with a 0 byte, so that this name space does not intersect
  75 *		  with BSD names.
  76 */
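/* Editor's illustration (not part of the original file): a minimal
 * userspace sketch of the two binding flavours described above.  The
 * name "demo" is an assumption for the example only.
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	static int bind_abstract(int fd)
 *	{
 *		struct sockaddr_un a;
 *		socklen_t len;
 *
 *		memset(&a, 0, sizeof(a));
 *		a.sun_family = AF_UNIX;
 *		a.sun_path[0] = '\0';			// leading 0 => abstract
 *		memcpy(a.sun_path + 1, "demo", 4);	// no NUL terminator
 *		len = offsetof(struct sockaddr_un, sun_path) + 1 + 4;
 *
 *		return bind(fd, (struct sockaddr *)&a, len);
 *	}
 *
 * A pathname bind would instead put a NUL-terminated path in sun_path
 * and pass the full sizeof(a), creating a socket inode on the filesystem.
 */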
  77
  78#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  79
  80#include <linux/module.h>
  81#include <linux/kernel.h>
  82#include <linux/signal.h>
  83#include <linux/sched/signal.h>
  84#include <linux/errno.h>
  85#include <linux/string.h>
  86#include <linux/stat.h>
  87#include <linux/dcache.h>
  88#include <linux/namei.h>
  89#include <linux/socket.h>
  90#include <linux/un.h>
  91#include <linux/fcntl.h>
  92#include <linux/filter.h>
  93#include <linux/termios.h>
  94#include <linux/sockios.h>
  95#include <linux/net.h>
  96#include <linux/in.h>
  97#include <linux/fs.h>
  98#include <linux/slab.h>
  99#include <linux/uaccess.h>
 100#include <linux/skbuff.h>
 101#include <linux/netdevice.h>
 102#include <net/net_namespace.h>
 103#include <net/sock.h>
 104#include <net/tcp_states.h>
 105#include <net/af_unix.h>
 106#include <linux/proc_fs.h>
 107#include <linux/seq_file.h>
 108#include <net/scm.h>
 109#include <linux/init.h>
 110#include <linux/poll.h>
 111#include <linux/rtnetlink.h>
 112#include <linux/mount.h>
 113#include <net/checksum.h>
 114#include <linux/security.h>
 115#include <linux/splice.h>
 116#include <linux/freezer.h>
 117#include <linux/file.h>
 118#include <linux/btf_ids.h>
 119#include <linux/bpf-cgroup.h>
 120
 121#include "scm.h"
 122
 123static atomic_long_t unix_nr_socks;
 124static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
 125static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
 126
 127/* SMP locking strategy:
 128 *    hash table is protected with spinlock.
 129 *    each socket state is protected by separate spinlock.
 130 */
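/* Editor's note: for example, re-hashing a socket on bind() must hold
 * both the old and the new bucket lock; unix_table_double_lock() below
 * takes them in index order to avoid ABBA deadlock between two sockets
 * rebinding concurrently.
 */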
 131
 132static unsigned int unix_unbound_hash(struct sock *sk)
 133{
 134	unsigned long hash = (unsigned long)sk;
 135
 136	hash ^= hash >> 16;
 137	hash ^= hash >> 8;
 138	hash ^= sk->sk_type;
 139
 140	return hash & UNIX_HASH_MOD;
 141}
 142
 143static unsigned int unix_bsd_hash(struct inode *i)
 144{
 145	return i->i_ino & UNIX_HASH_MOD;
 146}
 147
 148static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
 149				       int addr_len, int type)
 150{
 151	__wsum csum = csum_partial(sunaddr, addr_len, 0);
 152	unsigned int hash;
 153
 154	hash = (__force unsigned int)csum_fold(csum);
 155	hash ^= hash >> 8;
 156	hash ^= type;
 157
 158	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
 159}
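/* Editor's note: the three helpers above partition the hash space.
 * Unbound and pathname (BSD) sockets hash into the lower half,
 * [0, UNIX_HASH_MOD]; abstract sockets into the upper half,
 * [UNIX_HASH_MOD + 1, 2 * UNIX_HASH_MOD + 1], so the two name spaces
 * never share a bucket.  Pathname sockets are additionally chained
 * into bsd_socket_buckets, keyed by inode, so unix_find_socket_byinode()
 * can resolve a connect(2) after the path lookup.
 */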
 160
 161static void unix_table_double_lock(struct net *net,
 162				   unsigned int hash1, unsigned int hash2)
 163{
 164	if (hash1 == hash2) {
 165		spin_lock(&net->unx.table.locks[hash1]);
 166		return;
 167	}
 168
 169	if (hash1 > hash2)
 170		swap(hash1, hash2);
 171
 172	spin_lock(&net->unx.table.locks[hash1]);
 173	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
 174}
 175
 176static void unix_table_double_unlock(struct net *net,
 177				     unsigned int hash1, unsigned int hash2)
 178{
 179	if (hash1 == hash2) {
 180		spin_unlock(&net->unx.table.locks[hash1]);
 181		return;
 182	}
 183
 184	spin_unlock(&net->unx.table.locks[hash1]);
 185	spin_unlock(&net->unx.table.locks[hash2]);
 186}
 187
 188#ifdef CONFIG_SECURITY_NETWORK
 189static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 190{
 191	UNIXCB(skb).secid = scm->secid;
 192}
 193
 194static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 195{
 196	scm->secid = UNIXCB(skb).secid;
 197}
 198
 199static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 200{
 201	return (scm->secid == UNIXCB(skb).secid);
 202}
 203#else
 204static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 205{ }
 206
 207static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 208{ }
 209
 210static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 211{
 212	return true;
 213}
 214#endif /* CONFIG_SECURITY_NETWORK */
 215
 216static inline int unix_our_peer(struct sock *sk, struct sock *osk)
 217{
 218	return unix_peer(osk) == sk;
 219}
 220
 221static inline int unix_may_send(struct sock *sk, struct sock *osk)
 222{
 223	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 224}
 225
 226static inline int unix_recvq_full(const struct sock *sk)
 227{
 228	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 229}
 230
 231static inline int unix_recvq_full_lockless(const struct sock *sk)
 232{
 233	return skb_queue_len_lockless(&sk->sk_receive_queue) >
 234		READ_ONCE(sk->sk_max_ack_backlog);
 235}
 236
 237struct sock *unix_peer_get(struct sock *s)
 238{
 239	struct sock *peer;
 240
 241	unix_state_lock(s);
 242	peer = unix_peer(s);
 243	if (peer)
 244		sock_hold(peer);
 245	unix_state_unlock(s);
 246	return peer;
 247}
 248EXPORT_SYMBOL_GPL(unix_peer_get);
 249
 250static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
 251					     int addr_len)
 252{
 253	struct unix_address *addr;
 254
 255	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
 256	if (!addr)
 257		return NULL;
 258
 259	refcount_set(&addr->refcnt, 1);
 260	addr->len = addr_len;
 261	memcpy(addr->name, sunaddr, addr_len);
 262
 263	return addr;
 264}
 265
 266static inline void unix_release_addr(struct unix_address *addr)
 267{
 268	if (refcount_dec_and_test(&addr->refcnt))
 269		kfree(addr);
 270}
 271
 272/*
 273 *	Check unix socket name:
  274 *		- it must not be zero length.
  275 *		- if it does not start with a zero byte, it must be NUL terminated (FS object).
  276 *		- if it starts with a zero byte, it is an abstract name.
 277 */
 278
 279static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
 280{
 281	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
 282	    addr_len > sizeof(*sunaddr))
 283		return -EINVAL;
 284
 285	if (sunaddr->sun_family != AF_UNIX)
 286		return -EINVAL;
 287
 288	return 0;
 289}
 290
 291static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
 292{
 293	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
 294	short offset = offsetof(struct sockaddr_storage, __data);
 295
 296	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
 297
 298	/* This may look like an off by one error but it is a bit more
 299	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
 300	 * sun_path[108] doesn't as such exist.  However in kernel space
  301 * we are guaranteed that it is a valid memory location in our
  302 * kernel buffer because syscall functions always pass a pointer
  303 * to a struct sockaddr_storage, which has a bigger buffer than
  304 * 108 bytes.  Also, we must terminate sun_path for strlen() in
 305	 * getname_kernel().
 306	 */
 307	addr->__data[addr_len - offset] = 0;
 308
 309	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, 108 will
 310	 * cause panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
 311	 * know the actual buffer.
 312	 */
 313	return strlen(addr->__data) + offset + 1;
 314}
 315
 316static void __unix_remove_socket(struct sock *sk)
 317{
 318	sk_del_node_init(sk);
 319}
 320
 321static void __unix_insert_socket(struct net *net, struct sock *sk)
 322{
 323	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 324	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
 325}
 326
 327static void __unix_set_addr_hash(struct net *net, struct sock *sk,
 328				 struct unix_address *addr, unsigned int hash)
 329{
 330	__unix_remove_socket(sk);
 331	smp_store_release(&unix_sk(sk)->addr, addr);
 332
 333	sk->sk_hash = hash;
 334	__unix_insert_socket(net, sk);
 335}
 336
 337static void unix_remove_socket(struct net *net, struct sock *sk)
 338{
 339	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 340	__unix_remove_socket(sk);
 341	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 342}
 343
 344static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
 345{
 346	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 347	__unix_insert_socket(net, sk);
 348	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 349}
 350
 351static void unix_insert_bsd_socket(struct sock *sk)
 352{
 353	spin_lock(&bsd_socket_locks[sk->sk_hash]);
 354	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
 355	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 356}
 357
 358static void unix_remove_bsd_socket(struct sock *sk)
 359{
 360	if (!hlist_unhashed(&sk->sk_bind_node)) {
 361		spin_lock(&bsd_socket_locks[sk->sk_hash]);
 362		__sk_del_bind_node(sk);
 363		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 364
 365		sk_node_init(&sk->sk_bind_node);
 366	}
 367}
 368
 369static struct sock *__unix_find_socket_byname(struct net *net,
 370					      struct sockaddr_un *sunname,
 371					      int len, unsigned int hash)
 372{
 373	struct sock *s;
 374
 375	sk_for_each(s, &net->unx.table.buckets[hash]) {
 376		struct unix_sock *u = unix_sk(s);
 377
 378		if (u->addr->len == len &&
 379		    !memcmp(u->addr->name, sunname, len))
 380			return s;
 381	}
 382	return NULL;
 383}
 384
 385static inline struct sock *unix_find_socket_byname(struct net *net,
 386						   struct sockaddr_un *sunname,
 387						   int len, unsigned int hash)
 388{
 389	struct sock *s;
 390
 391	spin_lock(&net->unx.table.locks[hash]);
 392	s = __unix_find_socket_byname(net, sunname, len, hash);
 393	if (s)
 394		sock_hold(s);
 395	spin_unlock(&net->unx.table.locks[hash]);
 396	return s;
 397}
 398
 399static struct sock *unix_find_socket_byinode(struct inode *i)
 400{
 401	unsigned int hash = unix_bsd_hash(i);
 402	struct sock *s;
 403
 404	spin_lock(&bsd_socket_locks[hash]);
 405	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
 406		struct dentry *dentry = unix_sk(s)->path.dentry;
 407
 408		if (dentry && d_backing_inode(dentry) == i) {
 409			sock_hold(s);
 410			spin_unlock(&bsd_socket_locks[hash]);
 411			return s;
 412		}
 413	}
 414	spin_unlock(&bsd_socket_locks[hash]);
 415	return NULL;
 416}
 417
 418/* Support code for asymmetrically connected dgram sockets
 419 *
 420 * If a datagram socket is connected to a socket not itself connected
 421 * to the first socket (e.g., /dev/log), clients may only enqueue more
 422 * messages if the present receive queue of the server socket is not
 423 * "too large". This means there's a second writeability condition
 424 * poll and sendmsg need to test. The dgram recv code will do a wake
 425 * up on the peer_wait wait queue of a socket upon reception of a
 426 * datagram which needs to be propagated to sleeping would-be writers
 427 * since these might not have sent anything so far. This can't be
 428 * accomplished via poll_wait because the lifetime of the server
 429 * socket might be less than that of its clients if these break their
 430 * association with it or if the server socket is closed while clients
 431 * are still connected to it and there's no way to inform "a polling
 432 * implementation" that it should let go of a certain wait queue.
 433 *
 434 * In order to propagate a wake up, a wait_queue_entry_t of the client
 435 * socket is enqueued on the peer_wait queue of the server socket
 436 * whose wake function does a wake_up on the ordinary client socket
 437 * wait queue. This connection is established whenever a write (or
 438 * poll for write) hits the flow control condition and is broken when the
 439 * association to the server socket is dissolved or after a wake up
 440 * was relayed.
 441 */
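/* Editor's illustration (not part of the original file; needs <poll.h>,
 * <string.h>, <sys/socket.h>, <sys/un.h>): the scenario above as seen
 * from userspace.  A dgram client connected to a busy receiver such as
 * /dev/log blocks in poll() until the receiver drains its queue; the
 * relayed wake-up described above is what ends this sleep:
 *
 *	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	struct pollfd p = { .fd = fd, .events = POLLOUT };
 *
 *	strncpy(a.sun_path, "/dev/log", sizeof(a.sun_path) - 1);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 *	poll(&p, 1, -1);	// sleeps while the peer's queue is full;
 *				// woken via the peer_wake relay below
 */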
 442
 443static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 444				      void *key)
 445{
 446	struct unix_sock *u;
 447	wait_queue_head_t *u_sleep;
 448
 449	u = container_of(q, struct unix_sock, peer_wake);
 450
 451	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 452			    q);
 453	u->peer_wake.private = NULL;
 454
 455	/* relaying can only happen while the wq still exists */
 456	u_sleep = sk_sleep(&u->sk);
 457	if (u_sleep)
 458		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
 459
 460	return 0;
 461}
 462
 463static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 464{
 465	struct unix_sock *u, *u_other;
 466	int rc;
 467
 468	u = unix_sk(sk);
 469	u_other = unix_sk(other);
 470	rc = 0;
 471	spin_lock(&u_other->peer_wait.lock);
 472
 473	if (!u->peer_wake.private) {
 474		u->peer_wake.private = other;
 475		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 476
 477		rc = 1;
 478	}
 479
 480	spin_unlock(&u_other->peer_wait.lock);
 481	return rc;
 482}
 483
 484static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 485					    struct sock *other)
 486{
 487	struct unix_sock *u, *u_other;
 488
 489	u = unix_sk(sk);
 490	u_other = unix_sk(other);
 491	spin_lock(&u_other->peer_wait.lock);
 492
 493	if (u->peer_wake.private == other) {
 494		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 495		u->peer_wake.private = NULL;
 496	}
 497
 498	spin_unlock(&u_other->peer_wait.lock);
 499}
 500
 501static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 502						   struct sock *other)
 503{
 504	unix_dgram_peer_wake_disconnect(sk, other);
 505	wake_up_interruptible_poll(sk_sleep(sk),
 506				   EPOLLOUT |
 507				   EPOLLWRNORM |
 508				   EPOLLWRBAND);
 509}
 510
 511/* preconditions:
 512 *	- unix_peer(sk) == other
 513 *	- association is stable
 514 */
 515static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 516{
 517	int connected;
 518
 519	connected = unix_dgram_peer_wake_connect(sk, other);
 520
 521	/* If other is SOCK_DEAD, we want to make sure we signal
 522	 * POLLOUT, such that a subsequent write() can get a
 523	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
 524 * to other and it's full, we will hang waiting for POLLOUT.
 525	 */
 526	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
 527		return 1;
 528
 529	if (connected)
 530		unix_dgram_peer_wake_disconnect(sk, other);
 531
 532	return 0;
 533}
 534
 535static int unix_writable(const struct sock *sk)
 536{
 537	return sk->sk_state != TCP_LISTEN &&
 538	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 539}
 540
 541static void unix_write_space(struct sock *sk)
 542{
 543	struct socket_wq *wq;
 544
 545	rcu_read_lock();
 546	if (unix_writable(sk)) {
 547		wq = rcu_dereference(sk->sk_wq);
 548		if (skwq_has_sleeper(wq))
 549			wake_up_interruptible_sync_poll(&wq->wait,
 550				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 551		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 552	}
 553	rcu_read_unlock();
 554}
 555
 556/* When dgram socket disconnects (or changes its peer), we clear its receive
 557 * queue of packets that arrived from the previous peer. First, it allows
 558 * us to do flow control based only on wmem_alloc; second, a sk connected
 559 * to a peer may receive messages only from that peer. */
 560static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 561{
 562	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 563		skb_queue_purge(&sk->sk_receive_queue);
 564		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 565
 566		/* If one link of bidirectional dgram pipe is disconnected,
 567		 * we signal error. Messages are lost. Do not make this,
 568		 * when peer was not connected to us.
 569		 */
 570		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
 571			WRITE_ONCE(other->sk_err, ECONNRESET);
 572			sk_error_report(other);
 573		}
 574	}
 575	other->sk_state = TCP_CLOSE;
 576}
 577
 578static void unix_sock_destructor(struct sock *sk)
 579{
 580	struct unix_sock *u = unix_sk(sk);
 581
 582	skb_queue_purge(&sk->sk_receive_queue);
 583
 584	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
 585	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 586	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
 587	if (!sock_flag(sk, SOCK_DEAD)) {
 588		pr_info("Attempt to release alive unix socket: %p\n", sk);
 589		return;
 590	}
 591
 592	if (u->addr)
 593		unix_release_addr(u->addr);
 594
 595	atomic_long_dec(&unix_nr_socks);
 596	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 597#ifdef UNIX_REFCNT_DEBUG
 598	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
 599		atomic_long_read(&unix_nr_socks));
 600#endif
 601}
 602
 603static void unix_release_sock(struct sock *sk, int embrion)
 604{
 605	struct unix_sock *u = unix_sk(sk);
 606	struct sock *skpair;
 607	struct sk_buff *skb;
 608	struct path path;
 609	int state;
 610
 611	unix_remove_socket(sock_net(sk), sk);
 612	unix_remove_bsd_socket(sk);
 613
 614	/* Clear state */
 615	unix_state_lock(sk);
 616	sock_orphan(sk);
 617	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 618	path	     = u->path;
 619	u->path.dentry = NULL;
 620	u->path.mnt = NULL;
 621	state = sk->sk_state;
 622	sk->sk_state = TCP_CLOSE;
 623
 624	skpair = unix_peer(sk);
 625	unix_peer(sk) = NULL;
 626
 627	unix_state_unlock(sk);
 628
 629#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 630	if (u->oob_skb) {
 631		kfree_skb(u->oob_skb);
 632		u->oob_skb = NULL;
 633	}
 634#endif
 635
 636	wake_up_interruptible_all(&u->peer_wait);
 637
 638	if (skpair != NULL) {
 639		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 640			unix_state_lock(skpair);
 641			/* No more writes */
 642			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
 643			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
 644				WRITE_ONCE(skpair->sk_err, ECONNRESET);
 645			unix_state_unlock(skpair);
 646			skpair->sk_state_change(skpair);
 647			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 648		}
 649
 650		unix_dgram_peer_wake_disconnect(sk, skpair);
 651		sock_put(skpair); /* It may now die */
 652	}
 653
 654	/* Try to flush out this socket. Throw out buffers at least */
 655
 656	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 657		if (state == TCP_LISTEN)
 658			unix_release_sock(skb->sk, 1);
 659		/* passed fds are erased in the kfree_skb hook	      */
 660		UNIXCB(skb).consumed = skb->len;
 661		kfree_skb(skb);
 662	}
 663
 664	if (path.dentry)
 665		path_put(&path);
 666
 667	sock_put(sk);
 668
 669	/* ---- Socket is dead now and most probably destroyed ---- */
 670
 671	/*
 672	 * Fixme: BSD difference: In BSD all sockets connected to us get
 673	 *	  ECONNRESET and we die on the spot. In Linux we behave
 674	 *	  like files and pipes do and wait for the last
 675	 *	  dereference.
 676	 *
 677	 * Can't we simply set sock->err?
 678	 *
 679 *	  What does the above comment talk about? --ANK(980817)
 680	 */
 681
 682	if (READ_ONCE(unix_tot_inflight))
 683		unix_gc();		/* Garbage collect fds */
 684}
 685
 686static void init_peercred(struct sock *sk)
 687{
 688	const struct cred *old_cred;
 689	struct pid *old_pid;
 690
 691	spin_lock(&sk->sk_peer_lock);
 692	old_pid = sk->sk_peer_pid;
 693	old_cred = sk->sk_peer_cred;
 694	sk->sk_peer_pid  = get_pid(task_tgid(current));
 695	sk->sk_peer_cred = get_current_cred();
 696	spin_unlock(&sk->sk_peer_lock);
 697
 698	put_pid(old_pid);
 699	put_cred(old_cred);
 700}
 701
 702static void copy_peercred(struct sock *sk, struct sock *peersk)
 703{
 704	const struct cred *old_cred;
 705	struct pid *old_pid;
 706
 707	if (sk < peersk) {
 708		spin_lock(&sk->sk_peer_lock);
 709		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 710	} else {
 711		spin_lock(&peersk->sk_peer_lock);
 712		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 713	}
 714	old_pid = sk->sk_peer_pid;
 715	old_cred = sk->sk_peer_cred;
 716	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
 717	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 718
 719	spin_unlock(&sk->sk_peer_lock);
 720	spin_unlock(&peersk->sk_peer_lock);
 721
 722	put_pid(old_pid);
 723	put_cred(old_cred);
 724}
 725
 726static int unix_listen(struct socket *sock, int backlog)
 727{
 728	int err;
 729	struct sock *sk = sock->sk;
 730	struct unix_sock *u = unix_sk(sk);
 731
 732	err = -EOPNOTSUPP;
 733	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 734		goto out;	/* Only stream/seqpacket sockets accept */
 735	err = -EINVAL;
 736	if (!u->addr)
 737		goto out;	/* No listens on an unbound socket */
 738	unix_state_lock(sk);
 739	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
 740		goto out_unlock;
 741	if (backlog > sk->sk_max_ack_backlog)
 742		wake_up_interruptible_all(&u->peer_wait);
 743	sk->sk_max_ack_backlog	= backlog;
 744	sk->sk_state		= TCP_LISTEN;
 745	/* set credentials so connect can copy them */
 746	init_peercred(sk);
 747	err = 0;
 748
 749out_unlock:
 750	unix_state_unlock(sk);
 751out:
 752	return err;
 753}
 754
 755static int unix_release(struct socket *);
 756static int unix_bind(struct socket *, struct sockaddr *, int);
 757static int unix_stream_connect(struct socket *, struct sockaddr *,
 758			       int addr_len, int flags);
 759static int unix_socketpair(struct socket *, struct socket *);
 760static int unix_accept(struct socket *, struct socket *, int, bool);
 761static int unix_getname(struct socket *, struct sockaddr *, int);
 762static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
 763static __poll_t unix_dgram_poll(struct file *, struct socket *,
 764				    poll_table *);
 765static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 766#ifdef CONFIG_COMPAT
 767static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 768#endif
 769static int unix_shutdown(struct socket *, int);
 770static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 771static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
 772static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
 773				       struct pipe_inode_info *, size_t size,
 774				       unsigned int flags);
 775static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 776static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 777static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 778static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 779static int unix_dgram_connect(struct socket *, struct sockaddr *,
 780			      int, int);
 781static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
 782static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
 783				  int);
 784
 785#ifdef CONFIG_PROC_FS
 786static int unix_count_nr_fds(struct sock *sk)
 787{
 788	struct sk_buff *skb;
 789	struct unix_sock *u;
 790	int nr_fds = 0;
 791
 792	spin_lock(&sk->sk_receive_queue.lock);
 793	skb = skb_peek(&sk->sk_receive_queue);
 794	while (skb) {
 795		u = unix_sk(skb->sk);
 796		nr_fds += atomic_read(&u->scm_stat.nr_fds);
 797		skb = skb_peek_next(skb, &sk->sk_receive_queue);
 798	}
 799	spin_unlock(&sk->sk_receive_queue.lock);
 800
 801	return nr_fds;
 802}
 803
 804static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 805{
 806	struct sock *sk = sock->sk;
 807	unsigned char s_state;
 808	struct unix_sock *u;
 809	int nr_fds = 0;
 810
 811	if (sk) {
 812		s_state = READ_ONCE(sk->sk_state);
 813		u = unix_sk(sk);
 814
 815		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
 816		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
 817		 * SOCK_DGRAM is ordinary. So, no lock is needed.
 818		 */
 819		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
 820			nr_fds = atomic_read(&u->scm_stat.nr_fds);
 821		else if (s_state == TCP_LISTEN)
 822			nr_fds = unix_count_nr_fds(sk);
 823
 824		seq_printf(m, "scm_fds: %u\n", nr_fds);
 825	}
 826}
 827#else
 828#define unix_show_fdinfo NULL
 829#endif
 830
 831static const struct proto_ops unix_stream_ops = {
 832	.family =	PF_UNIX,
 833	.owner =	THIS_MODULE,
 834	.release =	unix_release,
 835	.bind =		unix_bind,
 836	.connect =	unix_stream_connect,
 837	.socketpair =	unix_socketpair,
 838	.accept =	unix_accept,
 839	.getname =	unix_getname,
 840	.poll =		unix_poll,
 841	.ioctl =	unix_ioctl,
 842#ifdef CONFIG_COMPAT
 843	.compat_ioctl =	unix_compat_ioctl,
 844#endif
 845	.listen =	unix_listen,
 846	.shutdown =	unix_shutdown,
 847	.sendmsg =	unix_stream_sendmsg,
 848	.recvmsg =	unix_stream_recvmsg,
 849	.read_skb =	unix_stream_read_skb,
 850	.mmap =		sock_no_mmap,
 851	.splice_read =	unix_stream_splice_read,
 852	.set_peek_off =	sk_set_peek_off,
 853	.show_fdinfo =	unix_show_fdinfo,
 854};
 855
 856static const struct proto_ops unix_dgram_ops = {
 857	.family =	PF_UNIX,
 858	.owner =	THIS_MODULE,
 859	.release =	unix_release,
 860	.bind =		unix_bind,
 861	.connect =	unix_dgram_connect,
 862	.socketpair =	unix_socketpair,
 863	.accept =	sock_no_accept,
 864	.getname =	unix_getname,
 865	.poll =		unix_dgram_poll,
 866	.ioctl =	unix_ioctl,
 867#ifdef CONFIG_COMPAT
 868	.compat_ioctl =	unix_compat_ioctl,
 869#endif
 870	.listen =	sock_no_listen,
 871	.shutdown =	unix_shutdown,
 872	.sendmsg =	unix_dgram_sendmsg,
 873	.read_skb =	unix_read_skb,
 874	.recvmsg =	unix_dgram_recvmsg,
 875	.mmap =		sock_no_mmap,
 876	.set_peek_off =	sk_set_peek_off,
 877	.show_fdinfo =	unix_show_fdinfo,
 878};
 879
 880static const struct proto_ops unix_seqpacket_ops = {
 881	.family =	PF_UNIX,
 882	.owner =	THIS_MODULE,
 883	.release =	unix_release,
 884	.bind =		unix_bind,
 885	.connect =	unix_stream_connect,
 886	.socketpair =	unix_socketpair,
 887	.accept =	unix_accept,
 888	.getname =	unix_getname,
 889	.poll =		unix_dgram_poll,
 890	.ioctl =	unix_ioctl,
 891#ifdef CONFIG_COMPAT
 892	.compat_ioctl =	unix_compat_ioctl,
 893#endif
 894	.listen =	unix_listen,
 895	.shutdown =	unix_shutdown,
 896	.sendmsg =	unix_seqpacket_sendmsg,
 897	.recvmsg =	unix_seqpacket_recvmsg,
 898	.mmap =		sock_no_mmap,
 899	.set_peek_off =	sk_set_peek_off,
 900	.show_fdinfo =	unix_show_fdinfo,
 901};
 902
 903static void unix_close(struct sock *sk, long timeout)
 904{
 905	/* Nothing to do here, unix socket does not need a ->close().
 906	 * This is merely for sockmap.
 907	 */
 908}
 909
 910static void unix_unhash(struct sock *sk)
 911{
 912	/* Nothing to do here, unix socket does not need a ->unhash().
 913	 * This is merely for sockmap.
 914	 */
 915}
 916
 917static bool unix_bpf_bypass_getsockopt(int level, int optname)
 918{
 919	if (level == SOL_SOCKET) {
 920		switch (optname) {
 921		case SO_PEERPIDFD:
 922			return true;
 923		default:
 924			return false;
 925		}
 926	}
 927
 928	return false;
 929}
 930
 931struct proto unix_dgram_proto = {
 932	.name			= "UNIX",
 933	.owner			= THIS_MODULE,
 934	.obj_size		= sizeof(struct unix_sock),
 935	.close			= unix_close,
 936	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 937#ifdef CONFIG_BPF_SYSCALL
 938	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
 939#endif
 940};
 941
 942struct proto unix_stream_proto = {
 943	.name			= "UNIX-STREAM",
 944	.owner			= THIS_MODULE,
 945	.obj_size		= sizeof(struct unix_sock),
 946	.close			= unix_close,
 947	.unhash			= unix_unhash,
 948	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 949#ifdef CONFIG_BPF_SYSCALL
 950	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
 951#endif
 952};
 953
 954static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
 955{
 956	struct unix_sock *u;
 957	struct sock *sk;
 958	int err;
 959
 960	atomic_long_inc(&unix_nr_socks);
 961	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
 962		err = -ENFILE;
 963		goto err;
 964	}
 965
 966	if (type == SOCK_STREAM)
 967		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
 968	else /* dgram and seqpacket */
 969		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
 970
 971	if (!sk) {
 972		err = -ENOMEM;
 973		goto err;
 974	}
 975
 976	sock_init_data(sock, sk);
 977
 978	sk->sk_hash		= unix_unbound_hash(sk);
 979	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
 980	sk->sk_write_space	= unix_write_space;
 981	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
 982	sk->sk_destruct		= unix_sock_destructor;
 983	u	  = unix_sk(sk);
 984	u->path.dentry = NULL;
 985	u->path.mnt = NULL;
 986	spin_lock_init(&u->lock);
 987	atomic_long_set(&u->inflight, 0);
 988	INIT_LIST_HEAD(&u->link);
 989	mutex_init(&u->iolock); /* single task reading lock */
 990	mutex_init(&u->bindlock); /* single task binding lock */
 991	init_waitqueue_head(&u->peer_wait);
 992	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 993	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
 994	unix_insert_unbound_socket(net, sk);
 995
 996	sock_prot_inuse_add(net, sk->sk_prot, 1);
 997
 998	return sk;
 999
1000err:
1001	atomic_long_dec(&unix_nr_socks);
1002	return ERR_PTR(err);
1003}
1004
1005static int unix_create(struct net *net, struct socket *sock, int protocol,
1006		       int kern)
1007{
1008	struct sock *sk;
1009
1010	if (protocol && protocol != PF_UNIX)
1011		return -EPROTONOSUPPORT;
1012
1013	sock->state = SS_UNCONNECTED;
1014
1015	switch (sock->type) {
1016	case SOCK_STREAM:
1017		sock->ops = &unix_stream_ops;
1018		break;
1019		/*
1020		 *	Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
1021		 *	nothing uses it.
1022		 */
1023	case SOCK_RAW:
1024		sock->type = SOCK_DGRAM;
1025		fallthrough;
1026	case SOCK_DGRAM:
1027		sock->ops = &unix_dgram_ops;
1028		break;
1029	case SOCK_SEQPACKET:
1030		sock->ops = &unix_seqpacket_ops;
1031		break;
1032	default:
1033		return -ESOCKTNOSUPPORT;
1034	}
1035
1036	sk = unix_create1(net, sock, kern, sock->type);
1037	if (IS_ERR(sk))
1038		return PTR_ERR(sk);
1039
1040	return 0;
1041}
1042
1043static int unix_release(struct socket *sock)
1044{
1045	struct sock *sk = sock->sk;
1046
1047	if (!sk)
1048		return 0;
1049
1050	sk->sk_prot->close(sk, 0);
1051	unix_release_sock(sk, 0);
1052	sock->sk = NULL;
1053
1054	return 0;
1055}
1056
1057static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1058				  int type)
1059{
1060	struct inode *inode;
1061	struct path path;
1062	struct sock *sk;
1063	int err;
1064
1065	unix_mkname_bsd(sunaddr, addr_len);
1066	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1067	if (err)
1068		goto fail;
1069
1070	err = path_permission(&path, MAY_WRITE);
1071	if (err)
1072		goto path_put;
1073
1074	err = -ECONNREFUSED;
1075	inode = d_backing_inode(path.dentry);
1076	if (!S_ISSOCK(inode->i_mode))
1077		goto path_put;
1078
1079	sk = unix_find_socket_byinode(inode);
1080	if (!sk)
1081		goto path_put;
1082
1083	err = -EPROTOTYPE;
1084	if (sk->sk_type == type)
1085		touch_atime(&path);
1086	else
1087		goto sock_put;
1088
1089	path_put(&path);
1090
1091	return sk;
1092
1093sock_put:
1094	sock_put(sk);
1095path_put:
1096	path_put(&path);
1097fail:
1098	return ERR_PTR(err);
1099}
1100
1101static struct sock *unix_find_abstract(struct net *net,
1102				       struct sockaddr_un *sunaddr,
1103				       int addr_len, int type)
1104{
1105	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1106	struct dentry *dentry;
1107	struct sock *sk;
1108
1109	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1110	if (!sk)
1111		return ERR_PTR(-ECONNREFUSED);
1112
1113	dentry = unix_sk(sk)->path.dentry;
1114	if (dentry)
1115		touch_atime(&unix_sk(sk)->path);
1116
1117	return sk;
1118}
1119
1120static struct sock *unix_find_other(struct net *net,
1121				    struct sockaddr_un *sunaddr,
1122				    int addr_len, int type)
1123{
1124	struct sock *sk;
1125
1126	if (sunaddr->sun_path[0])
1127		sk = unix_find_bsd(sunaddr, addr_len, type);
1128	else
1129		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1130
1131	return sk;
1132}
1133
1134static int unix_autobind(struct sock *sk)
1135{
1136	unsigned int new_hash, old_hash = sk->sk_hash;
1137	struct unix_sock *u = unix_sk(sk);
1138	struct net *net = sock_net(sk);
1139	struct unix_address *addr;
1140	u32 lastnum, ordernum;
1141	int err;
1142
1143	err = mutex_lock_interruptible(&u->bindlock);
1144	if (err)
1145		return err;
1146
1147	if (u->addr)
1148		goto out;
1149
1150	err = -ENOMEM;
1151	addr = kzalloc(sizeof(*addr) +
1152		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1153	if (!addr)
1154		goto out;
1155
1156	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1157	addr->name->sun_family = AF_UNIX;
1158	refcount_set(&addr->refcnt, 1);
1159
1160	ordernum = get_random_u32();
1161	lastnum = ordernum & 0xFFFFF;
1162retry:
1163	ordernum = (ordernum + 1) & 0xFFFFF;
1164	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1165
1166	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1167	unix_table_double_lock(net, old_hash, new_hash);
1168
1169	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1170		unix_table_double_unlock(net, old_hash, new_hash);
1171
1172		/* __unix_find_socket_byname() may take a long time if many names
1173		 * are already in use.
1174		 */
1175		cond_resched();
1176
1177		if (ordernum == lastnum) {
1178			/* Give up if all names seem to be in use. */
1179			err = -ENOSPC;
1180			unix_release_addr(addr);
1181			goto out;
1182		}
1183
1184		goto retry;
1185	}
1186
1187	__unix_set_addr_hash(net, sk, addr, new_hash);
1188	unix_table_double_unlock(net, old_hash, new_hash);
1189	err = 0;
1190
1191out:	mutex_unlock(&u->bindlock);
1192	return err;
1193}
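/* Editor's illustration (not part of the original file): triggering and
 * observing autobind from userspace; the zero-length bind is the
 * documented trigger:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	socklen_t alen = sizeof(a);
 *	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(sa_family_t));	// autobind
 *	getsockname(fd, (struct sockaddr *)&a, &alen);
 *	// now alen == offsetof(struct sockaddr_un, sun_path) + 6 and
 *	// a.sun_path holds a leading NUL plus five hex digits,
 *	// e.g. "\0" "0a2f3"
 */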
1194
1195static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1196			 int addr_len)
1197{
1198	umode_t mode = S_IFSOCK |
1199	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1200	unsigned int new_hash, old_hash = sk->sk_hash;
1201	struct unix_sock *u = unix_sk(sk);
1202	struct net *net = sock_net(sk);
1203	struct mnt_idmap *idmap;
1204	struct unix_address *addr;
1205	struct dentry *dentry;
1206	struct path parent;
1207	int err;
1208
1209	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1210	addr = unix_create_addr(sunaddr, addr_len);
1211	if (!addr)
1212		return -ENOMEM;
1213
1214	/*
1215	 * Get the parent directory, calculate the hash for last
1216	 * component.
1217	 */
1218	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1219	if (IS_ERR(dentry)) {
1220		err = PTR_ERR(dentry);
1221		goto out;
1222	}
1223
1224	/*
1225	 * All right, let's create it.
1226	 */
1227	idmap = mnt_idmap(parent.mnt);
1228	err = security_path_mknod(&parent, dentry, mode, 0);
1229	if (!err)
1230		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1231	if (err)
1232		goto out_path;
1233	err = mutex_lock_interruptible(&u->bindlock);
1234	if (err)
1235		goto out_unlink;
1236	if (u->addr)
1237		goto out_unlock;
1238
1239	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1240	unix_table_double_lock(net, old_hash, new_hash);
1241	u->path.mnt = mntget(parent.mnt);
1242	u->path.dentry = dget(dentry);
1243	__unix_set_addr_hash(net, sk, addr, new_hash);
1244	unix_table_double_unlock(net, old_hash, new_hash);
1245	unix_insert_bsd_socket(sk);
1246	mutex_unlock(&u->bindlock);
1247	done_path_create(&parent, dentry);
1248	return 0;
1249
1250out_unlock:
1251	mutex_unlock(&u->bindlock);
1252	err = -EINVAL;
1253out_unlink:
1254	/* failed after successful mknod?  unlink what we'd created... */
1255	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1256out_path:
1257	done_path_create(&parent, dentry);
1258out:
1259	unix_release_addr(addr);
1260	return err == -EEXIST ? -EADDRINUSE : err;
1261}
1262
1263static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1264			      int addr_len)
1265{
1266	unsigned int new_hash, old_hash = sk->sk_hash;
1267	struct unix_sock *u = unix_sk(sk);
1268	struct net *net = sock_net(sk);
1269	struct unix_address *addr;
1270	int err;
1271
1272	addr = unix_create_addr(sunaddr, addr_len);
1273	if (!addr)
1274		return -ENOMEM;
1275
1276	err = mutex_lock_interruptible(&u->bindlock);
1277	if (err)
1278		goto out;
1279
1280	if (u->addr) {
1281		err = -EINVAL;
1282		goto out_mutex;
1283	}
1284
1285	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1286	unix_table_double_lock(net, old_hash, new_hash);
1287
1288	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1289		goto out_spin;
1290
1291	__unix_set_addr_hash(net, sk, addr, new_hash);
1292	unix_table_double_unlock(net, old_hash, new_hash);
1293	mutex_unlock(&u->bindlock);
1294	return 0;
1295
1296out_spin:
1297	unix_table_double_unlock(net, old_hash, new_hash);
1298	err = -EADDRINUSE;
1299out_mutex:
1300	mutex_unlock(&u->bindlock);
1301out:
1302	unix_release_addr(addr);
1303	return err;
1304}
1305
1306static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1307{
1308	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1309	struct sock *sk = sock->sk;
1310	int err;
1311
1312	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1313	    sunaddr->sun_family == AF_UNIX)
1314		return unix_autobind(sk);
1315
1316	err = unix_validate_addr(sunaddr, addr_len);
1317	if (err)
1318		return err;
1319
1320	if (sunaddr->sun_path[0])
1321		err = unix_bind_bsd(sk, sunaddr, addr_len);
1322	else
1323		err = unix_bind_abstract(sk, sunaddr, addr_len);
1324
1325	return err;
1326}
1327
1328static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1329{
1330	if (unlikely(sk1 == sk2) || !sk2) {
1331		unix_state_lock(sk1);
1332		return;
1333	}
1334	if (sk1 > sk2)
1335		swap(sk1, sk2);
1336
1337	unix_state_lock(sk1);
1338	unix_state_lock_nested(sk2, U_LOCK_SECOND);
1339}
1340
1341static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1342{
1343	if (unlikely(sk1 == sk2) || !sk2) {
1344		unix_state_unlock(sk1);
1345		return;
1346	}
1347	unix_state_unlock(sk1);
1348	unix_state_unlock(sk2);
1349}
1350
1351static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1352			      int alen, int flags)
1353{
1354	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1355	struct sock *sk = sock->sk;
1356	struct sock *other;
1357	int err;
1358
1359	err = -EINVAL;
1360	if (alen < offsetofend(struct sockaddr, sa_family))
1361		goto out;
1362
1363	if (addr->sa_family != AF_UNSPEC) {
1364		err = unix_validate_addr(sunaddr, alen);
1365		if (err)
1366			goto out;
1367
1368		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1369		if (err)
1370			goto out;
1371
1372		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1373		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1374		    !unix_sk(sk)->addr) {
1375			err = unix_autobind(sk);
1376			if (err)
1377				goto out;
1378		}
1379
1380restart:
1381		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1382		if (IS_ERR(other)) {
1383			err = PTR_ERR(other);
1384			goto out;
1385		}
1386
1387		unix_state_double_lock(sk, other);
1388
1389		/* Apparently VFS overslept socket death. Retry. */
1390		if (sock_flag(other, SOCK_DEAD)) {
1391			unix_state_double_unlock(sk, other);
1392			sock_put(other);
1393			goto restart;
1394		}
1395
1396		err = -EPERM;
1397		if (!unix_may_send(sk, other))
1398			goto out_unlock;
1399
1400		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1401		if (err)
1402			goto out_unlock;
1403
1404		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1405	} else {
1406		/*
1407		 *	1003.1g breaking connected state with AF_UNSPEC
1408		 */
1409		other = NULL;
1410		unix_state_double_lock(sk, other);
1411	}
1412
1413	/*
1414	 * If it was connected, reconnect.
1415	 */
1416	if (unix_peer(sk)) {
1417		struct sock *old_peer = unix_peer(sk);
1418
1419		unix_peer(sk) = other;
1420		if (!other)
1421			sk->sk_state = TCP_CLOSE;
1422		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1423
1424		unix_state_double_unlock(sk, other);
1425
1426		if (other != old_peer)
1427			unix_dgram_disconnected(sk, old_peer);
1428		sock_put(old_peer);
1429	} else {
1430		unix_peer(sk) = other;
1431		unix_state_double_unlock(sk, other);
1432	}
1433
1434	return 0;
1435
1436out_unlock:
1437	unix_state_double_unlock(sk, other);
1438	sock_put(other);
1439out:
1440	return err;
1441}
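/* Editor's illustration: per the 1003.1g branch above, an existing
 * datagram association is dissolved by connecting to AF_UNSPEC (fd is
 * assumed to be a connected SOCK_DGRAM socket):
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// unix_peer(sk) becomes NULL and
 *					// sk_state falls back to TCP_CLOSE
 */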
1442
1443static long unix_wait_for_peer(struct sock *other, long timeo)
1444	__releases(&unix_sk(other)->lock)
1445{
1446	struct unix_sock *u = unix_sk(other);
1447	int sched;
1448	DEFINE_WAIT(wait);
1449
1450	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1451
1452	sched = !sock_flag(other, SOCK_DEAD) &&
1453		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1454		unix_recvq_full_lockless(other);
1455
1456	unix_state_unlock(other);
1457
1458	if (sched)
1459		timeo = schedule_timeout(timeo);
1460
1461	finish_wait(&u->peer_wait, &wait);
1462	return timeo;
1463}
1464
1465static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1466			       int addr_len, int flags)
1467{
1468	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1469	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1470	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1471	struct net *net = sock_net(sk);
1472	struct sk_buff *skb = NULL;
1473	long timeo;
1474	int err;
1475	int st;
1476
1477	err = unix_validate_addr(sunaddr, addr_len);
1478	if (err)
1479		goto out;
1480
1481	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1482	if (err)
1483		goto out;
1484
1485	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1486	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1487		err = unix_autobind(sk);
1488		if (err)
1489			goto out;
1490	}
1491
1492	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1493
1494	/* First of all allocate resources.
1495	   If we will make it after state is locked,
1496	   we will have to recheck all again in any case.
1497	 */
1498
1499	/* create new sock for complete connection */
1500	newsk = unix_create1(net, NULL, 0, sock->type);
1501	if (IS_ERR(newsk)) {
1502		err = PTR_ERR(newsk);
1503		newsk = NULL;
1504		goto out;
1505	}
1506
1507	err = -ENOMEM;
1508
1509	/* Allocate skb for sending to listening sock */
1510	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1511	if (skb == NULL)
1512		goto out;
1513
1514restart:
1515	/*  Find listening sock. */
1516	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1517	if (IS_ERR(other)) {
1518		err = PTR_ERR(other);
1519		other = NULL;
1520		goto out;
1521	}
1522
1523	/* Latch state of peer */
1524	unix_state_lock(other);
1525
1526	/* Apparently VFS overslept socket death. Retry. */
1527	if (sock_flag(other, SOCK_DEAD)) {
1528		unix_state_unlock(other);
1529		sock_put(other);
1530		goto restart;
1531	}
1532
1533	err = -ECONNREFUSED;
1534	if (other->sk_state != TCP_LISTEN)
1535		goto out_unlock;
1536	if (other->sk_shutdown & RCV_SHUTDOWN)
1537		goto out_unlock;
1538
1539	if (unix_recvq_full(other)) {
1540		err = -EAGAIN;
1541		if (!timeo)
1542			goto out_unlock;
1543
1544		timeo = unix_wait_for_peer(other, timeo);
1545
1546		err = sock_intr_errno(timeo);
1547		if (signal_pending(current))
1548			goto out;
1549		sock_put(other);
1550		goto restart;
1551	}
1552
1553	/* Latch our state.
1554
1555	   It is a tricky place. We need to grab our state lock and cannot
1556	   drop the lock on the peer. It is dangerous because deadlock is
1557	   possible. The connect-to-self case and a simultaneous
1558	   attempt to connect are eliminated by checking the socket
1559	   state. other is TCP_LISTEN; if sk is TCP_LISTEN, we
1560	   check this before attempting to grab the lock.
1561
1562	   Well, and we have to recheck the state after the socket is locked.
1563	 */
1564	st = sk->sk_state;
1565
1566	switch (st) {
1567	case TCP_CLOSE:
1568		/* This is ok... continue with connect */
1569		break;
1570	case TCP_ESTABLISHED:
1571		/* Socket is already connected */
1572		err = -EISCONN;
1573		goto out_unlock;
1574	default:
1575		err = -EINVAL;
1576		goto out_unlock;
1577	}
1578
1579	unix_state_lock_nested(sk, U_LOCK_SECOND);
1580
1581	if (sk->sk_state != st) {
1582		unix_state_unlock(sk);
1583		unix_state_unlock(other);
1584		sock_put(other);
1585		goto restart;
1586	}
1587
1588	err = security_unix_stream_connect(sk, other, newsk);
1589	if (err) {
1590		unix_state_unlock(sk);
1591		goto out_unlock;
1592	}
1593
1594	/* The way is open! Quickly set all the necessary fields... */
1595
1596	sock_hold(sk);
1597	unix_peer(newsk)	= sk;
1598	newsk->sk_state		= TCP_ESTABLISHED;
1599	newsk->sk_type		= sk->sk_type;
1600	init_peercred(newsk);
1601	newu = unix_sk(newsk);
1602	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1603	otheru = unix_sk(other);
1604
1605	/* copy address information from listening to new sock
1606	 *
1607	 * The contents of *(otheru->addr) and otheru->path
1608	 * are seen fully set up here, since we have found
1609	 * otheru in hash under its lock.  Insertion into the
1610	 * hash chain we'd found it in had been done in an
1611	 * earlier critical area protected by the chain's lock,
1612	 * the same one where we'd set *(otheru->addr) contents,
1613	 * as well as otheru->path and otheru->addr itself.
1614	 *
1615	 * Using smp_store_release() here to set newu->addr
1616	 * is enough to make those stores, as well as stores
1617	 * to newu->path visible to anyone who gets newu->addr
1618	 * by smp_load_acquire().  IOW, the same warranties
1619	 * as for unix_sock instances bound in unix_bind() or
1620	 * in unix_autobind().
1621	 */
1622	if (otheru->path.dentry) {
1623		path_get(&otheru->path);
1624		newu->path = otheru->path;
1625	}
1626	refcount_inc(&otheru->addr->refcnt);
1627	smp_store_release(&newu->addr, otheru->addr);
1628
1629	/* Set credentials */
1630	copy_peercred(sk, other);
1631
1632	sock->state	= SS_CONNECTED;
1633	sk->sk_state	= TCP_ESTABLISHED;
1634	sock_hold(newsk);
1635
1636	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1637	unix_peer(sk)	= newsk;
1638
1639	unix_state_unlock(sk);
1640
1641	/* take ten and send info to listening sock */
1642	spin_lock(&other->sk_receive_queue.lock);
1643	__skb_queue_tail(&other->sk_receive_queue, skb);
1644	spin_unlock(&other->sk_receive_queue.lock);
1645	unix_state_unlock(other);
1646	other->sk_data_ready(other);
1647	sock_put(other);
1648	return 0;
1649
1650out_unlock:
1651	if (other)
1652		unix_state_unlock(other);
1653
1654out:
1655	kfree_skb(skb);
1656	if (newsk)
1657		unix_release_sock(newsk, 0);
1658	if (other)
1659		sock_put(other);
1660	return err;
1661}
1662
1663static int unix_socketpair(struct socket *socka, struct socket *sockb)
1664{
1665	struct sock *ska = socka->sk, *skb = sockb->sk;
1666
1667	/* Join our sockets back to back */
1668	sock_hold(ska);
1669	sock_hold(skb);
1670	unix_peer(ska) = skb;
1671	unix_peer(skb) = ska;
1672	init_peercred(ska);
1673	init_peercred(skb);
1674
1675	ska->sk_state = TCP_ESTABLISHED;
1676	skb->sk_state = TCP_ESTABLISHED;
1677	socka->state  = SS_CONNECTED;
1678	sockb->state  = SS_CONNECTED;
1679	return 0;
1680}
1681
1682static void unix_sock_inherit_flags(const struct socket *old,
1683				    struct socket *new)
1684{
1685	if (test_bit(SOCK_PASSCRED, &old->flags))
1686		set_bit(SOCK_PASSCRED, &new->flags);
1687	if (test_bit(SOCK_PASSPIDFD, &old->flags))
1688		set_bit(SOCK_PASSPIDFD, &new->flags);
1689	if (test_bit(SOCK_PASSSEC, &old->flags))
1690		set_bit(SOCK_PASSSEC, &new->flags);
1691}
1692
1693static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1694		       bool kern)
1695{
1696	struct sock *sk = sock->sk;
1697	struct sock *tsk;
1698	struct sk_buff *skb;
1699	int err;
1700
1701	err = -EOPNOTSUPP;
1702	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1703		goto out;
1704
1705	err = -EINVAL;
1706	if (sk->sk_state != TCP_LISTEN)
1707		goto out;
1708
1709	/* If socket state is TCP_LISTEN it cannot change (for now...),
1710	 * so that no locks are necessary.
1711	 */
1712
1713	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1714				&err);
1715	if (!skb) {
1716		/* This means receive shutdown. */
1717		if (err == 0)
1718			err = -EINVAL;
1719		goto out;
1720	}
1721
1722	tsk = skb->sk;
1723	skb_free_datagram(sk, skb);
1724	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1725
1726	/* attach accepted sock to socket */
1727	unix_state_lock(tsk);
1728	newsock->state = SS_CONNECTED;
1729	unix_sock_inherit_flags(sock, newsock);
1730	sock_graft(tsk, newsock);
1731	unix_state_unlock(tsk);
1732	return 0;
1733
1734out:
1735	return err;
1736}
1737
1738
1739static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1740{
1741	struct sock *sk = sock->sk;
1742	struct unix_address *addr;
1743	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1744	int err = 0;
1745
1746	if (peer) {
1747		sk = unix_peer_get(sk);
1748
1749		err = -ENOTCONN;
1750		if (!sk)
1751			goto out;
1752		err = 0;
1753	} else {
1754		sock_hold(sk);
1755	}
1756
1757	addr = smp_load_acquire(&unix_sk(sk)->addr);
1758	if (!addr) {
1759		sunaddr->sun_family = AF_UNIX;
1760		sunaddr->sun_path[0] = 0;
1761		err = offsetof(struct sockaddr_un, sun_path);
1762	} else {
1763		err = addr->len;
1764		memcpy(sunaddr, addr->name, addr->len);
1765
1766		if (peer)
1767			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1768					       CGROUP_UNIX_GETPEERNAME);
1769		else
1770			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1771					       CGROUP_UNIX_GETSOCKNAME);
1772	}
1773	sock_put(sk);
1774out:
1775	return err;
1776}
1777
1778static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1779{
1780	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1781
1782	/*
1783	 * Garbage collection of unix sockets starts by selecting a set of
1784	 * candidate sockets which have reference only from being in flight
1785	 * (total_refs == inflight_refs).  This condition is checked once during
1786	 * the candidate collection phase, and candidates are marked as such, so
1787	 * that non-candidates can later be ignored.  While inflight_refs is
1788	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1789	 * is an instantaneous decision.
1790	 *
1791	 * Once a candidate, however, the socket must not be reinstalled into a
1792	 * file descriptor while the garbage collection is in progress.
1793	 *
1794	 * If the above conditions are met, then the directed graph of
1795	 * candidates (*) does not change while unix_gc_lock is held.
1796	 *
1797	 * Any operation that changes the file count through file descriptors
1798	 * (dup, close, sendmsg) does not change the graph since candidates are
1799	 * not installed in fds.
1800	 *
1801	 * Dequeueing a candidate via recvmsg would install it into an fd, but
1802	 * that takes unix_gc_lock to decrement the inflight count, so it's
1803	 * serialized with garbage collection.
1804	 *
1805	 * MSG_PEEK is special in that it does not change the inflight count,
1806	 * yet does install the socket into an fd.  The following lock/unlock
1807	 * pair is to ensure serialization with garbage collection.  It must be
1808	 * done between incrementing the file count and installing the file into
1809	 * an fd.
1810	 *
1811	 * If garbage collection starts after the barrier provided by the
1812	 * lock/unlock, then it will see the elevated refcount and not mark this
1813	 * as a candidate.  If a garbage collection is already in progress
1814	 * before the file count was incremented, then the lock/unlock pair will
1815	 * ensure that garbage collection is finished before progressing to
1816	 * installing the fd.
1817	 *
1818	 * (*) A -> B where B is on the queue of A or B is on the queue of C
1819	 * which is on the queue of listening socket A.
1820	 */
1821	spin_lock(&unix_gc_lock);
1822	spin_unlock(&unix_gc_lock);
1823}
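/* Editor's illustration (buffer sizes are arbitrary): the MSG_PEEK case
 * the comment above guards against - peeking a message carrying
 * SCM_RIGHTS installs the fd without touching the inflight count:
 *
 *	char data[128], cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *
 *	recvmsg(fd, &mh, MSG_PEEK);	// fd installed; the lock/unlock
 *					// pair above serializes with gc
 */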
1824
1825static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1826{
1827	int err = 0;
1828
1829	UNIXCB(skb).pid  = get_pid(scm->pid);
1830	UNIXCB(skb).uid = scm->creds.uid;
1831	UNIXCB(skb).gid = scm->creds.gid;
1832	UNIXCB(skb).fp = NULL;
1833	unix_get_secdata(scm, skb);
1834	if (scm->fp && send_fds)
1835		err = unix_attach_fds(scm, skb);
1836
1837	skb->destructor = unix_destruct_scm;
1838	return err;
1839}
1840
1841static bool unix_passcred_enabled(const struct socket *sock,
1842				  const struct sock *other)
1843{
1844	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1845	       test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1846	       !other->sk_socket ||
1847	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1848	       test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1849}
1850
1851/*
1852 * Some apps rely on write() giving SCM_CREDENTIALS.
1853 * We include credentials if the source or destination socket
1854 * asserted SOCK_PASSCRED.
1855 */
1856static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1857			    const struct sock *other)
1858{
1859	if (UNIXCB(skb).pid)
1860		return;
1861	if (unix_passcred_enabled(sock, other)) {
1862		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1863		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1864	}
1865}
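/* Editor's illustration (rfd/wfd are assumed to be the two ends of a
 * connected unix socket pair): with SO_PASSCRED set on the receiver,
 * even a plain write() on the sender makes maybe_add_creds() attach
 * credentials:
 *
 *	int on = 1;
 *
 *	setsockopt(rfd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	write(wfd, "x", 1);	// receiver's recvmsg() now yields a
 *				// struct ucred in SCM_CREDENTIALS
 */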
1866
1867static bool unix_skb_scm_eq(struct sk_buff *skb,
1868			    struct scm_cookie *scm)
1869{
1870	return UNIXCB(skb).pid == scm->pid &&
1871	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1872	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1873	       unix_secdata_eq(scm, skb);
1874}
1875
1876static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1877{
1878	struct scm_fp_list *fp = UNIXCB(skb).fp;
1879	struct unix_sock *u = unix_sk(sk);
1880
1881	if (unlikely(fp && fp->count))
1882		atomic_add(fp->count, &u->scm_stat.nr_fds);
1883}
1884
1885static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1886{
1887	struct scm_fp_list *fp = UNIXCB(skb).fp;
1888	struct unix_sock *u = unix_sk(sk);
1889
1890	if (unlikely(fp && fp->count))
1891		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1892}
1893
1894/*
1895 *	Send AF_UNIX data.
1896 */
1897
1898static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1899			      size_t len)
1900{
1901	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1902	struct sock *sk = sock->sk, *other = NULL;
1903	struct unix_sock *u = unix_sk(sk);
1904	struct scm_cookie scm;
1905	struct sk_buff *skb;
1906	int data_len = 0;
1907	int sk_locked;
1908	long timeo;
1909	int err;
1910
1911	wait_for_unix_gc();
1912	err = scm_send(sock, msg, &scm, false);
1913	if (err < 0)
1914		return err;
1915
1916	err = -EOPNOTSUPP;
1917	if (msg->msg_flags&MSG_OOB)
1918		goto out;
1919
1920	if (msg->msg_namelen) {
1921		err = unix_validate_addr(sunaddr, msg->msg_namelen);
1922		if (err)
1923			goto out;
1924
1925		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1926							    msg->msg_name,
1927							    &msg->msg_namelen,
1928							    NULL);
1929		if (err)
1930			goto out;
1931	} else {
1932		sunaddr = NULL;
1933		err = -ENOTCONN;
1934		other = unix_peer_get(sk);
1935		if (!other)
1936			goto out;
1937	}
1938
1939	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1940	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1941		err = unix_autobind(sk);
1942		if (err)
1943			goto out;
1944	}
1945
1946	err = -EMSGSIZE;
1947	if (len > sk->sk_sndbuf - 32)
1948		goto out;
1949
1950	if (len > SKB_MAX_ALLOC) {
1951		data_len = min_t(size_t,
1952				 len - SKB_MAX_ALLOC,
1953				 MAX_SKB_FRAGS * PAGE_SIZE);
1954		data_len = PAGE_ALIGN(data_len);
1955
1956		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1957	}
1958
1959	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1960				   msg->msg_flags & MSG_DONTWAIT, &err,
1961				   PAGE_ALLOC_COSTLY_ORDER);
1962	if (skb == NULL)
1963		goto out;
1964
1965	err = unix_scm_to_skb(&scm, skb, true);
1966	if (err < 0)
1967		goto out_free;
1968
1969	skb_put(skb, len - data_len);
1970	skb->data_len = data_len;
1971	skb->len = len;
1972	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1973	if (err)
1974		goto out_free;
1975
1976	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1977
1978restart:
1979	if (!other) {
1980		err = -ECONNRESET;
1981		if (sunaddr == NULL)
1982			goto out_free;
1983
1984		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
1985					sk->sk_type);
1986		if (IS_ERR(other)) {
1987			err = PTR_ERR(other);
1988			other = NULL;
1989			goto out_free;
1990		}
1991	}
1992
1993	if (sk_filter(other, skb) < 0) {
1994		/* Toss the packet but do not return any error to the sender */
1995		err = len;
1996		goto out_free;
1997	}
1998
1999	sk_locked = 0;
2000	unix_state_lock(other);
2001restart_locked:
2002	err = -EPERM;
2003	if (!unix_may_send(sk, other))
2004		goto out_unlock;
2005
2006	if (unlikely(sock_flag(other, SOCK_DEAD))) {
2007		/*
2008		 *	Check with 1003.1g - what should a
2009		 *	datagram error return here?
2010		 */
2011		unix_state_unlock(other);
2012		sock_put(other);
2013
2014		if (!sk_locked)
2015			unix_state_lock(sk);
2016
2017		err = 0;
2018		if (sk->sk_type == SOCK_SEQPACKET) {
2019			/* We are here only when racing with unix_release_sock(),
2020			 * which is clearing @other. Unlike SOCK_DGRAM, never
2021			 * change the state to TCP_CLOSE.
2022			 */
2023			unix_state_unlock(sk);
2024			err = -EPIPE;
2025		} else if (unix_peer(sk) == other) {
2026			unix_peer(sk) = NULL;
2027			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2028
2029			sk->sk_state = TCP_CLOSE;
2030			unix_state_unlock(sk);
2031
2032			unix_dgram_disconnected(sk, other);
2033			sock_put(other);
2034			err = -ECONNREFUSED;
2035		} else {
2036			unix_state_unlock(sk);
2037		}
2038
2039		other = NULL;
2040		if (err)
2041			goto out_free;
2042		goto restart;
2043	}
2044
2045	err = -EPIPE;
2046	if (other->sk_shutdown & RCV_SHUTDOWN)
2047		goto out_unlock;
2048
2049	if (sk->sk_type != SOCK_SEQPACKET) {
2050		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2051		if (err)
2052			goto out_unlock;
2053	}
2054
2055	/* other == sk && unix_peer(other) != sk if:
2056	 * - unix_peer(sk) == NULL: the destination address is bound to sk itself
2057	 * - unix_peer(sk) == sk when fetched, but disconnected before the lock
2058	 */
2059	if (other != sk &&
2060	    unlikely(unix_peer(other) != sk &&
2061	    unix_recvq_full_lockless(other))) {
2062		if (timeo) {
2063			timeo = unix_wait_for_peer(other, timeo);
2064
2065			err = sock_intr_errno(timeo);
2066			if (signal_pending(current))
2067				goto out_free;
2068
2069			goto restart;
2070		}
2071
2072		if (!sk_locked) {
2073			unix_state_unlock(other);
2074			unix_state_double_lock(sk, other);
2075		}
2076
2077		if (unix_peer(sk) != other ||
2078		    unix_dgram_peer_wake_me(sk, other)) {
2079			err = -EAGAIN;
2080			sk_locked = 1;
2081			goto out_unlock;
2082		}
2083
2084		if (!sk_locked) {
2085			sk_locked = 1;
2086			goto restart_locked;
2087		}
2088	}
2089
2090	if (unlikely(sk_locked))
2091		unix_state_unlock(sk);
2092
2093	if (sock_flag(other, SOCK_RCVTSTAMP))
2094		__net_timestamp(skb);
2095	maybe_add_creds(skb, sock, other);
2096	scm_stat_add(other, skb);
2097	skb_queue_tail(&other->sk_receive_queue, skb);
2098	unix_state_unlock(other);
2099	other->sk_data_ready(other);
2100	sock_put(other);
2101	scm_destroy(&scm);
2102	return len;
2103
2104out_unlock:
2105	if (sk_locked)
2106		unix_state_unlock(sk);
2107	unix_state_unlock(other);
2108out_free:
2109	kfree_skb(skb);
2110out:
2111	if (other)
2112		sock_put(other);
2113	scm_destroy(&scm);
2114	return err;
2115}
2116
2117/* We use paged skbs for stream sockets, limiting occupancy to 32768
2118 * bytes with a minimum of a full page.
2119 */
2120#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
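/*
 * Worked example (assuming 4 KiB pages): get_order(32768) == 3, so
 * UNIX_SKB_FRAGS_SZ == PAGE_SIZE << 3 == 32768 bytes.  With 64 KiB
 * pages get_order(32768) == 0 and the value becomes one 64 KiB page,
 * which is how the "minimum of a full page" above is guaranteed.
 */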
2121
2122#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2123static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2124		     struct scm_cookie *scm, bool fds_sent)
2125{
2126	struct unix_sock *ousk = unix_sk(other);
2127	struct sk_buff *skb;
2128	int err = 0;
2129
2130	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2131
2132	if (!skb)
2133		return err;
2134
2135	err = unix_scm_to_skb(scm, skb, !fds_sent);
2136	if (err < 0) {
2137		kfree_skb(skb);
2138		return err;
2139	}
2140	skb_put(skb, 1);
2141	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2142
2143	if (err) {
2144		kfree_skb(skb);
2145		return err;
2146	}
2147
2148	unix_state_lock(other);
2149
2150	if (sock_flag(other, SOCK_DEAD) ||
2151	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2152		unix_state_unlock(other);
2153		kfree_skb(skb);
2154		return -EPIPE;
2155	}
2156
2157	maybe_add_creds(skb, sock, other);
2158	skb_get(skb);
2159
2160	if (ousk->oob_skb)
2161		consume_skb(ousk->oob_skb);
2162
2163	WRITE_ONCE(ousk->oob_skb, skb);
2164
2165	scm_stat_add(other, skb);
2166	skb_queue_tail(&other->sk_receive_queue, skb);
2167	sk_send_sigurg(other);
2168	unix_state_unlock(other);
2169	other->sk_data_ready(other);
2170
2171	return err;
2172}
2173#endif
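/*
 * Illustrative userspace sketch (not part of this file): with
 * CONFIG_AF_UNIX_OOB, a connected AF_UNIX stream pair carries a single
 * out-of-band byte, which queue_oob() above stores as ousk->oob_skb
 * and signals with SIGURG.  The function name is invented for the
 * example.
 */
static void example_oob(int a, int b)
{
	char c;

	send(a, "x", 1, MSG_OOB);	/* queue the OOB byte */
	recv(b, &c, 1, MSG_OOB);	/* fetch it ahead of normal data */
}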
2174
2175static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2176			       size_t len)
2177{
2178	struct sock *sk = sock->sk;
2179	struct sock *other = NULL;
2180	int err, size;
2181	struct sk_buff *skb;
2182	int sent = 0;
2183	struct scm_cookie scm;
2184	bool fds_sent = false;
2185	int data_len;
2186
2187	wait_for_unix_gc();
2188	err = scm_send(sock, msg, &scm, false);
2189	if (err < 0)
2190		return err;
2191
2192	err = -EOPNOTSUPP;
2193	if (msg->msg_flags & MSG_OOB) {
2194#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2195		if (len)
2196			len--;
2197		else
2198#endif
2199			goto out_err;
2200	}
2201
2202	if (msg->msg_namelen) {
2203		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2204		goto out_err;
2205	} else {
2206		err = -ENOTCONN;
2207		other = unix_peer(sk);
2208		if (!other)
2209			goto out_err;
2210	}
2211
2212	if (sk->sk_shutdown & SEND_SHUTDOWN)
2213		goto pipe_err;
2214
2215	while (sent < len) {
2216		size = len - sent;
2217
2218		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2219			skb = sock_alloc_send_pskb(sk, 0, 0,
2220						   msg->msg_flags & MSG_DONTWAIT,
2221						   &err, 0);
2222		} else {
2223			/* Keep two messages in the pipe so it schedules better */
2224			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2225
2226			/* allow fallback to order-0 allocations */
2227			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2228
2229			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2230
2231			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2232
2233			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2234						   msg->msg_flags & MSG_DONTWAIT, &err,
2235						   get_order(UNIX_SKB_FRAGS_SZ));
2236		}
2237		if (!skb)
2238			goto out_err;
2239
2240		/* Only send the fds in the first buffer */
2241		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2242		if (err < 0) {
2243			kfree_skb(skb);
2244			goto out_err;
2245		}
2246		fds_sent = true;
2247
2248		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2249			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2250						   sk->sk_allocation);
2251			if (err < 0) {
2252				kfree_skb(skb);
2253				goto out_err;
2254			}
2255			size = err;
2256			refcount_add(size, &sk->sk_wmem_alloc);
2257		} else {
2258			skb_put(skb, size - data_len);
2259			skb->data_len = data_len;
2260			skb->len = size;
2261			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2262			if (err) {
2263				kfree_skb(skb);
2264				goto out_err;
2265			}
2266		}
2267
2268		unix_state_lock(other);
2269
2270		if (sock_flag(other, SOCK_DEAD) ||
2271		    (other->sk_shutdown & RCV_SHUTDOWN))
2272			goto pipe_err_free;
2273
2274		maybe_add_creds(skb, sock, other);
2275		scm_stat_add(other, skb);
2276		skb_queue_tail(&other->sk_receive_queue, skb);
2277		unix_state_unlock(other);
2278		other->sk_data_ready(other);
2279		sent += size;
2280	}
2281
2282#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2283	if (msg->msg_flags & MSG_OOB) {
2284		err = queue_oob(sock, msg, other, &scm, fds_sent);
2285		if (err)
2286			goto out_err;
2287		sent++;
2288	}
2289#endif
2290
2291	scm_destroy(&scm);
2292
2293	return sent;
2294
2295pipe_err_free:
2296	unix_state_unlock(other);
2297	kfree_skb(skb);
2298pipe_err:
2299	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2300		send_sig(SIGPIPE, current, 0);
2301	err = -EPIPE;
2302out_err:
2303	scm_destroy(&scm);
2304	return sent ? : err;
2305}
2306
2307static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2308				  size_t len)
2309{
2310	int err;
2311	struct sock *sk = sock->sk;
2312
2313	err = sock_error(sk);
2314	if (err)
2315		return err;
2316
2317	if (sk->sk_state != TCP_ESTABLISHED)
2318		return -ENOTCONN;
2319
2320	if (msg->msg_namelen)
2321		msg->msg_namelen = 0;
2322
2323	return unix_dgram_sendmsg(sock, msg, len);
2324}
2325
2326static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2327				  size_t size, int flags)
2328{
2329	struct sock *sk = sock->sk;
2330
2331	if (sk->sk_state != TCP_ESTABLISHED)
2332		return -ENOTCONN;
2333
2334	return unix_dgram_recvmsg(sock, msg, size, flags);
2335}
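/*
 * Illustrative userspace sketch (not part of this file): SOCK_SEQPACKET
 * rides on the datagram paths above but over a connected pair, so each
 * send() produces exactly one record on the receive side.  The function
 * name is invented for the example.
 */
static void example_seqpacket(void)
{
	int sv[2];
	char buf[16];

	socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv);
	send(sv[0], "one", 3, 0);
	send(sv[0], "two", 3, 0);
	recv(sv[1], buf, sizeof(buf), 0);	/* returns 3: "one" */
	recv(sv[1], buf, sizeof(buf), 0);	/* returns 3: "two" */
}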
2336
2337static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2338{
2339	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2340
2341	if (addr) {
2342		msg->msg_namelen = addr->len;
2343		memcpy(msg->msg_name, addr->name, addr->len);
2344	}
2345}
2346
2347int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2348			 int flags)
2349{
2350	struct scm_cookie scm;
2351	struct socket *sock = sk->sk_socket;
2352	struct unix_sock *u = unix_sk(sk);
2353	struct sk_buff *skb, *last;
2354	long timeo;
2355	int skip;
2356	int err;
2357
2358	err = -EOPNOTSUPP;
2359	if (flags&MSG_OOB)
2360		goto out;
2361
2362	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2363
2364	do {
2365		mutex_lock(&u->iolock);
2366
2367		skip = sk_peek_offset(sk, flags);
2368		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2369					      &skip, &err, &last);
2370		if (skb) {
2371			if (!(flags & MSG_PEEK))
2372				scm_stat_del(sk, skb);
2373			break;
2374		}
2375
2376		mutex_unlock(&u->iolock);
2377
2378		if (err != -EAGAIN)
2379			break;
2380	} while (timeo &&
2381		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2382					      &err, &timeo, last));
2383
2384	if (!skb) { /* implies iolock unlocked */
2385		unix_state_lock(sk);
2386		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2387		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2388		    (sk->sk_shutdown & RCV_SHUTDOWN))
2389			err = 0;
2390		unix_state_unlock(sk);
2391		goto out;
2392	}
2393
2394	if (wq_has_sleeper(&u->peer_wait))
2395		wake_up_interruptible_sync_poll(&u->peer_wait,
2396						EPOLLOUT | EPOLLWRNORM |
2397						EPOLLWRBAND);
2398
2399	if (msg->msg_name) {
2400		unix_copy_addr(msg, skb->sk);
2401
2402		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2403						      msg->msg_name,
2404						      &msg->msg_namelen);
2405	}
2406
2407	if (size > skb->len - skip)
2408		size = skb->len - skip;
2409	else if (size < skb->len - skip)
2410		msg->msg_flags |= MSG_TRUNC;
2411
2412	err = skb_copy_datagram_msg(skb, skip, msg, size);
2413	if (err)
2414		goto out_free;
2415
2416	if (sock_flag(sk, SOCK_RCVTSTAMP))
2417		__sock_recv_timestamp(msg, sk, skb);
2418
2419	memset(&scm, 0, sizeof(scm));
2420
2421	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2422	unix_set_secdata(&scm, skb);
2423
2424	if (!(flags & MSG_PEEK)) {
2425		if (UNIXCB(skb).fp)
2426			unix_detach_fds(&scm, skb);
2427
2428		sk_peek_offset_bwd(sk, skb->len);
2429	} else {
2430		/* It is questionable: on PEEK we could:
2431		   - not return fds - good, but too simple 8)
2432		   - return fds, and not return them again on read (old strategy,
2433		     apparently wrong)
2434		   - clone fds (chosen here, as it is the most universal
2435		     solution)
2436
2437		   POSIX 1003.1g does not actually define this clearly
2438		   at all.  POSIX 1003.1g doesn't define a lot of things
2439		   clearly, however!
2440
2441		*/
2442
2443		sk_peek_offset_fwd(sk, size);
2444
2445		if (UNIXCB(skb).fp)
2446			unix_peek_fds(&scm, skb);
2447	}
2448	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2449
2450	scm_recv_unix(sock, msg, &scm, flags);
2451
2452out_free:
2453	skb_free_datagram(sk, skb);
2454	mutex_unlock(&u->iolock);
2455out:
2456	return err;
2457}
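/*
 * Illustrative userspace sketch (not part of this file): because the
 * code above returns the full datagram length when MSG_TRUNC is set,
 * a receiver can size its buffer with MSG_PEEK | MSG_TRUNC before
 * reading for real.  The function name is invented for the example.
 */
static ssize_t example_next_dgram_len(int sock)
{
	char dummy;

	/* Length of the next datagram, without consuming it. */
	return recv(sock, &dummy, 1, MSG_PEEK | MSG_TRUNC);
}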
2458
2459static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2460			      int flags)
2461{
2462	struct sock *sk = sock->sk;
2463
2464#ifdef CONFIG_BPF_SYSCALL
2465	const struct proto *prot = READ_ONCE(sk->sk_prot);
2466
2467	if (prot != &unix_dgram_proto)
2468		return prot->recvmsg(sk, msg, size, flags, NULL);
2469#endif
2470	return __unix_dgram_recvmsg(sk, msg, size, flags);
2471}
2472
2473static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2474{
2475	struct unix_sock *u = unix_sk(sk);
2476	struct sk_buff *skb;
2477	int err;
2478
2479	mutex_lock(&u->iolock);
2480	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2481	mutex_unlock(&u->iolock);
2482	if (!skb)
2483		return err;
2484
2485	return recv_actor(sk, skb);
2486}
2487
2488/*
2489 *	Sleep until more data has arrived. But check for races.
2490 */
2491static long unix_stream_data_wait(struct sock *sk, long timeo,
2492				  struct sk_buff *last, unsigned int last_len,
2493				  bool freezable)
2494{
2495	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2496	struct sk_buff *tail;
2497	DEFINE_WAIT(wait);
2498
2499	unix_state_lock(sk);
2500
2501	for (;;) {
2502		prepare_to_wait(sk_sleep(sk), &wait, state);
2503
2504		tail = skb_peek_tail(&sk->sk_receive_queue);
2505		if (tail != last ||
2506		    (tail && tail->len != last_len) ||
2507		    sk->sk_err ||
2508		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2509		    signal_pending(current) ||
2510		    !timeo)
2511			break;
2512
2513		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2514		unix_state_unlock(sk);
2515		timeo = schedule_timeout(timeo);
2516		unix_state_lock(sk);
2517
2518		if (sock_flag(sk, SOCK_DEAD))
2519			break;
2520
2521		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2522	}
2523
2524	finish_wait(sk_sleep(sk), &wait);
2525	unix_state_unlock(sk);
2526	return timeo;
2527}
2528
2529static unsigned int unix_skb_len(const struct sk_buff *skb)
2530{
2531	return skb->len - UNIXCB(skb).consumed;
2532}
2533
2534struct unix_stream_read_state {
2535	int (*recv_actor)(struct sk_buff *, int, int,
2536			  struct unix_stream_read_state *);
2537	struct socket *socket;
2538	struct msghdr *msg;
2539	struct pipe_inode_info *pipe;
2540	size_t size;
2541	int flags;
2542	unsigned int splice_flags;
2543};
2544
2545#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2546static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2547{
2548	struct socket *sock = state->socket;
2549	struct sock *sk = sock->sk;
2550	struct unix_sock *u = unix_sk(sk);
2551	int chunk = 1;
2552	struct sk_buff *oob_skb;
2553
2554	mutex_lock(&u->iolock);
2555	unix_state_lock(sk);
2556
2557	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2558		unix_state_unlock(sk);
2559		mutex_unlock(&u->iolock);
2560		return -EINVAL;
2561	}
2562
2563	oob_skb = u->oob_skb;
2564
2565	if (!(state->flags & MSG_PEEK))
2566		WRITE_ONCE(u->oob_skb, NULL);
2567	else
2568		skb_get(oob_skb);
2569	unix_state_unlock(sk);
2570
2571	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2572
2573	if (!(state->flags & MSG_PEEK))
2574		UNIXCB(oob_skb).consumed += 1;
2575
2576	consume_skb(oob_skb);
2577
2578	mutex_unlock(&u->iolock);
2579
2580	if (chunk < 0)
2581		return -EFAULT;
2582
2583	state->msg->msg_flags |= MSG_OOB;
2584	return 1;
2585}
2586
2587static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2588				  int flags, int copied)
2589{
2590	struct unix_sock *u = unix_sk(sk);
2591
2592	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2593		skb_unlink(skb, &sk->sk_receive_queue);
2594		consume_skb(skb);
2595		skb = NULL;
2596	} else {
2597		if (skb == u->oob_skb) {
2598			if (copied) {
2599				skb = NULL;
2600			} else if (sock_flag(sk, SOCK_URGINLINE)) {
2601				if (!(flags & MSG_PEEK)) {
2602					WRITE_ONCE(u->oob_skb, NULL);
2603					consume_skb(skb);
2604				}
2605			} else if (!(flags & MSG_PEEK)) {
2606				skb_unlink(skb, &sk->sk_receive_queue);
2607				consume_skb(skb);
2608				skb = skb_peek(&sk->sk_receive_queue);
2609			}
2610		}
2611	}
2612	return skb;
2613}
2614#endif
2615
2616static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2617{
2618	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2619		return -ENOTCONN;
2620
2621	return unix_read_skb(sk, recv_actor);
2622}
2623
2624static int unix_stream_read_generic(struct unix_stream_read_state *state,
2625				    bool freezable)
2626{
2627	struct scm_cookie scm;
2628	struct socket *sock = state->socket;
2629	struct sock *sk = sock->sk;
2630	struct unix_sock *u = unix_sk(sk);
2631	int copied = 0;
2632	int flags = state->flags;
2633	int noblock = flags & MSG_DONTWAIT;
2634	bool check_creds = false;
2635	int target;
2636	int err = 0;
2637	long timeo;
2638	int skip;
2639	size_t size = state->size;
2640	unsigned int last_len;
2641
2642	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2643		err = -EINVAL;
2644		goto out;
2645	}
2646
2647	if (unlikely(flags & MSG_OOB)) {
2648		err = -EOPNOTSUPP;
2649#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2650		err = unix_stream_recv_urg(state);
2651#endif
2652		goto out;
2653	}
2654
2655	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2656	timeo = sock_rcvtimeo(sk, noblock);
2657
2658	memset(&scm, 0, sizeof(scm));
2659
2660	/* Lock the socket to prevent queue disordering
2661	 * while we sleep copying data to the message.
2662	 */
2663	mutex_lock(&u->iolock);
2664
2665	skip = max(sk_peek_offset(sk, flags), 0);
2666
2667	do {
2668		int chunk;
2669		bool drop_skb;
2670		struct sk_buff *skb, *last;
2671
2672redo:
2673		unix_state_lock(sk);
2674		if (sock_flag(sk, SOCK_DEAD)) {
2675			err = -ECONNRESET;
2676			goto unlock;
2677		}
2678		last = skb = skb_peek(&sk->sk_receive_queue);
2679		last_len = last ? last->len : 0;
2680
2681#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2682		if (skb) {
2683			skb = manage_oob(skb, sk, flags, copied);
2684			if (!skb) {
2685				unix_state_unlock(sk);
2686				if (copied)
2687					break;
2688				goto redo;
2689			}
2690		}
2691#endif
2692again:
2693		if (skb == NULL) {
2694			if (copied >= target)
2695				goto unlock;
2696
2697			/*
2698			 *	POSIX 1003.1g mandates this order.
2699			 */
2700
2701			err = sock_error(sk);
2702			if (err)
2703				goto unlock;
2704			if (sk->sk_shutdown & RCV_SHUTDOWN)
2705				goto unlock;
2706
2707			unix_state_unlock(sk);
2708			if (!timeo) {
2709				err = -EAGAIN;
2710				break;
2711			}
2712
2713			mutex_unlock(&u->iolock);
2714
2715			timeo = unix_stream_data_wait(sk, timeo, last,
2716						      last_len, freezable);
2717
2718			if (signal_pending(current)) {
2719				err = sock_intr_errno(timeo);
2720				scm_destroy(&scm);
2721				goto out;
2722			}
2723
2724			mutex_lock(&u->iolock);
2725			goto redo;
2726unlock:
2727			unix_state_unlock(sk);
2728			break;
2729		}
2730
2731		while (skip >= unix_skb_len(skb)) {
2732			skip -= unix_skb_len(skb);
2733			last = skb;
2734			last_len = skb->len;
2735			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2736			if (!skb)
2737				goto again;
2738		}
2739
2740		unix_state_unlock(sk);
2741
2742		if (check_creds) {
2743			/* Never glue messages from different writers */
2744			if (!unix_skb_scm_eq(skb, &scm))
2745				break;
2746		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2747			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2748			/* Copy credentials */
2749			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2750			unix_set_secdata(&scm, skb);
2751			check_creds = true;
2752		}
2753
2754		/* Copy address just once */
2755		if (state->msg && state->msg->msg_name) {
2756			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2757					 state->msg->msg_name);
2758			unix_copy_addr(state->msg, skb->sk);
2759
2760			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2761							      state->msg->msg_name,
2762							      &state->msg->msg_namelen);
2763
2764			sunaddr = NULL;
2765		}
2766
2767		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2768		skb_get(skb);
2769		chunk = state->recv_actor(skb, skip, chunk, state);
2770		drop_skb = !unix_skb_len(skb);
2771		/* skb is only safe to use if !drop_skb */
2772		consume_skb(skb);
2773		if (chunk < 0) {
2774			if (copied == 0)
2775				copied = -EFAULT;
2776			break;
2777		}
2778		copied += chunk;
2779		size -= chunk;
2780
2781		if (drop_skb) {
2782			/* the skb was touched by a concurrent reader;
2783			 * we should not expect anything from this skb
2784			 * anymore and must assume it is invalid - we
2785			 * can be sure it was dropped from the socket queue
2786			 *
2787			 * let's report a short read
2788			 */
2789			err = 0;
2790			break;
2791		}
2792
2793		/* Mark read part of skb as used */
2794		if (!(flags & MSG_PEEK)) {
2795			UNIXCB(skb).consumed += chunk;
2796
2797			sk_peek_offset_bwd(sk, chunk);
2798
2799			if (UNIXCB(skb).fp) {
2800				scm_stat_del(sk, skb);
2801				unix_detach_fds(&scm, skb);
2802			}
2803
2804			if (unix_skb_len(skb))
2805				break;
2806
2807			skb_unlink(skb, &sk->sk_receive_queue);
2808			consume_skb(skb);
2809
2810			if (scm.fp)
2811				break;
2812		} else {
2813			/* It is questionable, see note in unix_dgram_recvmsg.
2814			 */
2815			if (UNIXCB(skb).fp)
2816				unix_peek_fds(&scm, skb);
2817
2818			sk_peek_offset_fwd(sk, chunk);
2819
2820			if (UNIXCB(skb).fp)
2821				break;
2822
2823			skip = 0;
2824			last = skb;
2825			last_len = skb->len;
2826			unix_state_lock(sk);
2827			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2828			if (skb)
2829				goto again;
2830			unix_state_unlock(sk);
2831			break;
2832		}
2833	} while (size);
2834
2835	mutex_unlock(&u->iolock);
2836	if (state->msg)
2837		scm_recv_unix(sock, state->msg, &scm, flags);
2838	else
2839		scm_destroy(&scm);
2840out:
2841	return copied ? : err;
2842}
2843
2844static int unix_stream_read_actor(struct sk_buff *skb,
2845				  int skip, int chunk,
2846				  struct unix_stream_read_state *state)
2847{
2848	int ret;
2849
2850	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2851				    state->msg, chunk);
2852	return ret ?: chunk;
2853}
2854
2855int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2856			  size_t size, int flags)
2857{
2858	struct unix_stream_read_state state = {
2859		.recv_actor = unix_stream_read_actor,
2860		.socket = sk->sk_socket,
2861		.msg = msg,
2862		.size = size,
2863		.flags = flags
2864	};
2865
2866	return unix_stream_read_generic(&state, true);
2867}
2868
2869static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2870			       size_t size, int flags)
2871{
2872	struct unix_stream_read_state state = {
2873		.recv_actor = unix_stream_read_actor,
2874		.socket = sock,
2875		.msg = msg,
2876		.size = size,
2877		.flags = flags
2878	};
2879
2880#ifdef CONFIG_BPF_SYSCALL
2881	struct sock *sk = sock->sk;
2882	const struct proto *prot = READ_ONCE(sk->sk_prot);
2883
2884	if (prot != &unix_stream_proto)
2885		return prot->recvmsg(sk, msg, size, flags, NULL);
2886#endif
2887	return unix_stream_read_generic(&state, true);
2888}
2889
2890static int unix_stream_splice_actor(struct sk_buff *skb,
2891				    int skip, int chunk,
2892				    struct unix_stream_read_state *state)
2893{
2894	return skb_splice_bits(skb, state->socket->sk,
2895			       UNIXCB(skb).consumed + skip,
2896			       state->pipe, chunk, state->splice_flags);
2897}
2898
2899static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2900				       struct pipe_inode_info *pipe,
2901				       size_t size, unsigned int flags)
2902{
2903	struct unix_stream_read_state state = {
2904		.recv_actor = unix_stream_splice_actor,
2905		.socket = sock,
2906		.pipe = pipe,
2907		.size = size,
2908		.splice_flags = flags,
2909	};
2910
2911	if (unlikely(*ppos))
2912		return -ESPIPE;
2913
2914	if (sock->file->f_flags & O_NONBLOCK ||
2915	    flags & SPLICE_F_NONBLOCK)
2916		state.flags = MSG_DONTWAIT;
2917
2918	return unix_stream_read_generic(&state, false);
2919}
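/*
 * Illustrative userspace sketch (not part of this file): the splice
 * path above lets data move from a unix stream socket into a pipe
 * without passing through a userspace buffer.  Assumes _GNU_SOURCE and
 * <fcntl.h>; the function name is invented for the example.
 */
static ssize_t example_splice_to_pipe(int sock, int pipe_wr)
{
	/* Move up to 4096 bytes from the socket into the pipe. */
	return splice(sock, NULL, pipe_wr, NULL, 4096, SPLICE_F_NONBLOCK);
}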
2920
2921static int unix_shutdown(struct socket *sock, int mode)
2922{
2923	struct sock *sk = sock->sk;
2924	struct sock *other;
2925
2926	if (mode < SHUT_RD || mode > SHUT_RDWR)
2927		return -EINVAL;
2928	/* This maps:
2929	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
2930	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
2931	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2932	 */
2933	++mode;
2934
2935	unix_state_lock(sk);
2936	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
2937	other = unix_peer(sk);
2938	if (other)
2939		sock_hold(other);
2940	unix_state_unlock(sk);
2941	sk->sk_state_change(sk);
2942
2943	if (other &&
2944		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2945
2946		int peer_mode = 0;
2947		const struct proto *prot = READ_ONCE(other->sk_prot);
2948
2949		if (prot->unhash)
2950			prot->unhash(other);
2951		if (mode&RCV_SHUTDOWN)
2952			peer_mode |= SEND_SHUTDOWN;
2953		if (mode&SEND_SHUTDOWN)
2954			peer_mode |= RCV_SHUTDOWN;
2955		unix_state_lock(other);
2956		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
2957		unix_state_unlock(other);
2958		other->sk_state_change(other);
2959		if (peer_mode == SHUTDOWN_MASK)
2960			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2961		else if (peer_mode & RCV_SHUTDOWN)
2962			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2963	}
2964	if (other)
2965		sock_put(other);
2966
2967	return 0;
2968}
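/*
 * Illustrative userspace sketch (not part of this file): for connected
 * types, unix_shutdown() above mirrors the shutdown onto the peer, so
 * SHUT_WR on one end shows up as EOF on the other.  The function name
 * is invented for the example.
 */
static void example_shutdown(void)
{
	int sv[2];
	char c;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	shutdown(sv[0], SHUT_WR);
	read(sv[1], &c, 1);	/* returns 0 (EOF) immediately */
}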
2969
2970long unix_inq_len(struct sock *sk)
2971{
2972	struct sk_buff *skb;
2973	long amount = 0;
2974
2975	if (sk->sk_state == TCP_LISTEN)
2976		return -EINVAL;
2977
2978	spin_lock(&sk->sk_receive_queue.lock);
2979	if (sk->sk_type == SOCK_STREAM ||
2980	    sk->sk_type == SOCK_SEQPACKET) {
2981		skb_queue_walk(&sk->sk_receive_queue, skb)
2982			amount += unix_skb_len(skb);
2983	} else {
2984		skb = skb_peek(&sk->sk_receive_queue);
2985		if (skb)
2986			amount = skb->len;
2987	}
2988	spin_unlock(&sk->sk_receive_queue.lock);
2989
2990	return amount;
2991}
2992EXPORT_SYMBOL_GPL(unix_inq_len);
2993
2994long unix_outq_len(struct sock *sk)
2995{
2996	return sk_wmem_alloc_get(sk);
2997}
2998EXPORT_SYMBOL_GPL(unix_outq_len);
2999
3000static int unix_open_file(struct sock *sk)
3001{
3002	struct path path;
3003	struct file *f;
3004	int fd;
3005
3006	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3007		return -EPERM;
3008
3009	if (!smp_load_acquire(&unix_sk(sk)->addr))
3010		return -ENOENT;
3011
3012	path = unix_sk(sk)->path;
3013	if (!path.dentry)
3014		return -ENOENT;
3015
3016	path_get(&path);
3017
3018	fd = get_unused_fd_flags(O_CLOEXEC);
3019	if (fd < 0)
3020		goto out;
3021
3022	f = dentry_open(&path, O_PATH, current_cred());
3023	if (IS_ERR(f)) {
3024		put_unused_fd(fd);
3025		fd = PTR_ERR(f);
3026		goto out;
3027	}
3028
3029	fd_install(fd, f);
3030out:
3031	path_put(&path);
3032
3033	return fd;
3034}
3035
3036static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3037{
3038	struct sock *sk = sock->sk;
3039	long amount = 0;
3040	int err;
3041
3042	switch (cmd) {
3043	case SIOCOUTQ:
3044		amount = unix_outq_len(sk);
3045		err = put_user(amount, (int __user *)arg);
3046		break;
3047	case SIOCINQ:
3048		amount = unix_inq_len(sk);
3049		if (amount < 0)
3050			err = amount;
3051		else
3052			err = put_user(amount, (int __user *)arg);
3053		break;
3054	case SIOCUNIXFILE:
3055		err = unix_open_file(sk);
3056		break;
3057#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3058	case SIOCATMARK:
3059		{
3060			struct sk_buff *skb;
3061			int answ = 0;
3062
3063			skb = skb_peek(&sk->sk_receive_queue);
3064			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3065				answ = 1;
3066			err = put_user(answ, (int __user *)arg);
3067		}
3068		break;
3069#endif
3070	default:
3071		err = -ENOIOCTLCMD;
3072		break;
3073	}
3074	return err;
3075}
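/*
 * Illustrative userspace sketch (not part of this file): querying the
 * queues handled above.  SIOCINQ (a.k.a. FIONREAD) reports unread
 * bytes in the receive queue, SIOCOUTQ bytes not yet consumed by the
 * peer, and SIOCATMARK whether the next byte is the OOB mark (with
 * CONFIG_AF_UNIX_OOB).  Assumes <sys/ioctl.h> and <linux/sockios.h>;
 * the function name is invented for the example.
 */
static void example_queue_ioctls(int sock)
{
	int inq = 0, outq = 0, atmark = 0;

	ioctl(sock, SIOCINQ, &inq);
	ioctl(sock, SIOCOUTQ, &outq);
	ioctl(sock, SIOCATMARK, &atmark);
}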
3076
3077#ifdef CONFIG_COMPAT
3078static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3079{
3080	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3081}
3082#endif
3083
3084static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3085{
3086	struct sock *sk = sock->sk;
3087	__poll_t mask;
3088	u8 shutdown;
3089
3090	sock_poll_wait(file, sock, wait);
3091	mask = 0;
3092	shutdown = READ_ONCE(sk->sk_shutdown);
3093
3094	/* exceptional events? */
3095	if (READ_ONCE(sk->sk_err))
3096		mask |= EPOLLERR;
3097	if (shutdown == SHUTDOWN_MASK)
3098		mask |= EPOLLHUP;
3099	if (shutdown & RCV_SHUTDOWN)
3100		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3101
3102	/* readable? */
3103	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3104		mask |= EPOLLIN | EPOLLRDNORM;
3105	if (sk_is_readable(sk))
3106		mask |= EPOLLIN | EPOLLRDNORM;
3107#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3108	if (READ_ONCE(unix_sk(sk)->oob_skb))
3109		mask |= EPOLLPRI;
3110#endif
3111
3112	/* Connection-based sockets need to check for termination and startup */
3113	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3114	    sk->sk_state == TCP_CLOSE)
3115		mask |= EPOLLHUP;
3116
3117	/*
3118	 * We set writable also when the other side has shut down the
3119	 * connection, to prevent stuck sockets.
3120	 */
3121	if (unix_writable(sk))
3122		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3123
3124	return mask;
3125}
3126
3127static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3128				    poll_table *wait)
3129{
3130	struct sock *sk = sock->sk, *other;
3131	unsigned int writable;
3132	__poll_t mask;
3133	u8 shutdown;
3134
3135	sock_poll_wait(file, sock, wait);
3136	mask = 0;
3137	shutdown = READ_ONCE(sk->sk_shutdown);
3138
3139	/* exceptional events? */
3140	if (READ_ONCE(sk->sk_err) ||
3141	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3142		mask |= EPOLLERR |
3143			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3144
3145	if (shutdown & RCV_SHUTDOWN)
3146		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3147	if (shutdown == SHUTDOWN_MASK)
3148		mask |= EPOLLHUP;
3149
3150	/* readable? */
3151	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3152		mask |= EPOLLIN | EPOLLRDNORM;
3153	if (sk_is_readable(sk))
3154		mask |= EPOLLIN | EPOLLRDNORM;
3155
3156	/* Connection-based sockets need to check for termination and startup */
3157	if (sk->sk_type == SOCK_SEQPACKET) {
3158		if (sk->sk_state == TCP_CLOSE)
3159			mask |= EPOLLHUP;
3160		/* connection hasn't started yet? */
3161		if (sk->sk_state == TCP_SYN_SENT)
3162			return mask;
3163	}
3164
3165	/* No write status requested, avoid expensive OUT tests. */
3166	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3167		return mask;
3168
3169	writable = unix_writable(sk);
3170	if (writable) {
3171		unix_state_lock(sk);
3172
3173		other = unix_peer(sk);
3174		if (other && unix_peer(other) != sk &&
3175		    unix_recvq_full_lockless(other) &&
3176		    unix_dgram_peer_wake_me(sk, other))
3177			writable = 0;
3178
3179		unix_state_unlock(sk);
3180	}
3181
3182	if (writable)
3183		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3184	else
3185		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3186
3187	return mask;
3188}
3189
3190#ifdef CONFIG_PROC_FS
3191
3192#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3193
3194#define get_bucket(x) ((x) >> BUCKET_SPACE)
3195#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3196#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
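/*
 * Worked example (assuming 64-bit longs and UNIX_HASH_BITS == 8):
 * BUCKET_SPACE == 64 - 9 - 1 == 54, so the seq_file position packs
 * the bucket index into the top bits and a 1-based in-bucket offset
 * into the low 54 bits, e.g. set_bucket_offset(3, 7) == (3UL << 54) | 7.
 */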
3197
3198static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3199{
3200	unsigned long offset = get_offset(*pos);
3201	unsigned long bucket = get_bucket(*pos);
3202	unsigned long count = 0;
3203	struct sock *sk;
3204
3205	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3206	     sk; sk = sk_next(sk)) {
3207		if (++count == offset)
3208			break;
3209	}
3210
3211	return sk;
3212}
3213
3214static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3215{
3216	unsigned long bucket = get_bucket(*pos);
3217	struct net *net = seq_file_net(seq);
3218	struct sock *sk;
3219
3220	while (bucket < UNIX_HASH_SIZE) {
3221		spin_lock(&net->unx.table.locks[bucket]);
3222
3223		sk = unix_from_bucket(seq, pos);
3224		if (sk)
3225			return sk;
3226
3227		spin_unlock(&net->unx.table.locks[bucket]);
3228
3229		*pos = set_bucket_offset(++bucket, 1);
3230	}
3231
3232	return NULL;
3233}
3234
3235static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3236				  loff_t *pos)
3237{
3238	unsigned long bucket = get_bucket(*pos);
3239
3240	sk = sk_next(sk);
3241	if (sk)
3242		return sk;
3243
3244
3245	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3246
3247	*pos = set_bucket_offset(++bucket, 1);
3248
3249	return unix_get_first(seq, pos);
3250}
3251
3252static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3253{
3254	if (!*pos)
3255		return SEQ_START_TOKEN;
3256
3257	return unix_get_first(seq, pos);
3258}
3259
3260static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3261{
3262	++*pos;
3263
3264	if (v == SEQ_START_TOKEN)
3265		return unix_get_first(seq, pos);
3266
3267	return unix_get_next(seq, v, pos);
3268}
3269
3270static void unix_seq_stop(struct seq_file *seq, void *v)
3271{
3272	struct sock *sk = v;
3273
3274	if (sk)
3275		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3276}
3277
3278static int unix_seq_show(struct seq_file *seq, void *v)
3279{
3280
3281	if (v == SEQ_START_TOKEN)
3282		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3283			 "Inode Path\n");
3284	else {
3285		struct sock *s = v;
3286		struct unix_sock *u = unix_sk(s);
3287		unix_state_lock(s);
3288
3289		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3290			s,
3291			refcount_read(&s->sk_refcnt),
3292			0,
3293			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3294			s->sk_type,
3295			s->sk_socket ?
3296			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3297			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3298			sock_i_ino(s));
3299
3300		if (u->addr) {	/* under a hash table lock here */
3301			int i, len;
3302			seq_putc(seq, ' ');
3303
3304			i = 0;
3305			len = u->addr->len -
3306				offsetof(struct sockaddr_un, sun_path);
3307			if (u->addr->name->sun_path[0]) {
3308				len--;
3309			} else {
3310				seq_putc(seq, '@');
3311				i++;
3312			}
3313			for ( ; i < len; i++)
3314				seq_putc(seq, u->addr->name->sun_path[i] ?:
3315					 '@');
3316		}
3317		unix_state_unlock(s);
3318		seq_putc(seq, '\n');
3319	}
3320
3321	return 0;
3322}
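/*
 * Illustrative sample of the resulting /proc/net/unix line (all values
 * invented; %pK prints zeros for unprivileged readers):
 *
 *   0000000000000000: 00000002 00000000 00010000 0001 01 23456 @example
 *
 * i.e. address, refcount, protocol, flags (__SO_ACCEPTCON for a
 * listener), type (0001 == SOCK_STREAM), state, inode, bound name,
 * with '@' marking an abstract name.
 */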
3323
3324static const struct seq_operations unix_seq_ops = {
3325	.start  = unix_seq_start,
3326	.next   = unix_seq_next,
3327	.stop   = unix_seq_stop,
3328	.show   = unix_seq_show,
3329};
3330
3331#ifdef CONFIG_BPF_SYSCALL
3332struct bpf_unix_iter_state {
3333	struct seq_net_private p;
3334	unsigned int cur_sk;
3335	unsigned int end_sk;
3336	unsigned int max_sk;
3337	struct sock **batch;
3338	bool st_bucket_done;
3339};
3340
3341struct bpf_iter__unix {
3342	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3343	__bpf_md_ptr(struct unix_sock *, unix_sk);
3344	uid_t uid __aligned(8);
3345};
3346
3347static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3348			      struct unix_sock *unix_sk, uid_t uid)
3349{
3350	struct bpf_iter__unix ctx;
3351
3352	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3353	ctx.meta = meta;
3354	ctx.unix_sk = unix_sk;
3355	ctx.uid = uid;
3356	return bpf_iter_run_prog(prog, &ctx);
3357}
3358
3359static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3360
3361{
3362	struct bpf_unix_iter_state *iter = seq->private;
3363	unsigned int expected = 1;
3364	struct sock *sk;
3365
3366	sock_hold(start_sk);
3367	iter->batch[iter->end_sk++] = start_sk;
3368
3369	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3370		if (iter->end_sk < iter->max_sk) {
3371			sock_hold(sk);
3372			iter->batch[iter->end_sk++] = sk;
3373		}
3374
3375		expected++;
3376	}
3377
3378	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3379
3380	return expected;
3381}
3382
3383static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3384{
3385	while (iter->cur_sk < iter->end_sk)
3386		sock_put(iter->batch[iter->cur_sk++]);
3387}
3388
3389static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3390				       unsigned int new_batch_sz)
3391{
3392	struct sock **new_batch;
3393
3394	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3395			     GFP_USER | __GFP_NOWARN);
3396	if (!new_batch)
3397		return -ENOMEM;
3398
3399	bpf_iter_unix_put_batch(iter);
3400	kvfree(iter->batch);
3401	iter->batch = new_batch;
3402	iter->max_sk = new_batch_sz;
3403
3404	return 0;
3405}
3406
3407static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3408					loff_t *pos)
3409{
3410	struct bpf_unix_iter_state *iter = seq->private;
3411	unsigned int expected;
3412	bool resized = false;
3413	struct sock *sk;
3414
3415	if (iter->st_bucket_done)
3416		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3417
3418again:
3419	/* Get a new batch */
3420	iter->cur_sk = 0;
3421	iter->end_sk = 0;
3422
3423	sk = unix_get_first(seq, pos);
3424	if (!sk)
3425		return NULL; /* Done */
3426
3427	expected = bpf_iter_unix_hold_batch(seq, sk);
3428
3429	if (iter->end_sk == expected) {
3430		iter->st_bucket_done = true;
3431		return sk;
3432	}
3433
3434	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3435		resized = true;
3436		goto again;
3437	}
3438
3439	return sk;
3440}
3441
3442static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3443{
3444	if (!*pos)
3445		return SEQ_START_TOKEN;
3446
3447	/* bpf iter does not support lseek, so it always
3448	 * continues from where it was stop()-ped.
3449	 */
3450	return bpf_iter_unix_batch(seq, pos);
3451}
3452
3453static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3454{
3455	struct bpf_unix_iter_state *iter = seq->private;
3456	struct sock *sk;
3457
3458	/* Whenever seq_next() is called, the sock at iter->cur_sk
3459	 * is done with seq_show(), so advance to the next sk in
3460	 * the batch.
3461	 */
3462	if (iter->cur_sk < iter->end_sk)
3463		sock_put(iter->batch[iter->cur_sk++]);
3464
3465	++*pos;
3466
3467	if (iter->cur_sk < iter->end_sk)
3468		sk = iter->batch[iter->cur_sk];
3469	else
3470		sk = bpf_iter_unix_batch(seq, pos);
3471
3472	return sk;
3473}
3474
3475static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3476{
3477	struct bpf_iter_meta meta;
3478	struct bpf_prog *prog;
3479	struct sock *sk = v;
3480	uid_t uid;
3481	bool slow;
3482	int ret;
3483
3484	if (v == SEQ_START_TOKEN)
3485		return 0;
3486
3487	slow = lock_sock_fast(sk);
3488
3489	if (unlikely(sk_unhashed(sk))) {
3490		ret = SEQ_SKIP;
3491		goto unlock;
3492	}
3493
3494	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3495	meta.seq = seq;
3496	prog = bpf_iter_get_info(&meta, false);
3497	ret = unix_prog_seq_show(prog, &meta, v, uid);
3498unlock:
3499	unlock_sock_fast(sk, slow);
3500	return ret;
3501}
3502
3503static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3504{
3505	struct bpf_unix_iter_state *iter = seq->private;
3506	struct bpf_iter_meta meta;
3507	struct bpf_prog *prog;
3508
3509	if (!v) {
3510		meta.seq = seq;
3511		prog = bpf_iter_get_info(&meta, true);
3512		if (prog)
3513			(void)unix_prog_seq_show(prog, &meta, v, 0);
3514	}
3515
3516	if (iter->cur_sk < iter->end_sk)
3517		bpf_iter_unix_put_batch(iter);
3518}
3519
3520static const struct seq_operations bpf_iter_unix_seq_ops = {
3521	.start	= bpf_iter_unix_seq_start,
3522	.next	= bpf_iter_unix_seq_next,
3523	.stop	= bpf_iter_unix_seq_stop,
3524	.show	= bpf_iter_unix_seq_show,
3525};
3526#endif
3527#endif
3528
3529static const struct net_proto_family unix_family_ops = {
3530	.family = PF_UNIX,
3531	.create = unix_create,
3532	.owner	= THIS_MODULE,
3533};
3534
3535
3536static int __net_init unix_net_init(struct net *net)
3537{
3538	int i;
3539
3540	net->unx.sysctl_max_dgram_qlen = 10;
3541	if (unix_sysctl_register(net))
3542		goto out;
3543
3544#ifdef CONFIG_PROC_FS
3545	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3546			     sizeof(struct seq_net_private)))
3547		goto err_sysctl;
3548#endif
3549
3550	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3551					      sizeof(spinlock_t), GFP_KERNEL);
3552	if (!net->unx.table.locks)
3553		goto err_proc;
3554
3555	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3556						sizeof(struct hlist_head),
3557						GFP_KERNEL);
3558	if (!net->unx.table.buckets)
3559		goto free_locks;
3560
3561	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3562		spin_lock_init(&net->unx.table.locks[i]);
3563		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3564	}
3565
3566	return 0;
3567
3568free_locks:
3569	kvfree(net->unx.table.locks);
3570err_proc:
3571#ifdef CONFIG_PROC_FS
3572	remove_proc_entry("unix", net->proc_net);
3573err_sysctl:
3574#endif
3575	unix_sysctl_unregister(net);
3576out:
3577	return -ENOMEM;
3578}
3579
3580static void __net_exit unix_net_exit(struct net *net)
3581{
3582	kvfree(net->unx.table.buckets);
3583	kvfree(net->unx.table.locks);
3584	unix_sysctl_unregister(net);
3585	remove_proc_entry("unix", net->proc_net);
3586}
3587
3588static struct pernet_operations unix_net_ops = {
3589	.init = unix_net_init,
3590	.exit = unix_net_exit,
3591};
3592
3593#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3594DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3595		     struct unix_sock *unix_sk, uid_t uid)
3596
3597#define INIT_BATCH_SZ 16
3598
3599static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3600{
3601	struct bpf_unix_iter_state *iter = priv_data;
3602	int err;
3603
3604	err = bpf_iter_init_seq_net(priv_data, aux);
3605	if (err)
3606		return err;
3607
3608	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3609	if (err) {
3610		bpf_iter_fini_seq_net(priv_data);
3611		return err;
3612	}
3613
3614	return 0;
3615}
3616
3617static void bpf_iter_fini_unix(void *priv_data)
3618{
3619	struct bpf_unix_iter_state *iter = priv_data;
3620
3621	bpf_iter_fini_seq_net(priv_data);
3622	kvfree(iter->batch);
3623}
3624
3625static const struct bpf_iter_seq_info unix_seq_info = {
3626	.seq_ops		= &bpf_iter_unix_seq_ops,
3627	.init_seq_private	= bpf_iter_init_unix,
3628	.fini_seq_private	= bpf_iter_fini_unix,
3629	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3630};
3631
3632static const struct bpf_func_proto *
3633bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3634			     const struct bpf_prog *prog)
3635{
3636	switch (func_id) {
3637	case BPF_FUNC_setsockopt:
3638		return &bpf_sk_setsockopt_proto;
3639	case BPF_FUNC_getsockopt:
3640		return &bpf_sk_getsockopt_proto;
3641	default:
3642		return NULL;
3643	}
3644}
3645
3646static struct bpf_iter_reg unix_reg_info = {
3647	.target			= "unix",
3648	.ctx_arg_info_size	= 1,
3649	.ctx_arg_info		= {
3650		{ offsetof(struct bpf_iter__unix, unix_sk),
3651		  PTR_TO_BTF_ID_OR_NULL },
3652	},
3653	.get_func_proto         = bpf_iter_unix_get_func_proto,
3654	.seq_info		= &unix_seq_info,
3655};
3656
3657static void __init bpf_iter_register(void)
3658{
3659	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3660	if (bpf_iter_reg_target(&unix_reg_info))
3661		pr_warn("Warning: could not register bpf iterator unix\n");
3662}
3663#endif
3664
3665static int __init af_unix_init(void)
3666{
3667	int i, rc = -1;
3668
3669	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3670
3671	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3672		spin_lock_init(&bsd_socket_locks[i]);
3673		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3674	}
3675
3676	rc = proto_register(&unix_dgram_proto, 1);
3677	if (rc != 0) {
3678		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3679		goto out;
3680	}
3681
3682	rc = proto_register(&unix_stream_proto, 1);
3683	if (rc != 0) {
3684		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3685		proto_unregister(&unix_dgram_proto);
3686		goto out;
3687	}
3688
3689	sock_register(&unix_family_ops);
3690	register_pernet_subsys(&unix_net_ops);
3691	unix_bpf_build_proto();
3692
3693#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3694	bpf_iter_register();
3695#endif
3696
3697out:
3698	return rc;
3699}
3700
3701/* Later than subsys_initcall() because we depend on stuff initialised there */
3702fs_initcall(af_unix_init);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * NET4:	Implementation of BSD Unix domain sockets.
   4 *
   5 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
   6 *
   7 * Fixes:
   8 *		Linus Torvalds	:	Assorted bug cures.
   9 *		Niibe Yutaka	:	async I/O support.
  10 *		Carsten Paeth	:	PF_UNIX check, address fixes.
  11 *		Alan Cox	:	Limit size of allocated blocks.
  12 *		Alan Cox	:	Fixed the stupid socketpair bug.
  13 *		Alan Cox	:	BSD compatibility fine tuning.
  14 *		Alan Cox	:	Fixed a bug in connect when interrupted.
  15 *		Alan Cox	:	Sorted out a proper draft version of
  16 *					file descriptor passing hacked up from
  17 *					Mike Shaver's work.
  18 *		Marty Leisner	:	Fixes to fd passing
  19 *		Nick Nevin	:	recvmsg bugfix.
  20 *		Alan Cox	:	Started proper garbage collector
  21 *		Heiko EiBfeldt	:	Missing verify_area check
  22 *		Alan Cox	:	Started POSIXisms
  23 *		Andreas Schwab	:	Replace inode by dentry for proper
  24 *					reference counting
  25 *		Kirk Petersen	:	Made this a module
  26 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
  27 *					Lots of bug fixes.
  28 *	     Alexey Kuznetosv	:	Repaired (I hope) bugs introduces
  29 *					by above two patches.
  30 *	     Andrea Arcangeli	:	If possible we block in connect(2)
  31 *					if the max backlog of the listen socket
  32 *					is been reached. This won't break
  33 *					old apps and it will avoid huge amount
  34 *					of socks hashed (this for unix_gc()
  35 *					performances reasons).
  36 *					Security fix that limits the max
  37 *					number of socks to 2*max_files and
  38 *					the number of skb queueable in the
  39 *					dgram receiver.
  40 *		Artur Skawina   :	Hash function optimizations
  41 *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
  42 *	      Malcolm Beattie   :	Set peercred for socketpair
  43 *	     Michal Ostrowski   :       Module initialization cleanup.
  44 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
  45 *	     				the core infrastructure is doing that
  46 *	     				for all net proto families now (2.5.69+)
  47 *
  48 * Known differences from reference BSD that was tested:
  49 *
  50 *	[TO FIX]
  51 *	ECONNREFUSED is not returned from one end of a connected() socket to the
  52 *		other the moment one end closes.
  53 *	fstat() doesn't return st_dev=0, and give the blksize as high water mark
  54 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
  55 *	[NOT TO FIX]
  56 *	accept() returns a path name even if the connecting socket has closed
  57 *		in the meantime (BSD loses the path and gives up).
  58 *	accept() returns 0 length path for an unbound connector. BSD returns 16
  59 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  60 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
  61 *	BSD af_unix apparently has connect forgetting to block properly.
  62 *		(need to check this with the POSIX spec in detail)
  63 *
  64 * Differences from 2.0.0-11-... (ANK)
  65 *	Bug fixes and improvements.
  66 *		- client shutdown killed server socket.
  67 *		- removed all useless cli/sti pairs.
  68 *
  69 *	Semantic changes/extensions.
  70 *		- generic control message passing.
  71 *		- SCM_CREDENTIALS control message.
  72 *		- "Abstract" (not FS based) socket bindings.
  73 *		  Abstract names are sequences of bytes (not zero terminated)
  74 *		  started by 0, so that this name space does not intersect
  75 *		  with BSD names.
  76 */
  77
  78#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  79
  80#include <linux/module.h>
  81#include <linux/kernel.h>
  82#include <linux/signal.h>
  83#include <linux/sched/signal.h>
  84#include <linux/errno.h>
  85#include <linux/string.h>
  86#include <linux/stat.h>
  87#include <linux/dcache.h>
  88#include <linux/namei.h>
  89#include <linux/socket.h>
  90#include <linux/un.h>
  91#include <linux/fcntl.h>
  92#include <linux/filter.h>
  93#include <linux/termios.h>
  94#include <linux/sockios.h>
  95#include <linux/net.h>
  96#include <linux/in.h>
  97#include <linux/fs.h>
  98#include <linux/slab.h>
  99#include <linux/uaccess.h>
 100#include <linux/skbuff.h>
 101#include <linux/netdevice.h>
 102#include <net/net_namespace.h>
 103#include <net/sock.h>
 104#include <net/tcp_states.h>
 105#include <net/af_unix.h>
 106#include <linux/proc_fs.h>
 107#include <linux/seq_file.h>
 108#include <net/scm.h>
 109#include <linux/init.h>
 110#include <linux/poll.h>
 111#include <linux/rtnetlink.h>
 112#include <linux/mount.h>
 113#include <net/checksum.h>
 114#include <linux/security.h>
 115#include <linux/splice.h>
 116#include <linux/freezer.h>
 117#include <linux/file.h>
 118#include <linux/btf_ids.h>
 119#include <linux/bpf-cgroup.h>
 120
 
 
 121static atomic_long_t unix_nr_socks;
 122static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
 123static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
 124
 125/* SMP locking strategy:
 126 *    hash table is protected with spinlock.
 127 *    each socket state is protected by separate spinlock.
 128 */
 129#ifdef CONFIG_PROVE_LOCKING
 130#define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))
 131
 132static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
 133				  const struct lockdep_map *b)
 134{
 135	return cmp_ptr(a, b);
 136}
 137
 138static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
 139				  const struct lockdep_map *_b)
 140{
 141	const struct unix_sock *a, *b;
 142
 143	a = container_of(_a, struct unix_sock, lock.dep_map);
 144	b = container_of(_b, struct unix_sock, lock.dep_map);
 145
 146	if (a->sk.sk_state == TCP_LISTEN) {
 147		/* unix_stream_connect(): Before the 2nd unix_state_lock(),
 148		 *
 149		 *   1. a is TCP_LISTEN.
 150		 *   2. b is not a.
 151		 *   3. concurrent connect(b -> a) must fail.
 152		 *
 153		 * Except for 2. & 3., the b's state can be any possible
 154		 * value due to concurrent connect() or listen().
 155		 *
 156		 * 2. is detected in debug_spin_lock_before(), and 3. cannot
 157		 * be expressed as lock_cmp_fn.
 158		 */
 159		switch (b->sk.sk_state) {
 160		case TCP_CLOSE:
 161		case TCP_ESTABLISHED:
 162		case TCP_LISTEN:
 163			return -1;
 164		default:
 165			/* Invalid case. */
 166			return 0;
 167		}
 168	}
 169
 170	/* Should never happen.  Just to be symmetric. */
 171	if (b->sk.sk_state == TCP_LISTEN) {
 172		switch (b->sk.sk_state) {
 173		case TCP_CLOSE:
 174		case TCP_ESTABLISHED:
 175			return 1;
 176		default:
 177			return 0;
 178		}
 179	}
 180
 181	/* unix_state_double_lock(): ascending address order. */
 182	return cmp_ptr(a, b);
 183}
 184
 185static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
 186				  const struct lockdep_map *_b)
 187{
 188	const struct sock *a, *b;
 189
 190	a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
 191	b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);
 192
 193	/* unix_collect_skb(): listener -> embryo order. */
 194	if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
 195		return -1;
 196
 197	/* Should never happen.  Just to be symmetric. */
 198	if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
 199		return 1;
 200
 201	return 0;
 202}
 203#endif
 204
 205static unsigned int unix_unbound_hash(struct sock *sk)
 206{
 207	unsigned long hash = (unsigned long)sk;
 208
 209	hash ^= hash >> 16;
 210	hash ^= hash >> 8;
 211	hash ^= sk->sk_type;
 212
 213	return hash & UNIX_HASH_MOD;
 214}
 215
 216static unsigned int unix_bsd_hash(struct inode *i)
 217{
 218	return i->i_ino & UNIX_HASH_MOD;
 219}
 220
 221static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
 222				       int addr_len, int type)
 223{
 224	__wsum csum = csum_partial(sunaddr, addr_len, 0);
 225	unsigned int hash;
 226
 227	hash = (__force unsigned int)csum_fold(csum);
 228	hash ^= hash >> 8;
 229	hash ^= type;
 230
 231	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
 232}
 233
 234static void unix_table_double_lock(struct net *net,
 235				   unsigned int hash1, unsigned int hash2)
 236{
 237	if (hash1 == hash2) {
 238		spin_lock(&net->unx.table.locks[hash1]);
 239		return;
 240	}
 241
 242	if (hash1 > hash2)
 243		swap(hash1, hash2);
 244
 245	spin_lock(&net->unx.table.locks[hash1]);
 246	spin_lock(&net->unx.table.locks[hash2]);
 247}
 248
 249static void unix_table_double_unlock(struct net *net,
 250				     unsigned int hash1, unsigned int hash2)
 251{
 252	if (hash1 == hash2) {
 253		spin_unlock(&net->unx.table.locks[hash1]);
 254		return;
 255	}
 256
 257	spin_unlock(&net->unx.table.locks[hash1]);
 258	spin_unlock(&net->unx.table.locks[hash2]);
 259}
 260
 261#ifdef CONFIG_SECURITY_NETWORK
 262static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 263{
 264	UNIXCB(skb).secid = scm->secid;
 265}
 266
 267static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 268{
 269	scm->secid = UNIXCB(skb).secid;
 270}
 271
 272static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 273{
 274	return (scm->secid == UNIXCB(skb).secid);
 275}
 276#else
 277static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 278{ }
 279
 280static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 281{ }
 282
 283static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 284{
 285	return true;
 286}
 287#endif /* CONFIG_SECURITY_NETWORK */
 288
 289static inline int unix_our_peer(struct sock *sk, struct sock *osk)
 290{
 291	return unix_peer(osk) == sk;
 292}
 293
 294static inline int unix_may_send(struct sock *sk, struct sock *osk)
 295{
 296	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 297}
 298
 299static inline int unix_recvq_full_lockless(const struct sock *sk)
 300{
 301	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 302}
 303
 304struct sock *unix_peer_get(struct sock *s)
 305{
 306	struct sock *peer;
 307
 308	unix_state_lock(s);
 309	peer = unix_peer(s);
 310	if (peer)
 311		sock_hold(peer);
 312	unix_state_unlock(s);
 313	return peer;
 314}
 315EXPORT_SYMBOL_GPL(unix_peer_get);
 316
 317static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
 318					     int addr_len)
 319{
 320	struct unix_address *addr;
 321
 322	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
 323	if (!addr)
 324		return NULL;
 325
 326	refcount_set(&addr->refcnt, 1);
 327	addr->len = addr_len;
 328	memcpy(addr->name, sunaddr, addr_len);
 329
 330	return addr;
 331}
 332
 333static inline void unix_release_addr(struct unix_address *addr)
 334{
 335	if (refcount_dec_and_test(&addr->refcnt))
 336		kfree(addr);
 337}
 338
 339/*
 340 *	Check unix socket name:
 341 *		- it should not be zero length.
 342 *		- if it starts with a non-zero byte, it must be NUL terminated (FS object)
 343 *		- if it starts with a zero byte, it is an abstract name.
 344 */
 345
 346static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
 347{
 348	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
 349	    addr_len > sizeof(*sunaddr))
 350		return -EINVAL;
 351
 352	if (sunaddr->sun_family != AF_UNIX)
 353		return -EINVAL;
 354
 355	return 0;
 356}
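/* Illustrative userspace sketch (assumed for exposition, not part of
 * this file) of the address forms the check above admits:
 *
 *	struct sockaddr_un un = { .sun_family = AF_UNIX };
 *
 *	strcpy(un.sun_path, "/tmp/x");     // pathname (filesystem) socket
 *	memcpy(un.sun_path, "\0name", 5);  // or: abstract socket, no NUL
 *
 * A bare addr_len == offsetof(struct sockaddr_un, sun_path) is rejected
 * here; unix_bind() treats that case as an autobind request instead.
 */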
 357
 358static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
 359{
 360	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
 361	short offset = offsetof(struct sockaddr_storage, __data);
 362
 363	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
 364
 365	/* This may look like an off by one error but it is a bit more
 366	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
 367	 * sun_path[108] doesn't as such exist.  However in kernel space
 368	 * we are guaranteed that it is a valid memory location in our
 369	 * kernel address buffer because syscall functions always pass
 370	 * a pointer of struct sockaddr_storage which has a bigger buffer
 371	 * than 108.  Also, we must terminate sun_path for strlen() in
 372	 * getname_kernel().
 373	 */
 374	addr->__data[addr_len - offset] = 0;
 375
 376	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, a full
 377	 * 108-byte path would cause a panic if CONFIG_FORTIFY_SOURCE=y.
 378	 * Let __fortify_strlen() know the actual buffer size.
 379	 */
 380	return strlen(addr->__data) + offset + 1;
 381}
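/* Worked example for the helper above (the path is chosen purely for
 * illustration): for sun_path "/tmp/sock" sent with addr_len ==
 * offsetof(struct sockaddr_un, sun_path) + 9, the store writes the
 * terminating NUL at __data[9], strlen() then returns 9, and the
 * function reports offset + 9 + 1 -- the length including the NUL byte
 * that the bind and hash code below rely on.
 */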
 382
 383static void __unix_remove_socket(struct sock *sk)
 384{
 385	sk_del_node_init(sk);
 386}
 387
 388static void __unix_insert_socket(struct net *net, struct sock *sk)
 389{
 390	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 391	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
 392}
 393
 394static void __unix_set_addr_hash(struct net *net, struct sock *sk,
 395				 struct unix_address *addr, unsigned int hash)
 396{
 397	__unix_remove_socket(sk);
 398	smp_store_release(&unix_sk(sk)->addr, addr);
 399
 400	sk->sk_hash = hash;
 401	__unix_insert_socket(net, sk);
 402}
 403
 404static void unix_remove_socket(struct net *net, struct sock *sk)
 405{
 406	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 407	__unix_remove_socket(sk);
 408	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 409}
 410
 411static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
 412{
 413	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 414	__unix_insert_socket(net, sk);
 415	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 416}
 417
 418static void unix_insert_bsd_socket(struct sock *sk)
 419{
 420	spin_lock(&bsd_socket_locks[sk->sk_hash]);
 421	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
 422	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 423}
 424
 425static void unix_remove_bsd_socket(struct sock *sk)
 426{
 427	if (!hlist_unhashed(&sk->sk_bind_node)) {
 428		spin_lock(&bsd_socket_locks[sk->sk_hash]);
 429		__sk_del_bind_node(sk);
 430		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 431
 432		sk_node_init(&sk->sk_bind_node);
 433	}
 434}
 435
 436static struct sock *__unix_find_socket_byname(struct net *net,
 437					      struct sockaddr_un *sunname,
 438					      int len, unsigned int hash)
 439{
 440	struct sock *s;
 441
 442	sk_for_each(s, &net->unx.table.buckets[hash]) {
 443		struct unix_sock *u = unix_sk(s);
 444
 445		if (u->addr->len == len &&
 446		    !memcmp(u->addr->name, sunname, len))
 447			return s;
 448	}
 449	return NULL;
 450}
 451
 452static inline struct sock *unix_find_socket_byname(struct net *net,
 453						   struct sockaddr_un *sunname,
 454						   int len, unsigned int hash)
 455{
 456	struct sock *s;
 457
 458	spin_lock(&net->unx.table.locks[hash]);
 459	s = __unix_find_socket_byname(net, sunname, len, hash);
 460	if (s)
 461		sock_hold(s);
 462	spin_unlock(&net->unx.table.locks[hash]);
 463	return s;
 464}
 465
 466static struct sock *unix_find_socket_byinode(struct inode *i)
 467{
 468	unsigned int hash = unix_bsd_hash(i);
 469	struct sock *s;
 470
 471	spin_lock(&bsd_socket_locks[hash]);
 472	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
 473		struct dentry *dentry = unix_sk(s)->path.dentry;
 474
 475		if (dentry && d_backing_inode(dentry) == i) {
 476			sock_hold(s);
 477			spin_unlock(&bsd_socket_locks[hash]);
 478			return s;
 479		}
 480	}
 481	spin_unlock(&bsd_socket_locks[hash]);
 482	return NULL;
 483}
 484
 485/* Support code for asymmetrically connected dgram sockets
 486 *
 487 * If a datagram socket is connected to a socket not itself connected
 488 * to the first socket (e.g., /dev/log), clients may only enqueue more
 489 * messages if the present receive queue of the server socket is not
 490 * "too large". This means there's a second writeability condition
 491 * poll and sendmsg need to test. The dgram recv code will do a wake
 492 * up on the peer_wait wait queue of a socket upon reception of a
 493 * datagram which needs to be propagated to sleeping would-be writers
 494 * since these might not have sent anything so far. This can't be
 495 * accomplished via poll_wait because the lifetime of the server
 496 * socket might be less than that of its clients if these break their
 497 * association with it or if the server socket is closed while clients
 498 * are still connected to it and there's no way to inform "a polling
 499 * implementation" that it should let go of a certain wait queue.
 500 *
 501 * In order to propagate a wake up, a wait_queue_entry_t of the client
 502 * socket is enqueued on the peer_wait queue of the server socket
 503 * whose wake function does a wake_up on the ordinary client socket
 504 * wait queue. This connection is established whenever a write (or
 505 * poll for write) hits the flow control condition, and is broken when the
 506 * association to the server socket is dissolved or after a wake up
 507 * was relayed.
 508 */
 509
 510static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 511				      void *key)
 512{
 513	struct unix_sock *u;
 514	wait_queue_head_t *u_sleep;
 515
 516	u = container_of(q, struct unix_sock, peer_wake);
 517
 518	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 519			    q);
 520	u->peer_wake.private = NULL;
 521
 522	/* relaying can only happen while the wq still exists */
 523	u_sleep = sk_sleep(&u->sk);
 524	if (u_sleep)
 525		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
 526
 527	return 0;
 528}
 529
 530static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 531{
 532	struct unix_sock *u, *u_other;
 533	int rc;
 534
 535	u = unix_sk(sk);
 536	u_other = unix_sk(other);
 537	rc = 0;
 538	spin_lock(&u_other->peer_wait.lock);
 539
 540	if (!u->peer_wake.private) {
 541		u->peer_wake.private = other;
 542		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 543
 544		rc = 1;
 545	}
 546
 547	spin_unlock(&u_other->peer_wait.lock);
 548	return rc;
 549}
 550
 551static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 552					    struct sock *other)
 553{
 554	struct unix_sock *u, *u_other;
 555
 556	u = unix_sk(sk);
 557	u_other = unix_sk(other);
 558	spin_lock(&u_other->peer_wait.lock);
 559
 560	if (u->peer_wake.private == other) {
 561		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 562		u->peer_wake.private = NULL;
 563	}
 564
 565	spin_unlock(&u_other->peer_wait.lock);
 566}
 567
 568static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 569						   struct sock *other)
 570{
 571	unix_dgram_peer_wake_disconnect(sk, other);
 572	wake_up_interruptible_poll(sk_sleep(sk),
 573				   EPOLLOUT |
 574				   EPOLLWRNORM |
 575				   EPOLLWRBAND);
 576}
 577
 578/* preconditions:
 579 *	- unix_peer(sk) == other
 580 *	- association is stable
 581 */
 582static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 583{
 584	int connected;
 585
 586	connected = unix_dgram_peer_wake_connect(sk, other);
 587
 588	/* If other is SOCK_DEAD, we want to make sure we signal
 589	 * POLLOUT, such that a subsequent write() can get a
 590	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
 591	 * to other and it's full, we will hang waiting for POLLOUT.
 592	 */
 593	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
 594		return 1;
 595
 596	if (connected)
 597		unix_dgram_peer_wake_disconnect(sk, other);
 598
 599	return 0;
 600}
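/* How callers below use the return value: 1 means the peer's queue is
 * still full and the caller should block or report !writable -- the
 * wait-queue hook stays armed so the next dgram dequeued on the peer
 * relays a wake-up to us; 0 means the caller need not wait (either the
 * queue has room, or the peer is dead and the write will fail fast),
 * and any hook just armed has been torn down again.
 */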
 601
 602static int unix_writable(const struct sock *sk, unsigned char state)
 603{
 604	return state != TCP_LISTEN &&
 605		(refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
 606}
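/* The shift above encodes the writability heuristic: a socket counts as
 * writable only while sk_wmem_alloc * 4 <= sk_sndbuf, i.e. at least
 * three quarters of the send buffer is free, so EPOLLOUT is not raised
 * for trivial amounts of space.
 */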
 607
 608static void unix_write_space(struct sock *sk)
 609{
 610	struct socket_wq *wq;
 611
 612	rcu_read_lock();
 613	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
 614		wq = rcu_dereference(sk->sk_wq);
 615		if (skwq_has_sleeper(wq))
 616			wake_up_interruptible_sync_poll(&wq->wait,
 617				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 618		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
 619	}
 620	rcu_read_unlock();
 621}
 622
 623/* When a dgram socket disconnects (or changes its peer), we clear its receive
 624 * queue of packets that arrived from the previous peer. First, this allows
 625 * flow control based only on wmem_alloc; second, an sk connected to a peer
 626 * may receive messages only from that peer. */
 627static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 628{
 629	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 630		skb_queue_purge(&sk->sk_receive_queue);
 631		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 632
 633		/* If one link of a bidirectional dgram pipe is disconnected,
 634		 * we signal an error. Messages are lost. Do not do this
 635		 * when the peer was not connected to us.
 636		 */
 637		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
 638			WRITE_ONCE(other->sk_err, ECONNRESET);
 639			sk_error_report(other);
 640		}
 641	}
 642}
 643
 644static void unix_sock_destructor(struct sock *sk)
 645{
 646	struct unix_sock *u = unix_sk(sk);
 647
 648	skb_queue_purge(&sk->sk_receive_queue);
 649
 650	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
 651	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 652	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
 653	if (!sock_flag(sk, SOCK_DEAD)) {
 654		pr_info("Attempt to release alive unix socket: %p\n", sk);
 655		return;
 656	}
 657
 658	if (u->addr)
 659		unix_release_addr(u->addr);
 660
 661	atomic_long_dec(&unix_nr_socks);
 662	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 663#ifdef UNIX_REFCNT_DEBUG
 664	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
 665		atomic_long_read(&unix_nr_socks));
 666#endif
 667}
 668
 669static void unix_release_sock(struct sock *sk, int embrion)
 670{
 671	struct unix_sock *u = unix_sk(sk);
 672	struct sock *skpair;
 673	struct sk_buff *skb;
 674	struct path path;
 675	int state;
 676
 677	unix_remove_socket(sock_net(sk), sk);
 678	unix_remove_bsd_socket(sk);
 679
 680	/* Clear state */
 681	unix_state_lock(sk);
 682	sock_orphan(sk);
 683	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 684	path	     = u->path;
 685	u->path.dentry = NULL;
 686	u->path.mnt = NULL;
 687	state = sk->sk_state;
 688	WRITE_ONCE(sk->sk_state, TCP_CLOSE);
 689
 690	skpair = unix_peer(sk);
 691	unix_peer(sk) = NULL;
 692
 693	unix_state_unlock(sk);
 694
 695#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 696	u->oob_skb = NULL;
 697#endif
 698
 699	wake_up_interruptible_all(&u->peer_wait);
 700
 701	if (skpair != NULL) {
 702		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 703			unix_state_lock(skpair);
 704			/* No more writes */
 705			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
 706			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
 707				WRITE_ONCE(skpair->sk_err, ECONNRESET);
 708			unix_state_unlock(skpair);
 709			skpair->sk_state_change(skpair);
 710			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 711		}
 712
 713		unix_dgram_peer_wake_disconnect(sk, skpair);
 714		sock_put(skpair); /* It may now die */
 715	}
 716
 717	/* Try to flush out this socket. Throw out buffers at least */
 718
 719	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 720		if (state == TCP_LISTEN)
 721			unix_release_sock(skb->sk, 1);
 722
 723		/* passed fds are erased in the kfree_skb hook */
 724		kfree_skb(skb);
 725	}
 726
 727	if (path.dentry)
 728		path_put(&path);
 729
 730	sock_put(sk);
 731
 732	/* ---- Socket is dead now and most probably destroyed ---- */
 733
 734	/*
 735	 * Fixme: BSD difference: In BSD all sockets connected to us get
 736	 *	  ECONNRESET and we die on the spot. In Linux we behave
 737	 *	  like files and pipes do and wait for the last
 738	 *	  dereference.
 739	 *
 740	 * Can't we simply set sock->err?
 741	 *
 742	 *	  What does the above comment talk about? --ANK(980817)
 743	 */
 744
 745	if (READ_ONCE(unix_tot_inflight))
 746		unix_gc();		/* Garbage collect fds */
 747}
 748
 749static void init_peercred(struct sock *sk)
 750{
 751	sk->sk_peer_pid = get_pid(task_tgid(current));
 752	sk->sk_peer_cred = get_current_cred();
 753}
 754
 755static void update_peercred(struct sock *sk)
 756{
 757	const struct cred *old_cred;
 758	struct pid *old_pid;
 759
 760	spin_lock(&sk->sk_peer_lock);
 761	old_pid = sk->sk_peer_pid;
 762	old_cred = sk->sk_peer_cred;
 763	init_peercred(sk);
 764	spin_unlock(&sk->sk_peer_lock);
 765
 766	put_pid(old_pid);
 767	put_cred(old_cred);
 768}
 769
 770static void copy_peercred(struct sock *sk, struct sock *peersk)
 771{
 772	lockdep_assert_held(&unix_sk(peersk)->lock);
 773
 774	spin_lock(&sk->sk_peer_lock);
 775	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
 776	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 777	spin_unlock(&sk->sk_peer_lock);
 778}
 779
 780static int unix_listen(struct socket *sock, int backlog)
 781{
 782	int err;
 783	struct sock *sk = sock->sk;
 784	struct unix_sock *u = unix_sk(sk);
 785
 786	err = -EOPNOTSUPP;
 787	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 788		goto out;	/* Only stream/seqpacket sockets accept */
 789	err = -EINVAL;
 790	if (!READ_ONCE(u->addr))
 791		goto out;	/* No listens on an unbound socket */
 792	unix_state_lock(sk);
 793	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
 794		goto out_unlock;
 795	if (backlog > sk->sk_max_ack_backlog)
 796		wake_up_interruptible_all(&u->peer_wait);
 797	sk->sk_max_ack_backlog	= backlog;
 798	WRITE_ONCE(sk->sk_state, TCP_LISTEN);
 799
 800	/* set credentials so connect can copy them */
 801	update_peercred(sk);
 802	err = 0;
 803
 804out_unlock:
 805	unix_state_unlock(sk);
 806out:
 807	return err;
 808}
 809
 810static int unix_release(struct socket *);
 811static int unix_bind(struct socket *, struct sockaddr *, int);
 812static int unix_stream_connect(struct socket *, struct sockaddr *,
 813			       int addr_len, int flags);
 814static int unix_socketpair(struct socket *, struct socket *);
 815static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
 816static int unix_getname(struct socket *, struct sockaddr *, int);
 817static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
 818static __poll_t unix_dgram_poll(struct file *, struct socket *,
 819				    poll_table *);
 820static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 821#ifdef CONFIG_COMPAT
 822static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 823#endif
 824static int unix_shutdown(struct socket *, int);
 825static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 826static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
 827static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
 828				       struct pipe_inode_info *, size_t size,
 829				       unsigned int flags);
 830static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 831static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 832static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 833static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 834static int unix_dgram_connect(struct socket *, struct sockaddr *,
 835			      int, int);
 836static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
 837static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
 838				  int);
 839
 840#ifdef CONFIG_PROC_FS
 841static int unix_count_nr_fds(struct sock *sk)
 842{
 843	struct sk_buff *skb;
 844	struct unix_sock *u;
 845	int nr_fds = 0;
 846
 847	spin_lock(&sk->sk_receive_queue.lock);
 848	skb = skb_peek(&sk->sk_receive_queue);
 849	while (skb) {
 850		u = unix_sk(skb->sk);
 851		nr_fds += atomic_read(&u->scm_stat.nr_fds);
 852		skb = skb_peek_next(skb, &sk->sk_receive_queue);
 853	}
 854	spin_unlock(&sk->sk_receive_queue.lock);
 855
 856	return nr_fds;
 857}
 858
 859static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 860{
 861	struct sock *sk = sock->sk;
 862	unsigned char s_state;
 863	struct unix_sock *u;
 864	int nr_fds = 0;
 865
 866	if (sk) {
 867		s_state = READ_ONCE(sk->sk_state);
 868		u = unix_sk(sk);
 869
 870		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
 871		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
 872		 * SOCK_DGRAM is ordinary. So, no lock is needed.
 873		 */
 874		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
 875			nr_fds = atomic_read(&u->scm_stat.nr_fds);
 876		else if (s_state == TCP_LISTEN)
 877			nr_fds = unix_count_nr_fds(sk);
 878
 879		seq_printf(m, "scm_fds: %u\n", nr_fds);
 880	}
 881}
 882#else
 883#define unix_show_fdinfo NULL
 884#endif
 885
 886static const struct proto_ops unix_stream_ops = {
 887	.family =	PF_UNIX,
 888	.owner =	THIS_MODULE,
 889	.release =	unix_release,
 890	.bind =		unix_bind,
 891	.connect =	unix_stream_connect,
 892	.socketpair =	unix_socketpair,
 893	.accept =	unix_accept,
 894	.getname =	unix_getname,
 895	.poll =		unix_poll,
 896	.ioctl =	unix_ioctl,
 897#ifdef CONFIG_COMPAT
 898	.compat_ioctl =	unix_compat_ioctl,
 899#endif
 900	.listen =	unix_listen,
 901	.shutdown =	unix_shutdown,
 902	.sendmsg =	unix_stream_sendmsg,
 903	.recvmsg =	unix_stream_recvmsg,
 904	.read_skb =	unix_stream_read_skb,
 905	.mmap =		sock_no_mmap,
 906	.splice_read =	unix_stream_splice_read,
 907	.set_peek_off =	sk_set_peek_off,
 908	.show_fdinfo =	unix_show_fdinfo,
 909};
 910
 911static const struct proto_ops unix_dgram_ops = {
 912	.family =	PF_UNIX,
 913	.owner =	THIS_MODULE,
 914	.release =	unix_release,
 915	.bind =		unix_bind,
 916	.connect =	unix_dgram_connect,
 917	.socketpair =	unix_socketpair,
 918	.accept =	sock_no_accept,
 919	.getname =	unix_getname,
 920	.poll =		unix_dgram_poll,
 921	.ioctl =	unix_ioctl,
 922#ifdef CONFIG_COMPAT
 923	.compat_ioctl =	unix_compat_ioctl,
 924#endif
 925	.listen =	sock_no_listen,
 926	.shutdown =	unix_shutdown,
 927	.sendmsg =	unix_dgram_sendmsg,
 928	.read_skb =	unix_read_skb,
 929	.recvmsg =	unix_dgram_recvmsg,
 930	.mmap =		sock_no_mmap,
 931	.set_peek_off =	sk_set_peek_off,
 932	.show_fdinfo =	unix_show_fdinfo,
 933};
 934
 935static const struct proto_ops unix_seqpacket_ops = {
 936	.family =	PF_UNIX,
 937	.owner =	THIS_MODULE,
 938	.release =	unix_release,
 939	.bind =		unix_bind,
 940	.connect =	unix_stream_connect,
 941	.socketpair =	unix_socketpair,
 942	.accept =	unix_accept,
 943	.getname =	unix_getname,
 944	.poll =		unix_dgram_poll,
 945	.ioctl =	unix_ioctl,
 946#ifdef CONFIG_COMPAT
 947	.compat_ioctl =	unix_compat_ioctl,
 948#endif
 949	.listen =	unix_listen,
 950	.shutdown =	unix_shutdown,
 951	.sendmsg =	unix_seqpacket_sendmsg,
 952	.recvmsg =	unix_seqpacket_recvmsg,
 953	.mmap =		sock_no_mmap,
 954	.set_peek_off =	sk_set_peek_off,
 955	.show_fdinfo =	unix_show_fdinfo,
 956};
 957
 958static void unix_close(struct sock *sk, long timeout)
 959{
 960	/* Nothing to do here, unix socket does not need a ->close().
 961	 * This is merely for sockmap.
 962	 */
 963}
 964
 965static void unix_unhash(struct sock *sk)
 966{
 967	/* Nothing to do here, unix socket does not need a ->unhash().
 968	 * This is merely for sockmap.
 969	 */
 970}
 971
 972static bool unix_bpf_bypass_getsockopt(int level, int optname)
 973{
 974	if (level == SOL_SOCKET) {
 975		switch (optname) {
 976		case SO_PEERPIDFD:
 977			return true;
 978		default:
 979			return false;
 980		}
 981	}
 982
 983	return false;
 984}
 985
 986struct proto unix_dgram_proto = {
 987	.name			= "UNIX",
 988	.owner			= THIS_MODULE,
 989	.obj_size		= sizeof(struct unix_sock),
 990	.close			= unix_close,
 991	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 992#ifdef CONFIG_BPF_SYSCALL
 993	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
 994#endif
 995};
 996
 997struct proto unix_stream_proto = {
 998	.name			= "UNIX-STREAM",
 999	.owner			= THIS_MODULE,
1000	.obj_size		= sizeof(struct unix_sock),
1001	.close			= unix_close,
1002	.unhash			= unix_unhash,
1003	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
1004#ifdef CONFIG_BPF_SYSCALL
1005	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
1006#endif
1007};
1008
1009static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
1010{
1011	struct unix_sock *u;
1012	struct sock *sk;
1013	int err;
1014
1015	atomic_long_inc(&unix_nr_socks);
1016	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
1017		err = -ENFILE;
1018		goto err;
1019	}
1020
1021	if (type == SOCK_STREAM)
1022		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
 1023	else /* dgram and seqpacket */
1024		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
1025
1026	if (!sk) {
1027		err = -ENOMEM;
1028		goto err;
1029	}
1030
1031	sock_init_data(sock, sk);
1032
1033	sk->sk_hash		= unix_unbound_hash(sk);
1034	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
1035	sk->sk_write_space	= unix_write_space;
1036	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
1037	sk->sk_destruct		= unix_sock_destructor;
1038	lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);
1039
1040	u = unix_sk(sk);
1041	u->listener = NULL;
1042	u->vertex = NULL;
1043	u->path.dentry = NULL;
1044	u->path.mnt = NULL;
1045	spin_lock_init(&u->lock);
1046	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
1047	mutex_init(&u->iolock); /* single task reading lock */
1048	mutex_init(&u->bindlock); /* single task binding lock */
1049	init_waitqueue_head(&u->peer_wait);
1050	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
1051	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
1052	unix_insert_unbound_socket(net, sk);
1053
1054	sock_prot_inuse_add(net, sk->sk_prot, 1);
1055
1056	return sk;
1057
1058err:
1059	atomic_long_dec(&unix_nr_socks);
1060	return ERR_PTR(err);
1061}
1062
1063static int unix_create(struct net *net, struct socket *sock, int protocol,
1064		       int kern)
1065{
1066	struct sock *sk;
1067
1068	if (protocol && protocol != PF_UNIX)
1069		return -EPROTONOSUPPORT;
1070
1071	sock->state = SS_UNCONNECTED;
1072
1073	switch (sock->type) {
1074	case SOCK_STREAM:
1075		sock->ops = &unix_stream_ops;
1076		break;
1077		/*
 1078		 *	Believe it or not, BSD has AF_UNIX SOCK_RAW, though
1079		 *	nothing uses it.
1080		 */
1081	case SOCK_RAW:
1082		sock->type = SOCK_DGRAM;
1083		fallthrough;
1084	case SOCK_DGRAM:
1085		sock->ops = &unix_dgram_ops;
1086		break;
1087	case SOCK_SEQPACKET:
1088		sock->ops = &unix_seqpacket_ops;
1089		break;
1090	default:
1091		return -ESOCKTNOSUPPORT;
1092	}
1093
1094	sk = unix_create1(net, sock, kern, sock->type);
1095	if (IS_ERR(sk))
1096		return PTR_ERR(sk);
1097
1098	return 0;
1099}
1100
1101static int unix_release(struct socket *sock)
1102{
1103	struct sock *sk = sock->sk;
1104
1105	if (!sk)
1106		return 0;
1107
1108	sk->sk_prot->close(sk, 0);
1109	unix_release_sock(sk, 0);
1110	sock->sk = NULL;
1111
1112	return 0;
1113}
1114
1115static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1116				  int type)
1117{
1118	struct inode *inode;
1119	struct path path;
1120	struct sock *sk;
1121	int err;
1122
1123	unix_mkname_bsd(sunaddr, addr_len);
1124	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1125	if (err)
1126		goto fail;
1127
1128	err = path_permission(&path, MAY_WRITE);
1129	if (err)
1130		goto path_put;
1131
1132	err = -ECONNREFUSED;
1133	inode = d_backing_inode(path.dentry);
1134	if (!S_ISSOCK(inode->i_mode))
1135		goto path_put;
1136
1137	sk = unix_find_socket_byinode(inode);
1138	if (!sk)
1139		goto path_put;
1140
1141	err = -EPROTOTYPE;
1142	if (sk->sk_type == type)
1143		touch_atime(&path);
1144	else
1145		goto sock_put;
1146
1147	path_put(&path);
1148
1149	return sk;
1150
1151sock_put:
1152	sock_put(sk);
1153path_put:
1154	path_put(&path);
1155fail:
1156	return ERR_PTR(err);
1157}
1158
1159static struct sock *unix_find_abstract(struct net *net,
1160				       struct sockaddr_un *sunaddr,
1161				       int addr_len, int type)
1162{
1163	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1164	struct dentry *dentry;
1165	struct sock *sk;
1166
1167	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1168	if (!sk)
1169		return ERR_PTR(-ECONNREFUSED);
1170
1171	dentry = unix_sk(sk)->path.dentry;
1172	if (dentry)
1173		touch_atime(&unix_sk(sk)->path);
1174
1175	return sk;
1176}
1177
1178static struct sock *unix_find_other(struct net *net,
1179				    struct sockaddr_un *sunaddr,
1180				    int addr_len, int type)
1181{
1182	struct sock *sk;
1183
1184	if (sunaddr->sun_path[0])
1185		sk = unix_find_bsd(sunaddr, addr_len, type);
1186	else
1187		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1188
1189	return sk;
1190}
1191
1192static int unix_autobind(struct sock *sk)
1193{
1194	struct unix_sock *u = unix_sk(sk);
1195	unsigned int new_hash, old_hash;
1196	struct net *net = sock_net(sk);
1197	struct unix_address *addr;
1198	u32 lastnum, ordernum;
1199	int err;
1200
1201	err = mutex_lock_interruptible(&u->bindlock);
1202	if (err)
1203		return err;
1204
1205	if (u->addr)
1206		goto out;
1207
1208	err = -ENOMEM;
1209	addr = kzalloc(sizeof(*addr) +
1210		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1211	if (!addr)
1212		goto out;
1213
1214	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1215	addr->name->sun_family = AF_UNIX;
1216	refcount_set(&addr->refcnt, 1);
1217
1218	old_hash = sk->sk_hash;
1219	ordernum = get_random_u32();
1220	lastnum = ordernum & 0xFFFFF;
1221retry:
1222	ordernum = (ordernum + 1) & 0xFFFFF;
1223	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1224
1225	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1226	unix_table_double_lock(net, old_hash, new_hash);
1227
1228	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1229		unix_table_double_unlock(net, old_hash, new_hash);
1230
 1231		/* __unix_find_socket_byname() may take a long time if many names
1232		 * are already in use.
1233		 */
1234		cond_resched();
1235
1236		if (ordernum == lastnum) {
 1237			/* Give up if all names seem to be in use. */
1238			err = -ENOSPC;
1239			unix_release_addr(addr);
1240			goto out;
1241		}
1242
1243		goto retry;
1244	}
1245
1246	__unix_set_addr_hash(net, sk, addr, new_hash);
1247	unix_table_double_unlock(net, old_hash, new_hash);
1248	err = 0;
1249
1250out:	mutex_unlock(&u->bindlock);
1251	return err;
1252}
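/* Illustrative userspace view of autobind (a sketch, not from this
 * file):
 *
 *	struct sockaddr_un un = { .sun_family = AF_UNIX };
 *
 *	bind(fd, (struct sockaddr *)&un, sizeof(sa_family_t));
 *	getsockname(fd, ...);	// abstract name: '\0' + 5 hex digits
 *
 * matching addr->len = offsetof(...) + 6 and the "%05x" format above;
 * the retry loop walks at most 2^20 candidate names before -ENOSPC.
 */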
1253
1254static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1255			 int addr_len)
1256{
1257	umode_t mode = S_IFSOCK |
1258	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1259	struct unix_sock *u = unix_sk(sk);
1260	unsigned int new_hash, old_hash;
1261	struct net *net = sock_net(sk);
1262	struct mnt_idmap *idmap;
1263	struct unix_address *addr;
1264	struct dentry *dentry;
1265	struct path parent;
1266	int err;
1267
1268	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1269	addr = unix_create_addr(sunaddr, addr_len);
1270	if (!addr)
1271		return -ENOMEM;
1272
1273	/*
1274	 * Get the parent directory, calculate the hash for last
1275	 * component.
1276	 */
1277	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1278	if (IS_ERR(dentry)) {
1279		err = PTR_ERR(dentry);
1280		goto out;
1281	}
1282
1283	/*
1284	 * All right, let's create it.
1285	 */
1286	idmap = mnt_idmap(parent.mnt);
1287	err = security_path_mknod(&parent, dentry, mode, 0);
1288	if (!err)
1289		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1290	if (err)
1291		goto out_path;
1292	err = mutex_lock_interruptible(&u->bindlock);
1293	if (err)
1294		goto out_unlink;
1295	if (u->addr)
1296		goto out_unlock;
1297
1298	old_hash = sk->sk_hash;
1299	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1300	unix_table_double_lock(net, old_hash, new_hash);
1301	u->path.mnt = mntget(parent.mnt);
1302	u->path.dentry = dget(dentry);
1303	__unix_set_addr_hash(net, sk, addr, new_hash);
1304	unix_table_double_unlock(net, old_hash, new_hash);
1305	unix_insert_bsd_socket(sk);
1306	mutex_unlock(&u->bindlock);
1307	done_path_create(&parent, dentry);
1308	return 0;
1309
1310out_unlock:
1311	mutex_unlock(&u->bindlock);
1312	err = -EINVAL;
1313out_unlink:
1314	/* failed after successful mknod?  unlink what we'd created... */
1315	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1316out_path:
1317	done_path_create(&parent, dentry);
1318out:
1319	unix_release_addr(addr);
1320	return err == -EEXIST ? -EADDRINUSE : err;
1321}
1322
1323static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1324			      int addr_len)
1325{
1326	struct unix_sock *u = unix_sk(sk);
1327	unsigned int new_hash, old_hash;
1328	struct net *net = sock_net(sk);
1329	struct unix_address *addr;
1330	int err;
1331
1332	addr = unix_create_addr(sunaddr, addr_len);
1333	if (!addr)
1334		return -ENOMEM;
1335
1336	err = mutex_lock_interruptible(&u->bindlock);
1337	if (err)
1338		goto out;
1339
1340	if (u->addr) {
1341		err = -EINVAL;
1342		goto out_mutex;
1343	}
1344
1345	old_hash = sk->sk_hash;
1346	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1347	unix_table_double_lock(net, old_hash, new_hash);
1348
1349	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1350		goto out_spin;
1351
1352	__unix_set_addr_hash(net, sk, addr, new_hash);
1353	unix_table_double_unlock(net, old_hash, new_hash);
1354	mutex_unlock(&u->bindlock);
1355	return 0;
1356
1357out_spin:
1358	unix_table_double_unlock(net, old_hash, new_hash);
1359	err = -EADDRINUSE;
1360out_mutex:
1361	mutex_unlock(&u->bindlock);
1362out:
1363	unix_release_addr(addr);
1364	return err;
1365}
1366
1367static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1368{
1369	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1370	struct sock *sk = sock->sk;
1371	int err;
1372
1373	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1374	    sunaddr->sun_family == AF_UNIX)
1375		return unix_autobind(sk);
1376
1377	err = unix_validate_addr(sunaddr, addr_len);
1378	if (err)
1379		return err;
1380
1381	if (sunaddr->sun_path[0])
1382		err = unix_bind_bsd(sk, sunaddr, addr_len);
1383	else
1384		err = unix_bind_abstract(sk, sunaddr, addr_len);
1385
1386	return err;
1387}
1388
1389static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1390{
1391	if (unlikely(sk1 == sk2) || !sk2) {
1392		unix_state_lock(sk1);
1393		return;
1394	}
1395
1396	if (sk1 > sk2)
1397		swap(sk1, sk2);
1398
1399	unix_state_lock(sk1);
1400	unix_state_lock(sk2);
1401}
1402
1403static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1404{
1405	if (unlikely(sk1 == sk2) || !sk2) {
1406		unix_state_unlock(sk1);
1407		return;
1408	}
1409	unix_state_unlock(sk1);
1410	unix_state_unlock(sk2);
1411}
1412
1413static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1414			      int alen, int flags)
1415{
1416	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1417	struct sock *sk = sock->sk;
1418	struct sock *other;
1419	int err;
1420
1421	err = -EINVAL;
1422	if (alen < offsetofend(struct sockaddr, sa_family))
1423		goto out;
1424
1425	if (addr->sa_family != AF_UNSPEC) {
1426		err = unix_validate_addr(sunaddr, alen);
1427		if (err)
1428			goto out;
1429
1430		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1431		if (err)
1432			goto out;
1433
1434		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1435		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1436		    !READ_ONCE(unix_sk(sk)->addr)) {
1437			err = unix_autobind(sk);
1438			if (err)
1439				goto out;
1440		}
1441
1442restart:
1443		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1444		if (IS_ERR(other)) {
1445			err = PTR_ERR(other);
1446			goto out;
1447		}
1448
1449		unix_state_double_lock(sk, other);
1450
1451		/* Apparently VFS overslept socket death. Retry. */
1452		if (sock_flag(other, SOCK_DEAD)) {
1453			unix_state_double_unlock(sk, other);
1454			sock_put(other);
1455			goto restart;
1456		}
1457
1458		err = -EPERM;
1459		if (!unix_may_send(sk, other))
1460			goto out_unlock;
1461
1462		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1463		if (err)
1464			goto out_unlock;
1465
1466		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1467		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
1468	} else {
1469		/*
1470		 *	1003.1g breaking connected state with AF_UNSPEC
1471		 */
1472		other = NULL;
1473		unix_state_double_lock(sk, other);
1474	}
1475
1476	/*
1477	 * If it was connected, reconnect.
1478	 */
1479	if (unix_peer(sk)) {
1480		struct sock *old_peer = unix_peer(sk);
1481
1482		unix_peer(sk) = other;
1483		if (!other)
1484			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
1485		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1486
1487		unix_state_double_unlock(sk, other);
1488
1489		if (other != old_peer) {
1490			unix_dgram_disconnected(sk, old_peer);
1491
1492			unix_state_lock(old_peer);
1493			if (!unix_peer(old_peer))
1494				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
1495			unix_state_unlock(old_peer);
1496		}
1497
1498		sock_put(old_peer);
1499	} else {
1500		unix_peer(sk) = other;
1501		unix_state_double_unlock(sk, other);
1502	}
1503
1504	return 0;
1505
1506out_unlock:
1507	unix_state_double_unlock(sk, other);
1508	sock_put(other);
1509out:
1510	return err;
1511}
1512
1513static long unix_wait_for_peer(struct sock *other, long timeo)
1514	__releases(&unix_sk(other)->lock)
1515{
1516	struct unix_sock *u = unix_sk(other);
1517	int sched;
1518	DEFINE_WAIT(wait);
1519
1520	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1521
1522	sched = !sock_flag(other, SOCK_DEAD) &&
1523		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1524		unix_recvq_full_lockless(other);
1525
1526	unix_state_unlock(other);
1527
1528	if (sched)
1529		timeo = schedule_timeout(timeo);
1530
1531	finish_wait(&u->peer_wait, &wait);
1532	return timeo;
1533}
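/* Note: the wait above is registered with prepare_to_wait_exclusive(),
 * so a plain wake-up on the peer's peer_wait queue releases only one
 * blocked connector/sender at a time instead of stampeding all of them
 * just to have most re-block on a still-full queue.
 */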
1534
1535static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1536			       int addr_len, int flags)
1537{
1538	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1539	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1540	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1541	struct net *net = sock_net(sk);
1542	struct sk_buff *skb = NULL;
1543	unsigned char state;
1544	long timeo;
1545	int err;
1546
1547	err = unix_validate_addr(sunaddr, addr_len);
1548	if (err)
1549		goto out;
1550
1551	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1552	if (err)
1553		goto out;
1554
1555	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1556	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1557	    !READ_ONCE(u->addr)) {
1558		err = unix_autobind(sk);
1559		if (err)
1560			goto out;
1561	}
1562
1563	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1564
 1565	/* First of all, allocate resources.
 1566	 * If we do this after the state is locked,
 1567	 * we will have to recheck everything again in any case.
1568	 */
1569
1570	/* create new sock for complete connection */
1571	newsk = unix_create1(net, NULL, 0, sock->type);
1572	if (IS_ERR(newsk)) {
1573		err = PTR_ERR(newsk);
1574		newsk = NULL;
1575		goto out;
1576	}
1577
1578	err = -ENOMEM;
1579
1580	/* Allocate skb for sending to listening sock */
1581	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1582	if (skb == NULL)
1583		goto out;
1584
1585restart:
1586	/*  Find listening sock. */
1587	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1588	if (IS_ERR(other)) {
1589		err = PTR_ERR(other);
1590		other = NULL;
1591		goto out;
1592	}
1593
1594	unix_state_lock(other);
1595
1596	/* Apparently VFS overslept socket death. Retry. */
1597	if (sock_flag(other, SOCK_DEAD)) {
1598		unix_state_unlock(other);
1599		sock_put(other);
1600		goto restart;
1601	}
1602
1603	err = -ECONNREFUSED;
1604	if (other->sk_state != TCP_LISTEN)
1605		goto out_unlock;
1606	if (other->sk_shutdown & RCV_SHUTDOWN)
1607		goto out_unlock;
1608
1609	if (unix_recvq_full_lockless(other)) {
1610		err = -EAGAIN;
1611		if (!timeo)
1612			goto out_unlock;
1613
1614		timeo = unix_wait_for_peer(other, timeo);
1615
1616		err = sock_intr_errno(timeo);
1617		if (signal_pending(current))
1618			goto out;
1619		sock_put(other);
1620		goto restart;
1621	}
1622
1623	/* self connect and simultaneous connect are eliminated
1624	 * by rejecting TCP_LISTEN socket to avoid deadlock.
1625	 */
1626	state = READ_ONCE(sk->sk_state);
1627	if (unlikely(state != TCP_CLOSE)) {
1628		err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
1629		goto out_unlock;
1630	}
1631
1632	unix_state_lock(sk);
1633
1634	if (unlikely(sk->sk_state != TCP_CLOSE)) {
1635		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
1636		unix_state_unlock(sk);
1637		goto out_unlock;
1638	}
1639
1640	err = security_unix_stream_connect(sk, other, newsk);
1641	if (err) {
1642		unix_state_unlock(sk);
1643		goto out_unlock;
1644	}
1645
 1646	/* The way is open! Quickly set all the necessary fields... */
1647
1648	sock_hold(sk);
1649	unix_peer(newsk)	= sk;
1650	newsk->sk_state		= TCP_ESTABLISHED;
1651	newsk->sk_type		= sk->sk_type;
1652	init_peercred(newsk);
1653	newu = unix_sk(newsk);
1654	newu->listener = other;
1655	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1656	otheru = unix_sk(other);
1657
1658	/* copy address information from listening to new sock
1659	 *
1660	 * The contents of *(otheru->addr) and otheru->path
1661	 * are seen fully set up here, since we have found
1662	 * otheru in hash under its lock.  Insertion into the
1663	 * hash chain we'd found it in had been done in an
1664	 * earlier critical area protected by the chain's lock,
1665	 * the same one where we'd set *(otheru->addr) contents,
1666	 * as well as otheru->path and otheru->addr itself.
1667	 *
1668	 * Using smp_store_release() here to set newu->addr
1669	 * is enough to make those stores, as well as stores
1670	 * to newu->path visible to anyone who gets newu->addr
 1671	 * by smp_load_acquire().  IOW, the same guarantees
1672	 * as for unix_sock instances bound in unix_bind() or
1673	 * in unix_autobind().
1674	 */
1675	if (otheru->path.dentry) {
1676		path_get(&otheru->path);
1677		newu->path = otheru->path;
1678	}
1679	refcount_inc(&otheru->addr->refcnt);
1680	smp_store_release(&newu->addr, otheru->addr);
1681
1682	/* Set credentials */
1683	copy_peercred(sk, other);
1684
1685	sock->state	= SS_CONNECTED;
1686	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1687	sock_hold(newsk);
1688
1689	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1690	unix_peer(sk)	= newsk;
1691
1692	unix_state_unlock(sk);
1693
1694	/* take ten and send info to listening sock */
1695	spin_lock(&other->sk_receive_queue.lock);
1696	__skb_queue_tail(&other->sk_receive_queue, skb);
1697	spin_unlock(&other->sk_receive_queue.lock);
1698	unix_state_unlock(other);
1699	other->sk_data_ready(other);
1700	sock_put(other);
1701	return 0;
1702
1703out_unlock:
1704	if (other)
1705		unix_state_unlock(other);
1706
1707out:
1708	kfree_skb(skb);
1709	if (newsk)
1710		unix_release_sock(newsk, 0);
1711	if (other)
1712		sock_put(other);
1713	return err;
1714}
1715
1716static int unix_socketpair(struct socket *socka, struct socket *sockb)
1717{
1718	struct sock *ska = socka->sk, *skb = sockb->sk;
1719
1720	/* Join our sockets back to back */
1721	sock_hold(ska);
1722	sock_hold(skb);
1723	unix_peer(ska) = skb;
1724	unix_peer(skb) = ska;
1725	init_peercred(ska);
1726	init_peercred(skb);
1727
1728	ska->sk_state = TCP_ESTABLISHED;
1729	skb->sk_state = TCP_ESTABLISHED;
1730	socka->state  = SS_CONNECTED;
1731	sockb->state  = SS_CONNECTED;
1732	return 0;
1733}
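/* Userspace counterpart, for illustration only:
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *
 * lands here with both socks already allocated by the core, so all that
 * remains is to cross-link the peers and mark both ends connected --
 * which is why, unlike connect(2), this path cannot fail.
 */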
1734
1735static void unix_sock_inherit_flags(const struct socket *old,
1736				    struct socket *new)
1737{
1738	if (test_bit(SOCK_PASSCRED, &old->flags))
1739		set_bit(SOCK_PASSCRED, &new->flags);
1740	if (test_bit(SOCK_PASSPIDFD, &old->flags))
1741		set_bit(SOCK_PASSPIDFD, &new->flags);
1742	if (test_bit(SOCK_PASSSEC, &old->flags))
1743		set_bit(SOCK_PASSSEC, &new->flags);
1744}
1745
1746static int unix_accept(struct socket *sock, struct socket *newsock,
1747		       struct proto_accept_arg *arg)
1748{
1749	struct sock *sk = sock->sk;
1750	struct sk_buff *skb;
1751	struct sock *tsk;
1752
1753	arg->err = -EOPNOTSUPP;
1754	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1755		goto out;
1756
1757	arg->err = -EINVAL;
1758	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
1759		goto out;
1760
1761	/* If socket state is TCP_LISTEN it cannot change (for now...),
 1762	 * so no locks are necessary.
1763	 */
1764
1765	skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1766				&arg->err);
1767	if (!skb) {
1768		/* This means receive shutdown. */
1769		if (arg->err == 0)
1770			arg->err = -EINVAL;
1771		goto out;
1772	}
1773
1774	tsk = skb->sk;
1775	skb_free_datagram(sk, skb);
1776	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1777
1778	/* attach accepted sock to socket */
1779	unix_state_lock(tsk);
1780	unix_update_edges(unix_sk(tsk));
1781	newsock->state = SS_CONNECTED;
1782	unix_sock_inherit_flags(sock, newsock);
1783	sock_graft(tsk, newsock);
1784	unix_state_unlock(tsk);
1785	return 0;
1786
1787out:
1788	return arg->err;
1789}
1790
1791
1792static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1793{
1794	struct sock *sk = sock->sk;
1795	struct unix_address *addr;
1796	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1797	int err = 0;
1798
1799	if (peer) {
1800		sk = unix_peer_get(sk);
1801
1802		err = -ENOTCONN;
1803		if (!sk)
1804			goto out;
1805		err = 0;
1806	} else {
1807		sock_hold(sk);
1808	}
1809
1810	addr = smp_load_acquire(&unix_sk(sk)->addr);
1811	if (!addr) {
1812		sunaddr->sun_family = AF_UNIX;
1813		sunaddr->sun_path[0] = 0;
1814		err = offsetof(struct sockaddr_un, sun_path);
1815	} else {
1816		err = addr->len;
1817		memcpy(sunaddr, addr->name, addr->len);
1818
1819		if (peer)
1820			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1821					       CGROUP_UNIX_GETPEERNAME);
1822		else
1823			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1824					       CGROUP_UNIX_GETSOCKNAME);
1825	}
1826	sock_put(sk);
1827out:
1828	return err;
1829}
1830
1831/* The "user->unix_inflight" variable is protected by the garbage
1832 * collection lock, and we just read it locklessly here. If you go
1833 * over the limit, there might be a tiny race in actually noticing
1834 * it across threads. Tough.
1835 */
1836static inline bool too_many_unix_fds(struct task_struct *p)
1837{
1838	struct user_struct *user = current_user();
1839
1840	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1841		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1842	return false;
1843}
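/* Example of the accounting above (numbers assumed for illustration):
 * a user whose RLIMIT_NOFILE is 1024 and who already has more than
 * 1024 fds riding in unread SCM_RIGHTS messages gets -ETOOMANYREFS
 * from unix_attach_fds() below, unless privileged with
 * CAP_SYS_RESOURCE or CAP_SYS_ADMIN.
 */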
1844
1845static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1846{
1847	if (too_many_unix_fds(current))
1848		return -ETOOMANYREFS;
1849
1850	UNIXCB(skb).fp = scm->fp;
1851	scm->fp = NULL;
1852
1853	if (unix_prepare_fpl(UNIXCB(skb).fp))
1854		return -ENOMEM;
1855
1856	return 0;
1857}
1858
1859static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1860{
1861	scm->fp = UNIXCB(skb).fp;
1862	UNIXCB(skb).fp = NULL;
1863
1864	unix_destroy_fpl(scm->fp);
1865}
1866
1867static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1868{
1869	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1870}
1871
1872static void unix_destruct_scm(struct sk_buff *skb)
1873{
1874	struct scm_cookie scm;
1875
1876	memset(&scm, 0, sizeof(scm));
1877	scm.pid  = UNIXCB(skb).pid;
1878	if (UNIXCB(skb).fp)
1879		unix_detach_fds(&scm, skb);
1880
1881	/* Alas, it calls VFS */
1882	/* So fscking what? fput() had been SMP-safe since the last Summer */
1883	scm_destroy(&scm);
1884	sock_wfree(skb);
1885}
1886
1887static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1888{
1889	int err = 0;
1890
1891	UNIXCB(skb).pid  = get_pid(scm->pid);
1892	UNIXCB(skb).uid = scm->creds.uid;
1893	UNIXCB(skb).gid = scm->creds.gid;
1894	UNIXCB(skb).fp = NULL;
1895	unix_get_secdata(scm, skb);
1896	if (scm->fp && send_fds)
1897		err = unix_attach_fds(scm, skb);
1898
1899	skb->destructor = unix_destruct_scm;
1900	return err;
1901}
1902
1903static bool unix_passcred_enabled(const struct socket *sock,
1904				  const struct sock *other)
1905{
1906	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1907	       test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1908	       !other->sk_socket ||
1909	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1910	       test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1911}
1912
1913/*
1914 * Some apps rely on write() giving SCM_CREDENTIALS
1915 * We include credentials if source or destination socket
1916 * asserted SOCK_PASSCRED.
1917 */
1918static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1919			    const struct sock *other)
1920{
1921	if (UNIXCB(skb).pid)
1922		return;
1923	if (unix_passcred_enabled(sock, other)) {
1924		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1925		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1926	}
1927}
1928
1929static bool unix_skb_scm_eq(struct sk_buff *skb,
1930			    struct scm_cookie *scm)
1931{
1932	return UNIXCB(skb).pid == scm->pid &&
1933	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1934	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1935	       unix_secdata_eq(scm, skb);
1936}
1937
1938static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1939{
1940	struct scm_fp_list *fp = UNIXCB(skb).fp;
1941	struct unix_sock *u = unix_sk(sk);
1942
1943	if (unlikely(fp && fp->count)) {
1944		atomic_add(fp->count, &u->scm_stat.nr_fds);
1945		unix_add_edges(fp, u);
1946	}
1947}
1948
1949static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1950{
1951	struct scm_fp_list *fp = UNIXCB(skb).fp;
1952	struct unix_sock *u = unix_sk(sk);
1953
1954	if (unlikely(fp && fp->count)) {
1955		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1956		unix_del_edges(fp);
1957	}
1958}
1959
1960/*
1961 *	Send AF_UNIX data.
1962 */
1963
1964static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1965			      size_t len)
1966{
1967	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1968	struct sock *sk = sock->sk, *other = NULL;
1969	struct unix_sock *u = unix_sk(sk);
1970	struct scm_cookie scm;
1971	struct sk_buff *skb;
1972	int data_len = 0;
1973	int sk_locked;
1974	long timeo;
1975	int err;
1976
1977	err = scm_send(sock, msg, &scm, false);
1978	if (err < 0)
1979		return err;
1980
1981	wait_for_unix_gc(scm.fp);
1982
1983	err = -EOPNOTSUPP;
1984	if (msg->msg_flags&MSG_OOB)
1985		goto out;
1986
1987	if (msg->msg_namelen) {
1988		err = unix_validate_addr(sunaddr, msg->msg_namelen);
1989		if (err)
1990			goto out;
1991
1992		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1993							    msg->msg_name,
1994							    &msg->msg_namelen,
1995							    NULL);
1996		if (err)
1997			goto out;
1998	} else {
1999		sunaddr = NULL;
2000		err = -ENOTCONN;
2001		other = unix_peer_get(sk);
2002		if (!other)
2003			goto out;
2004	}
2005
2006	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
2007	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
2008	    !READ_ONCE(u->addr)) {
2009		err = unix_autobind(sk);
2010		if (err)
2011			goto out;
2012	}
2013
2014	err = -EMSGSIZE;
2015	if (len > READ_ONCE(sk->sk_sndbuf) - 32)
2016		goto out;
2017
2018	if (len > SKB_MAX_ALLOC) {
2019		data_len = min_t(size_t,
2020				 len - SKB_MAX_ALLOC,
2021				 MAX_SKB_FRAGS * PAGE_SIZE);
2022		data_len = PAGE_ALIGN(data_len);
2023
2024		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2025	}
2026
2027	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2028				   msg->msg_flags & MSG_DONTWAIT, &err,
2029				   PAGE_ALLOC_COSTLY_ORDER);
2030	if (skb == NULL)
2031		goto out;
2032
2033	err = unix_scm_to_skb(&scm, skb, true);
2034	if (err < 0)
2035		goto out_free;
2036
2037	skb_put(skb, len - data_len);
2038	skb->data_len = data_len;
2039	skb->len = len;
2040	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2041	if (err)
2042		goto out_free;
2043
2044	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2045
2046restart:
2047	if (!other) {
2048		err = -ECONNRESET;
2049		if (sunaddr == NULL)
2050			goto out_free;
2051
2052		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
2053					sk->sk_type);
2054		if (IS_ERR(other)) {
2055			err = PTR_ERR(other);
2056			other = NULL;
2057			goto out_free;
2058		}
2059	}
2060
2061	if (sk_filter(other, skb) < 0) {
2062		/* Toss the packet but do not return any error to the sender */
2063		err = len;
2064		goto out_free;
2065	}
2066
2067	sk_locked = 0;
2068	unix_state_lock(other);
2069restart_locked:
2070	err = -EPERM;
2071	if (!unix_may_send(sk, other))
2072		goto out_unlock;
2073
2074	if (unlikely(sock_flag(other, SOCK_DEAD))) {
2075		/*
2076		 *	Check with 1003.1g - what should
 2077		 *	a datagram error return here?
2078		 */
2079		unix_state_unlock(other);
2080		sock_put(other);
2081
2082		if (!sk_locked)
2083			unix_state_lock(sk);
2084
2085		err = 0;
2086		if (sk->sk_type == SOCK_SEQPACKET) {
 2087		/* We get here only when racing with unix_release_sock(),
 2088		 * which is clearing @other. Never change the state to
 2089		 * TCP_CLOSE, unlike the SOCK_DGRAM case.
2090			 */
2091			unix_state_unlock(sk);
2092			err = -EPIPE;
2093		} else if (unix_peer(sk) == other) {
2094			unix_peer(sk) = NULL;
2095			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2096
2097			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
2098			unix_state_unlock(sk);
2099
2100			unix_dgram_disconnected(sk, other);
2101			sock_put(other);
2102			err = -ECONNREFUSED;
2103		} else {
2104			unix_state_unlock(sk);
2105		}
2106
2107		other = NULL;
2108		if (err)
2109			goto out_free;
2110		goto restart;
2111	}
2112
2113	err = -EPIPE;
2114	if (other->sk_shutdown & RCV_SHUTDOWN)
2115		goto out_unlock;
2116
2117	if (sk->sk_type != SOCK_SEQPACKET) {
2118		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2119		if (err)
2120			goto out_unlock;
2121	}
2122
2123	/* other == sk && unix_peer(other) != sk if
2124	 * - unix_peer(sk) == NULL, destination address bound to sk
2125	 * - unix_peer(sk) == sk by time of get but disconnected before lock
2126	 */
2127	if (other != sk &&
2128	    unlikely(unix_peer(other) != sk &&
2129	    unix_recvq_full_lockless(other))) {
2130		if (timeo) {
2131			timeo = unix_wait_for_peer(other, timeo);
2132
2133			err = sock_intr_errno(timeo);
2134			if (signal_pending(current))
2135				goto out_free;
2136
2137			goto restart;
2138		}
2139
2140		if (!sk_locked) {
2141			unix_state_unlock(other);
2142			unix_state_double_lock(sk, other);
2143		}
2144
2145		if (unix_peer(sk) != other ||
2146		    unix_dgram_peer_wake_me(sk, other)) {
2147			err = -EAGAIN;
2148			sk_locked = 1;
2149			goto out_unlock;
2150		}
2151
2152		if (!sk_locked) {
2153			sk_locked = 1;
2154			goto restart_locked;
2155		}
2156	}
2157
2158	if (unlikely(sk_locked))
2159		unix_state_unlock(sk);
2160
2161	if (sock_flag(other, SOCK_RCVTSTAMP))
2162		__net_timestamp(skb);
2163	maybe_add_creds(skb, sock, other);
2164	scm_stat_add(other, skb);
2165	skb_queue_tail(&other->sk_receive_queue, skb);
2166	unix_state_unlock(other);
2167	other->sk_data_ready(other);
2168	sock_put(other);
2169	scm_destroy(&scm);
2170	return len;
2171
2172out_unlock:
2173	if (sk_locked)
2174		unix_state_unlock(sk);
2175	unix_state_unlock(other);
2176out_free:
2177	kfree_skb(skb);
2178out:
2179	if (other)
2180		sock_put(other);
2181	scm_destroy(&scm);
2182	return err;
2183}
2184
 2185/* We use paged skbs for stream sockets, limiting occupancy to 32768
 2186 * bytes, with a minimum of a full page.
2187 */
2188#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
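/* Arithmetic check on the define above: with 4 KiB pages,
 * get_order(32768) == 3 and the limit is 4096 << 3 == 32768 bytes; with
 * 64 KiB pages get_order(32768) == 0, leaving the promised minimum of
 * one full page.
 */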
2189
2190#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2191static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2192		     struct scm_cookie *scm, bool fds_sent)
2193{
2194	struct unix_sock *ousk = unix_sk(other);
2195	struct sk_buff *skb;
2196	int err = 0;
2197
2198	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2199
2200	if (!skb)
2201		return err;
2202
2203	err = unix_scm_to_skb(scm, skb, !fds_sent);
2204	if (err < 0) {
2205		kfree_skb(skb);
2206		return err;
2207	}
2208	skb_put(skb, 1);
2209	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2210
2211	if (err) {
2212		kfree_skb(skb);
2213		return err;
2214	}
2215
2216	unix_state_lock(other);
2217
2218	if (sock_flag(other, SOCK_DEAD) ||
2219	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2220		unix_state_unlock(other);
2221		kfree_skb(skb);
2222		return -EPIPE;
2223	}
2224
2225	maybe_add_creds(skb, sock, other);
2226	scm_stat_add(other, skb);
2227
2228	spin_lock(&other->sk_receive_queue.lock);
2229	WRITE_ONCE(ousk->oob_skb, skb);
2230	__skb_queue_tail(&other->sk_receive_queue, skb);
2231	spin_unlock(&other->sk_receive_queue.lock);
2232
2233	sk_send_sigurg(other);
2234	unix_state_unlock(other);
2235	other->sk_data_ready(other);
2236
2237	return err;
2238}
2239#endif
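/* Illustrative userspace sketch of the OOB path (assumed, not part of
 * this file): with CONFIG_AF_UNIX_OOB,
 *
 *	send(fd, "ab", 2, MSG_OOB);
 *
 * delivers "a" as ordinary stream data while the final byte "b" goes
 * through queue_oob() above, becoming the peer's oob_skb and raising
 * SIGURG -- mirroring TCP's one-byte urgent-data semantics.
 */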
2240
2241static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2242			       size_t len)
2243{
2244	struct sock *sk = sock->sk;
2245	struct sock *other = NULL;
2246	int err, size;
2247	struct sk_buff *skb;
2248	int sent = 0;
2249	struct scm_cookie scm;
2250	bool fds_sent = false;
2251	int data_len;
2252
2253	err = scm_send(sock, msg, &scm, false);
2254	if (err < 0)
2255		return err;
2256
2257	wait_for_unix_gc(scm.fp);
2258
2259	err = -EOPNOTSUPP;
2260	if (msg->msg_flags & MSG_OOB) {
2261#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2262		if (len)
2263			len--;
2264		else
2265#endif
2266			goto out_err;
2267	}
2268
2269	if (msg->msg_namelen) {
2270		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2271		goto out_err;
2272	} else {
2273		err = -ENOTCONN;
2274		other = unix_peer(sk);
2275		if (!other)
2276			goto out_err;
2277	}
2278
2279	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2280		goto pipe_err;
2281
2282	while (sent < len) {
2283		size = len - sent;
2284
2285		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2286			skb = sock_alloc_send_pskb(sk, 0, 0,
2287						   msg->msg_flags & MSG_DONTWAIT,
2288						   &err, 0);
2289		} else {
2290			/* Keep two messages in the pipe so it schedules better */
2291			size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
2292
2293			/* allow fallback to order-0 allocations */
2294			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2295
2296			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2297
2298			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2299
2300			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2301						   msg->msg_flags & MSG_DONTWAIT, &err,
2302						   get_order(UNIX_SKB_FRAGS_SZ));
2303		}
2304		if (!skb)
2305			goto out_err;
2306
2307		/* Only send the fds in the first buffer */
2308		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2309		if (err < 0) {
2310			kfree_skb(skb);
2311			goto out_err;
2312		}
2313		fds_sent = true;
2314
2315		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2316			skb->ip_summed = CHECKSUM_UNNECESSARY;
2317			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2318						   sk->sk_allocation);
2319			if (err < 0) {
2320				kfree_skb(skb);
2321				goto out_err;
2322			}
2323			size = err;
2324			refcount_add(size, &sk->sk_wmem_alloc);
2325		} else {
2326			skb_put(skb, size - data_len);
2327			skb->data_len = data_len;
2328			skb->len = size;
2329			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2330			if (err) {
2331				kfree_skb(skb);
2332				goto out_err;
2333			}
2334		}
2335
2336		unix_state_lock(other);
2337
2338		if (sock_flag(other, SOCK_DEAD) ||
2339		    (other->sk_shutdown & RCV_SHUTDOWN))
2340			goto pipe_err_free;
2341
2342		maybe_add_creds(skb, sock, other);
2343		scm_stat_add(other, skb);
2344		skb_queue_tail(&other->sk_receive_queue, skb);
2345		unix_state_unlock(other);
2346		other->sk_data_ready(other);
2347		sent += size;
2348	}
2349
2350#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2351	if (msg->msg_flags & MSG_OOB) {
2352		err = queue_oob(sock, msg, other, &scm, fds_sent);
2353		if (err)
2354			goto out_err;
2355		sent++;
2356	}
2357#endif
2358
2359	scm_destroy(&scm);
2360
2361	return sent;
2362
2363pipe_err_free:
2364	unix_state_unlock(other);
2365	kfree_skb(skb);
2366pipe_err:
2367	if (sent == 0 && !(msg->msg_flags & MSG_NOSIGNAL))
2368		send_sig(SIGPIPE, current, 0);
2369	err = -EPIPE;
2370out_err:
2371	scm_destroy(&scm);
2372	return sent ? : err;
2373}
2374
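/* SOCK_SEQPACKET reuses the datagram send/receive paths; the two
 * wrappers below merely add the connection checks that a
 * connection-oriented socket requires.
 */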
2375static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2376				  size_t len)
2377{
2378	int err;
2379	struct sock *sk = sock->sk;
2380
2381	err = sock_error(sk);
2382	if (err)
2383		return err;
2384
2385	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2386		return -ENOTCONN;
2387
2388	if (msg->msg_namelen)
2389		msg->msg_namelen = 0;
2390
2391	return unix_dgram_sendmsg(sock, msg, len);
2392}
2393
2394static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2395				  size_t size, int flags)
2396{
2397	struct sock *sk = sock->sk;
2398
2399	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2400		return -ENOTCONN;
2401
2402	return unix_dgram_recvmsg(sock, msg, size, flags);
2403}
2404
2405static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2406{
2407	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2408
2409	if (addr) {
2410		msg->msg_namelen = addr->len;
2411		memcpy(msg->msg_name, addr->name, addr->len);
2412	}
2413}
2414
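/* Datagram receive: one skb is one message.  Anything that does not
 * fit into the caller's buffer is reported via MSG_TRUNC and, unless
 * peeking, discarded with the skb.  Credentials and attached file
 * descriptors are handed out through the scm machinery; on MSG_PEEK
 * the descriptors are cloned so the queued message stays intact (see
 * the comment further down).
 */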
2415int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2416			 int flags)
2417{
2418	struct scm_cookie scm;
2419	struct socket *sock = sk->sk_socket;
2420	struct unix_sock *u = unix_sk(sk);
2421	struct sk_buff *skb, *last;
2422	long timeo;
2423	int skip;
2424	int err;
2425
2426	err = -EOPNOTSUPP;
2427	if (flags & MSG_OOB)
2428		goto out;
2429
2430	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2431
2432	do {
2433		mutex_lock(&u->iolock);
2434
2435		skip = sk_peek_offset(sk, flags);
2436		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2437					      &skip, &err, &last);
2438		if (skb) {
2439			if (!(flags & MSG_PEEK))
2440				scm_stat_del(sk, skb);
2441			break;
2442		}
2443
2444		mutex_unlock(&u->iolock);
2445
2446		if (err != -EAGAIN)
2447			break;
2448	} while (timeo &&
2449		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2450					      &err, &timeo, last));
2451
2452	if (!skb) { /* implies iolock unlocked */
2453		unix_state_lock(sk);
2454		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2455		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2456		    (sk->sk_shutdown & RCV_SHUTDOWN))
2457			err = 0;
2458		unix_state_unlock(sk);
2459		goto out;
2460	}
2461
2462	if (wq_has_sleeper(&u->peer_wait))
2463		wake_up_interruptible_sync_poll(&u->peer_wait,
2464						EPOLLOUT | EPOLLWRNORM |
2465						EPOLLWRBAND);
2466
2467	if (msg->msg_name) {
2468		unix_copy_addr(msg, skb->sk);
2469
2470		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2471						      msg->msg_name,
2472						      &msg->msg_namelen);
2473	}
2474
2475	if (size > skb->len - skip)
2476		size = skb->len - skip;
2477	else if (size < skb->len - skip)
2478		msg->msg_flags |= MSG_TRUNC;
2479
2480	err = skb_copy_datagram_msg(skb, skip, msg, size);
2481	if (err)
2482		goto out_free;
2483
2484	if (sock_flag(sk, SOCK_RCVTSTAMP))
2485		__sock_recv_timestamp(msg, sk, skb);
2486
2487	memset(&scm, 0, sizeof(scm));
2488
2489	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2490	unix_set_secdata(&scm, skb);
2491
2492	if (!(flags & MSG_PEEK)) {
2493		if (UNIXCB(skb).fp)
2494			unix_detach_fds(&scm, skb);
2495
2496		sk_peek_offset_bwd(sk, skb->len);
2497	} else {
2498		/* It is questionable: on PEEK we could:
2499		 * - not return fds: good, but too simple 8)
2500		 * - return fds, and not return them on read (old strategy,
2501		 *   apparently wrong)
2502		 * - clone fds (chosen here, as the most universal
2503		 *   solution)
2504		 *
2505		 * POSIX 1003.1g does not actually define this clearly
2506		 * at all. POSIX 1003.1g doesn't define a lot of things
2507		 * clearly, however!
2508		 *
2509		 */
2510
2511		sk_peek_offset_fwd(sk, size);
2512
2513		if (UNIXCB(skb).fp)
2514			unix_peek_fds(&scm, skb);
2515	}
2516	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2517
2518	scm_recv_unix(sock, msg, &scm, flags);
2519
2520out_free:
2521	skb_free_datagram(sk, skb);
2522	mutex_unlock(&u->iolock);
2523out:
2524	return err;
2525}
2526
2527static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2528			      int flags)
2529{
2530	struct sock *sk = sock->sk;
2531
2532#ifdef CONFIG_BPF_SYSCALL
2533	const struct proto *prot = READ_ONCE(sk->sk_prot);
2534
2535	if (prot != &unix_dgram_proto)
2536		return prot->recvmsg(sk, msg, size, flags, NULL);
2537#endif
2538	return __unix_dgram_recvmsg(sk, msg, size, flags);
2539}
2540
2541static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2542{
2543	struct unix_sock *u = unix_sk(sk);
2544	struct sk_buff *skb;
2545	int err;
2546
2547	mutex_lock(&u->iolock);
2548	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2549	mutex_unlock(&u->iolock);
2550	if (!skb)
2551		return err;
2552
2553	return recv_actor(sk, skb);
2554}
2555
2556/*
2557 *	Sleep until more data has arrived. But check for races.
2558 */
2559static long unix_stream_data_wait(struct sock *sk, long timeo,
2560				  struct sk_buff *last, unsigned int last_len,
2561				  bool freezable)
2562{
2563	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2564	struct sk_buff *tail;
2565	DEFINE_WAIT(wait);
2566
2567	unix_state_lock(sk);
2568
2569	for (;;) {
2570		prepare_to_wait(sk_sleep(sk), &wait, state);
2571
2572		tail = skb_peek_tail(&sk->sk_receive_queue);
2573		if (tail != last ||
2574		    (tail && tail->len != last_len) ||
2575		    sk->sk_err ||
2576		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2577		    signal_pending(current) ||
2578		    !timeo)
2579			break;
2580
2581		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2582		unix_state_unlock(sk);
2583		timeo = schedule_timeout(timeo);
2584		unix_state_lock(sk);
2585
2586		if (sock_flag(sk, SOCK_DEAD))
2587			break;
2588
2589		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2590	}
2591
2592	finish_wait(sk_sleep(sk), &wait);
2593	unix_state_unlock(sk);
2594	return timeo;
2595}
2596
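/* A stream skb may be drained by several reads; UNIXCB(skb).consumed
 * records how far earlier reads got, so the payload still unread is
 * skb->len minus that count.
 */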
2597static unsigned int unix_skb_len(const struct sk_buff *skb)
2598{
2599	return skb->len - UNIXCB(skb).consumed;
2600}
2601
2602struct unix_stream_read_state {
2603	int (*recv_actor)(struct sk_buff *, int, int,
2604			  struct unix_stream_read_state *);
2605	struct socket *socket;
2606	struct msghdr *msg;
2607	struct pipe_inode_info *pipe;
2608	size_t size;
2609	int flags;
2610	unsigned int splice_flags;
2611};
2612
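/* Receive the pending out-of-band byte (recvmsg() with MSG_OOB).
 * This fails with -EINVAL if SO_OOBINLINE is set or no OOB byte is
 * pending; without MSG_PEEK the byte is marked as consumed.
 */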
2613#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2614static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2615{
2616	struct socket *sock = state->socket;
2617	struct sock *sk = sock->sk;
2618	struct unix_sock *u = unix_sk(sk);
2619	int chunk = 1;
2620	struct sk_buff *oob_skb;
2621
2622	mutex_lock(&u->iolock);
2623	unix_state_lock(sk);
2624	spin_lock(&sk->sk_receive_queue.lock);
2625
2626	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2627		spin_unlock(&sk->sk_receive_queue.lock);
2628		unix_state_unlock(sk);
2629		mutex_unlock(&u->iolock);
2630		return -EINVAL;
2631	}
2632
2633	oob_skb = u->oob_skb;
2634
2635	if (!(state->flags & MSG_PEEK))
2636		WRITE_ONCE(u->oob_skb, NULL);
2637
2638	spin_unlock(&sk->sk_receive_queue.lock);
2639	unix_state_unlock(sk);
2640
2641	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2642
2643	if (!(state->flags & MSG_PEEK))
2644		UNIXCB(oob_skb).consumed += 1;
2645
2646	mutex_unlock(&u->iolock);
2647
2648	if (chunk < 0)
2649		return -EFAULT;
2650
2651	state->msg->msg_flags |= MSG_OOB;
2652	return 1;
2653}
2654
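/* Decide what a stream reader may see when the skb at the head of
 * the queue is, or sits next to, the OOB skb.  Returns the skb to
 * read from, or NULL if the read must stop at the OOB mark.  Fully
 * consumed leading skbs are unlinked in passing (unless peeking),
 * and a non-inlined OOB skb that the reader steps over is dropped.
 */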
2655static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2656				  int flags, int copied)
2657{
2658	struct sk_buff *read_skb = NULL, *unread_skb = NULL;
2659	struct unix_sock *u = unix_sk(sk);
2660
2661	if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
2662		return skb;
2663
2664	spin_lock(&sk->sk_receive_queue.lock);
2665
2666	if (!unix_skb_len(skb)) {
2667		if (copied && (!u->oob_skb || skb == u->oob_skb)) {
2668			skb = NULL;
2669		} else if (flags & MSG_PEEK) {
2670			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2671		} else {
2672			read_skb = skb;
2673			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2674			__skb_unlink(read_skb, &sk->sk_receive_queue);
2675		}
2676
2677		if (!skb)
2678			goto unlock;
2679	}
2680
2681	if (skb != u->oob_skb)
2682		goto unlock;
2683
2684	if (copied) {
2685		skb = NULL;
2686	} else if (!(flags & MSG_PEEK)) {
2687		WRITE_ONCE(u->oob_skb, NULL);
2688
2689		if (!sock_flag(sk, SOCK_URGINLINE)) {
2690			__skb_unlink(skb, &sk->sk_receive_queue);
2691			unread_skb = skb;
2692			skb = skb_peek(&sk->sk_receive_queue);
2693		}
2694	} else if (!sock_flag(sk, SOCK_URGINLINE)) {
2695		skb = skb_peek_next(skb, &sk->sk_receive_queue);
2696	}
2697
2698unlock:
2699	spin_unlock(&sk->sk_receive_queue.lock);
2700
2701	consume_skb(read_skb);
2702	kfree_skb(unread_skb);
2703
2704	return skb;
2705}
2706#endif
2707
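/* Read one skb on behalf of an in-kernel consumer (the recv_actor
 * callback, used e.g. by sockmap).  An OOB skb is never handed to
 * the actor: it is dropped here and -EAGAIN returned instead.
 */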
2708static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2709{
2710	struct unix_sock *u = unix_sk(sk);
2711	struct sk_buff *skb;
2712	int err;
2713
2714	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
2715		return -ENOTCONN;
2716
2717	mutex_lock(&u->iolock);
2718	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2719	mutex_unlock(&u->iolock);
2720	if (!skb)
2721		return err;
2722
2723#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2724	if (unlikely(skb == READ_ONCE(u->oob_skb))) {
2725		bool drop = false;
2726
2727		unix_state_lock(sk);
2728
2729		if (sock_flag(sk, SOCK_DEAD)) {
2730			unix_state_unlock(sk);
2731			kfree_skb(skb);
2732			return -ECONNRESET;
2733		}
2734
2735		spin_lock(&sk->sk_receive_queue.lock);
2736		if (likely(skb == u->oob_skb)) {
2737			WRITE_ONCE(u->oob_skb, NULL);
2738			drop = true;
2739		}
2740		spin_unlock(&sk->sk_receive_queue.lock);
2741
2742		unix_state_unlock(sk);
2743
2744		if (drop) {
2745			kfree_skb(skb);
2746			return -EAGAIN;
2747		}
2748	}
2749#endif
2750
2751	return recv_actor(sk, skb);
2752}
2753
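/* Common stream receive loop, shared by recvmsg() and splice_read()
 * through state->recv_actor.  It walks the receive queue skb by skb,
 * glues consecutive skbs only while their credentials match, honours
 * SO_PEEK_OFF via the skip offset, and stops early at the OOB mark
 * or after a message carrying file descriptors.
 */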
2754static int unix_stream_read_generic(struct unix_stream_read_state *state,
2755				    bool freezable)
2756{
2757	struct scm_cookie scm;
2758	struct socket *sock = state->socket;
2759	struct sock *sk = sock->sk;
2760	struct unix_sock *u = unix_sk(sk);
2761	int copied = 0;
2762	int flags = state->flags;
2763	int noblock = flags & MSG_DONTWAIT;
2764	bool check_creds = false;
2765	int target;
2766	int err = 0;
2767	long timeo;
2768	int skip;
2769	size_t size = state->size;
2770	unsigned int last_len;
2771
2772	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
2773		err = -EINVAL;
2774		goto out;
2775	}
2776
2777	if (unlikely(flags & MSG_OOB)) {
2778		err = -EOPNOTSUPP;
2779#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2780		err = unix_stream_recv_urg(state);
2781#endif
2782		goto out;
2783	}
2784
2785	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2786	timeo = sock_rcvtimeo(sk, noblock);
2787
2788	memset(&scm, 0, sizeof(scm));
2789
2790	/* Lock the socket to prevent queue disordering
2791	 * while we sleep copying data out in recv_actor().
2792	 */
2793	mutex_lock(&u->iolock);
2794
2795	skip = max(sk_peek_offset(sk, flags), 0);
2796
2797	do {
2798		struct sk_buff *skb, *last;
2799		int chunk;
2800
2801redo:
2802		unix_state_lock(sk);
2803		if (sock_flag(sk, SOCK_DEAD)) {
2804			err = -ECONNRESET;
2805			goto unlock;
2806		}
2807		last = skb = skb_peek(&sk->sk_receive_queue);
2808		last_len = last ? last->len : 0;
2809
2810again:
2811#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2812		if (skb) {
2813			skb = manage_oob(skb, sk, flags, copied);
2814			if (!skb && copied) {
2815				unix_state_unlock(sk);
2816				break;
2817			}
2818		}
2819#endif
2820		if (skb == NULL) {
2821			if (copied >= target)
2822				goto unlock;
2823
2824			/*
2825			 *	POSIX 1003.1g mandates this order.
2826			 */
2827
2828			err = sock_error(sk);
2829			if (err)
2830				goto unlock;
2831			if (sk->sk_shutdown & RCV_SHUTDOWN)
2832				goto unlock;
2833
2834			unix_state_unlock(sk);
2835			if (!timeo) {
2836				err = -EAGAIN;
2837				break;
2838			}
2839
2840			mutex_unlock(&u->iolock);
2841
2842			timeo = unix_stream_data_wait(sk, timeo, last,
2843						      last_len, freezable);
2844
2845			if (signal_pending(current)) {
2846				err = sock_intr_errno(timeo);
2847				scm_destroy(&scm);
2848				goto out;
2849			}
2850
2851			mutex_lock(&u->iolock);
2852			goto redo;
2853unlock:
2854			unix_state_unlock(sk);
2855			break;
2856		}
2857
2858		while (skip >= unix_skb_len(skb)) {
2859			skip -= unix_skb_len(skb);
2860			last = skb;
2861			last_len = skb->len;
2862			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2863			if (!skb)
2864				goto again;
2865		}
2866
2867		unix_state_unlock(sk);
2868
2869		if (check_creds) {
2870			/* Never glue messages from different writers */
2871			if (!unix_skb_scm_eq(skb, &scm))
2872				break;
2873		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2874			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2875			/* Copy credentials */
2876			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2877			unix_set_secdata(&scm, skb);
2878			check_creds = true;
2879		}
2880
2881		/* Copy address just once */
2882		if (state->msg && state->msg->msg_name) {
2883			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2884					 state->msg->msg_name);
2885			unix_copy_addr(state->msg, skb->sk);
2886
2887			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2888							      state->msg->msg_name,
2889							      &state->msg->msg_namelen);
2890
2891			sunaddr = NULL;
2892		}
2893
2894		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2895		chunk = state->recv_actor(skb, skip, chunk, state);
2896		if (chunk < 0) {
2897			if (copied == 0)
2898				copied = -EFAULT;
2899			break;
2900		}
2901		copied += chunk;
2902		size -= chunk;
2903
2904		/* Mark read part of skb as used */
2905		if (!(flags & MSG_PEEK)) {
2906			UNIXCB(skb).consumed += chunk;
2907
2908			sk_peek_offset_bwd(sk, chunk);
2909
2910			if (UNIXCB(skb).fp) {
2911				scm_stat_del(sk, skb);
2912				unix_detach_fds(&scm, skb);
2913			}
2914
2915			if (unix_skb_len(skb))
2916				break;
2917
2918			skb_unlink(skb, &sk->sk_receive_queue);
2919			consume_skb(skb);
2920
2921			if (scm.fp)
2922				break;
2923		} else {
2924			/* It is questionable, see note in unix_dgram_recvmsg.
2925			 */
2926			if (UNIXCB(skb).fp)
2927				unix_peek_fds(&scm, skb);
2928
2929			sk_peek_offset_fwd(sk, chunk);
2930
2931			if (UNIXCB(skb).fp)
2932				break;
2933
2934			skip = 0;
2935			last = skb;
2936			last_len = skb->len;
2937			unix_state_lock(sk);
2938			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2939			if (skb)
2940				goto again;
2941			unix_state_unlock(sk);
2942			break;
2943		}
2944	} while (size);
2945
2946	mutex_unlock(&u->iolock);
2947	if (state->msg)
2948		scm_recv_unix(sock, state->msg, &scm, flags);
2949	else
2950		scm_destroy(&scm);
2951out:
2952	return copied ? : err;
2953}
2954
2955static int unix_stream_read_actor(struct sk_buff *skb,
2956				  int skip, int chunk,
2957				  struct unix_stream_read_state *state)
2958{
2959	int ret;
2960
2961	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2962				    state->msg, chunk);
2963	return ret ?: chunk;
2964}
2965
2966int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2967			  size_t size, int flags)
2968{
2969	struct unix_stream_read_state state = {
2970		.recv_actor = unix_stream_read_actor,
2971		.socket = sk->sk_socket,
2972		.msg = msg,
2973		.size = size,
2974		.flags = flags
2975	};
2976
2977	return unix_stream_read_generic(&state, true);
2978}
2979
2980static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2981			       size_t size, int flags)
2982{
2983	struct unix_stream_read_state state = {
2984		.recv_actor = unix_stream_read_actor,
2985		.socket = sock,
2986		.msg = msg,
2987		.size = size,
2988		.flags = flags
2989	};
2990
2991#ifdef CONFIG_BPF_SYSCALL
2992	struct sock *sk = sock->sk;
2993	const struct proto *prot = READ_ONCE(sk->sk_prot);
2994
2995	if (prot != &unix_stream_proto)
2996		return prot->recvmsg(sk, msg, size, flags, NULL);
2997#endif
2998	return unix_stream_read_generic(&state, true);
2999}
3000
3001static int unix_stream_splice_actor(struct sk_buff *skb,
3002				    int skip, int chunk,
3003				    struct unix_stream_read_state *state)
3004{
3005	return skb_splice_bits(skb, state->socket->sk,
3006			       UNIXCB(skb).consumed + skip,
3007			       state->pipe, chunk, state->splice_flags);
3008}
3009
3010static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
3011				       struct pipe_inode_info *pipe,
3012				       size_t size, unsigned int flags)
3013{
3014	struct unix_stream_read_state state = {
3015		.recv_actor = unix_stream_splice_actor,
3016		.socket = sock,
3017		.pipe = pipe,
3018		.size = size,
3019		.splice_flags = flags,
3020	};
3021
3022	if (unlikely(*ppos))
3023		return -ESPIPE;
3024
3025	if (sock->file->f_flags & O_NONBLOCK ||
3026	    flags & SPLICE_F_NONBLOCK)
3027		state.flags = MSG_DONTWAIT;
3028
3029	return unix_stream_read_generic(&state, false);
3030}
3031
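/* shutdown(2) for AF_UNIX.  Besides marking this socket, the peer of
 * a connected SOCK_STREAM/SOCK_SEQPACKET socket gets the mirrored
 * flags (our SHUT_WR becomes the peer's receive shutdown) and is
 * woken up.  Illustrative userspace use:
 *
 *	shutdown(fd, SHUT_WR);	- peer's read() returns 0 (EOF)
 *				  once queued data is drained
 */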
3032static int unix_shutdown(struct socket *sock, int mode)
3033{
3034	struct sock *sk = sock->sk;
3035	struct sock *other;
3036
3037	if (mode < SHUT_RD || mode > SHUT_RDWR)
3038		return -EINVAL;
3039	/* This maps:
3040	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
3041	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
3042	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3043	 */
3044	++mode;
3045
3046	unix_state_lock(sk);
3047	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3048	other = unix_peer(sk);
3049	if (other)
3050		sock_hold(other);
3051	unix_state_unlock(sk);
3052	sk->sk_state_change(sk);
3053
3054	if (other &&
3055		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3056
3057		int peer_mode = 0;
3058		const struct proto *prot = READ_ONCE(other->sk_prot);
3059
3060		if (prot->unhash)
3061			prot->unhash(other);
3062		if (mode & RCV_SHUTDOWN)
3063			peer_mode |= SEND_SHUTDOWN;
3064		if (mode & SEND_SHUTDOWN)
3065			peer_mode |= RCV_SHUTDOWN;
3066		unix_state_lock(other);
3067		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3068		unix_state_unlock(other);
3069		other->sk_state_change(other);
3070		if (peer_mode == SHUTDOWN_MASK)
3071			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3072		else if (peer_mode & RCV_SHUTDOWN)
3073			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3074	}
3075	if (other)
3076		sock_put(other);
3077
3078	return 0;
3079}
3080
3081long unix_inq_len(struct sock *sk)
3082{
3083	struct sk_buff *skb;
3084	long amount = 0;
3085
3086	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
3087		return -EINVAL;
3088
3089	spin_lock(&sk->sk_receive_queue.lock);
3090	if (sk->sk_type == SOCK_STREAM ||
3091	    sk->sk_type == SOCK_SEQPACKET) {
3092		skb_queue_walk(&sk->sk_receive_queue, skb)
3093			amount += unix_skb_len(skb);
3094	} else {
3095		skb = skb_peek(&sk->sk_receive_queue);
3096		if (skb)
3097			amount = skb->len;
3098	}
3099	spin_unlock(&sk->sk_receive_queue.lock);
3100
3101	return amount;
3102}
3103EXPORT_SYMBOL_GPL(unix_inq_len);
3104
3105long unix_outq_len(struct sock *sk)
3106{
3107	return sk_wmem_alloc_get(sk);
3108}
3109EXPORT_SYMBOL_GPL(unix_outq_len);
3110
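/* Implements SIOCUNIXFILE: open the inode a pathname socket is bound
 * to as an O_PATH file descriptor, so that a caller with
 * CAP_NET_ADMIN in the socket's namespace can inspect the filesystem
 * object behind the socket.
 */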
3111static int unix_open_file(struct sock *sk)
3112{
3113	struct path path;
3114	struct file *f;
3115	int fd;
3116
3117	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3118		return -EPERM;
3119
3120	if (!smp_load_acquire(&unix_sk(sk)->addr))
3121		return -ENOENT;
3122
3123	path = unix_sk(sk)->path;
3124	if (!path.dentry)
3125		return -ENOENT;
3126
3127	path_get(&path);
3128
3129	fd = get_unused_fd_flags(O_CLOEXEC);
3130	if (fd < 0)
3131		goto out;
3132
3133	f = dentry_open(&path, O_PATH, current_cred());
3134	if (IS_ERR(f)) {
3135		put_unused_fd(fd);
3136		fd = PTR_ERR(f);
3137		goto out;
3138	}
3139
3140	fd_install(fd, f);
3141out:
3142	path_put(&path);
3143
3144	return fd;
3145}
3146
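/* ioctl(2) handlers: SIOCOUTQ/SIOCINQ report queued byte counts,
 * SIOCUNIXFILE is implemented above, and SIOCATMARK tells a stream
 * reader whether the next byte to be read is the OOB mark.
 * Illustrative userspace use:
 *
 *	int n;
 *	ioctl(fd, SIOCINQ, &n);	- n = bytes waiting to be read
 */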
3147static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3148{
3149	struct sock *sk = sock->sk;
3150	long amount = 0;
3151	int err;
3152
3153	switch (cmd) {
3154	case SIOCOUTQ:
3155		amount = unix_outq_len(sk);
3156		err = put_user(amount, (int __user *)arg);
3157		break;
3158	case SIOCINQ:
3159		amount = unix_inq_len(sk);
3160		if (amount < 0)
3161			err = amount;
3162		else
3163			err = put_user(amount, (int __user *)arg);
3164		break;
3165	case SIOCUNIXFILE:
3166		err = unix_open_file(sk);
3167		break;
3168#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3169	case SIOCATMARK:
3170		{
3171			struct unix_sock *u = unix_sk(sk);
3172			struct sk_buff *skb;
3173			int answ = 0;
3174
3175			mutex_lock(&u->iolock);
3176
3177			skb = skb_peek(&sk->sk_receive_queue);
3178			if (skb) {
3179				struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
3180				struct sk_buff *next_skb;
3181
3182				next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
3183
3184				if (skb == oob_skb ||
3185				    (!unix_skb_len(skb) &&
3186				     (!oob_skb || next_skb == oob_skb)))
3187					answ = 1;
3188			}
3189
3190			mutex_unlock(&u->iolock);
3191
3192			err = put_user(answ, (int __user *)arg);
3193		}
3194		break;
3195#endif
3196	default:
3197		err = -ENOIOCTLCMD;
3198		break;
3199	}
3200	return err;
3201}
3202
3203#ifdef CONFIG_COMPAT
3204static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3205{
3206	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3207}
3208#endif
3209
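/* poll/epoll support.  unix_poll() serves connection-oriented
 * sockets; unix_dgram_poll() below additionally withholds EPOLLOUT
 * while the connected peer's receive queue is full, since for
 * datagram sockets the remote queue, not the local send buffer, is
 * what fills up first.
 */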
3210static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3211{
3212	struct sock *sk = sock->sk;
3213	unsigned char state;
3214	__poll_t mask;
3215	u8 shutdown;
3216
3217	sock_poll_wait(file, sock, wait);
3218	mask = 0;
3219	shutdown = READ_ONCE(sk->sk_shutdown);
3220	state = READ_ONCE(sk->sk_state);
3221
3222	/* exceptional events? */
3223	if (READ_ONCE(sk->sk_err))
3224		mask |= EPOLLERR;
3225	if (shutdown == SHUTDOWN_MASK)
3226		mask |= EPOLLHUP;
3227	if (shutdown & RCV_SHUTDOWN)
3228		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3229
3230	/* readable? */
3231	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3232		mask |= EPOLLIN | EPOLLRDNORM;
3233	if (sk_is_readable(sk))
3234		mask |= EPOLLIN | EPOLLRDNORM;
3235#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3236	if (READ_ONCE(unix_sk(sk)->oob_skb))
3237		mask |= EPOLLPRI;
3238#endif
3239
3240	/* Connection-based need to check for termination and startup */
3241	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3242	    state == TCP_CLOSE)
3243		mask |= EPOLLHUP;
3244
3245	/*
3246	 * We set writable also when the other side has shut down the
3247	 * connection. This prevents stuck sockets.
3248	 */
3249	if (unix_writable(sk, state))
3250		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3251
3252	return mask;
3253}
3254
3255static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3256				    poll_table *wait)
3257{
3258	struct sock *sk = sock->sk, *other;
3259	unsigned int writable;
3260	unsigned char state;
3261	__poll_t mask;
3262	u8 shutdown;
3263
3264	sock_poll_wait(file, sock, wait);
3265	mask = 0;
3266	shutdown = READ_ONCE(sk->sk_shutdown);
3267	state = READ_ONCE(sk->sk_state);
3268
3269	/* exceptional events? */
3270	if (READ_ONCE(sk->sk_err) ||
3271	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3272		mask |= EPOLLERR |
3273			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3274
3275	if (shutdown & RCV_SHUTDOWN)
3276		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3277	if (shutdown == SHUTDOWN_MASK)
3278		mask |= EPOLLHUP;
3279
3280	/* readable? */
3281	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3282		mask |= EPOLLIN | EPOLLRDNORM;
3283	if (sk_is_readable(sk))
3284		mask |= EPOLLIN | EPOLLRDNORM;
3285
3286	/* Connection-based need to check for termination and startup */
3287	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
3288		mask |= EPOLLHUP;
3289
3290	/* No write status requested, avoid expensive OUT tests. */
3291	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3292		return mask;
3293
3294	writable = unix_writable(sk, state);
3295	if (writable) {
3296		unix_state_lock(sk);
3297
3298		other = unix_peer(sk);
3299		if (other && unix_peer(other) != sk &&
3300		    unix_recvq_full_lockless(other) &&
3301		    unix_dgram_peer_wake_me(sk, other))
3302			writable = 0;
3303
3304		unix_state_unlock(sk);
3305	}
3306
3307	if (writable)
3308		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3309	else
3310		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3311
3312	return mask;
3313}
3314
3315#ifdef CONFIG_PROC_FS
3316
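/* /proc/net/unix iteration.  The seq_file position *pos encodes both
 * a hash bucket and a 1-based offset within that bucket, so that a
 * walk can resume after the bucket lock has been dropped:
 *
 *	*pos = (bucket << BUCKET_SPACE) | offset
 */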
3317#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3318
3319#define get_bucket(x) ((x) >> BUCKET_SPACE)
3320#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3321#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
3322
3323static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3324{
3325	unsigned long offset = get_offset(*pos);
3326	unsigned long bucket = get_bucket(*pos);
3327	unsigned long count = 0;
3328	struct sock *sk;
3329
3330	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3331	     sk; sk = sk_next(sk)) {
3332		if (++count == offset)
3333			break;
3334	}
3335
3336	return sk;
3337}
3338
3339static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3340{
3341	unsigned long bucket = get_bucket(*pos);
3342	struct net *net = seq_file_net(seq);
3343	struct sock *sk;
3344
3345	while (bucket < UNIX_HASH_SIZE) {
3346		spin_lock(&net->unx.table.locks[bucket]);
3347
3348		sk = unix_from_bucket(seq, pos);
3349		if (sk)
3350			return sk;
3351
3352		spin_unlock(&net->unx.table.locks[bucket]);
3353
3354		*pos = set_bucket_offset(++bucket, 1);
3355	}
3356
3357	return NULL;
3358}
3359
3360static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3361				  loff_t *pos)
3362{
3363	unsigned long bucket = get_bucket(*pos);
3364
3365	sk = sk_next(sk);
3366	if (sk)
3367		return sk;
3368
3370	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3371
3372	*pos = set_bucket_offset(++bucket, 1);
3373
3374	return unix_get_first(seq, pos);
3375}
3376
3377static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3378{
3379	if (!*pos)
3380		return SEQ_START_TOKEN;
3381
3382	return unix_get_first(seq, pos);
3383}
3384
3385static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3386{
3387	++*pos;
3388
3389	if (v == SEQ_START_TOKEN)
3390		return unix_get_first(seq, pos);
3391
3392	return unix_get_next(seq, v, pos);
3393}
3394
3395static void unix_seq_stop(struct seq_file *seq, void *v)
3396{
3397	struct sock *sk = v;
3398
3399	if (sk)
3400		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3401}
3402
3403static int unix_seq_show(struct seq_file *seq, void *v)
3404{
3405
3406	if (v == SEQ_START_TOKEN)
3407		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3408			 "Inode Path\n");
3409	else {
3410		struct sock *s = v;
3411		struct unix_sock *u = unix_sk(s);
3412		unix_state_lock(s);
3413
3414		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3415			s,
3416			refcount_read(&s->sk_refcnt),
3417			0,
3418			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3419			s->sk_type,
3420			s->sk_socket ?
3421			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3422			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3423			sock_i_ino(s));
3424
3425		if (u->addr) {	/* under a hash table lock here */
3426			int i, len;
3427			seq_putc(seq, ' ');
3428
3429			i = 0;
3430			len = u->addr->len -
3431				offsetof(struct sockaddr_un, sun_path);
3432			if (u->addr->name->sun_path[0]) {
3433				len--;
3434			} else {
3435				seq_putc(seq, '@');
3436				i++;
3437			}
3438			for ( ; i < len; i++)
3439				seq_putc(seq, u->addr->name->sun_path[i] ?:
3440					 '@');
3441		}
3442		unix_state_unlock(s);
3443		seq_putc(seq, '\n');
3444	}
3445
3446	return 0;
3447}
3448
3449static const struct seq_operations unix_seq_ops = {
3450	.start  = unix_seq_start,
3451	.next   = unix_seq_next,
3452	.stop   = unix_seq_stop,
3453	.show   = unix_seq_show,
3454};
3455
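/* BPF iterator (iter/unix).  Sockets are batched one hash bucket at
 * a time with a reference held on each, so the bucket lock can be
 * dropped while the (possibly sleeping) BPF program runs.
 */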
3456#ifdef CONFIG_BPF_SYSCALL
3457struct bpf_unix_iter_state {
3458	struct seq_net_private p;
3459	unsigned int cur_sk;
3460	unsigned int end_sk;
3461	unsigned int max_sk;
3462	struct sock **batch;
3463	bool st_bucket_done;
3464};
3465
3466struct bpf_iter__unix {
3467	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3468	__bpf_md_ptr(struct unix_sock *, unix_sk);
3469	uid_t uid __aligned(8);
3470};
3471
3472static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3473			      struct unix_sock *unix_sk, uid_t uid)
3474{
3475	struct bpf_iter__unix ctx;
3476
3477	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3478	ctx.meta = meta;
3479	ctx.unix_sk = unix_sk;
3480	ctx.uid = uid;
3481	return bpf_iter_run_prog(prog, &ctx);
3482}
3483
3484static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3485{
3487	struct bpf_unix_iter_state *iter = seq->private;
3488	unsigned int expected = 1;
3489	struct sock *sk;
3490
3491	sock_hold(start_sk);
3492	iter->batch[iter->end_sk++] = start_sk;
3493
3494	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3495		if (iter->end_sk < iter->max_sk) {
3496			sock_hold(sk);
3497			iter->batch[iter->end_sk++] = sk;
3498		}
3499
3500		expected++;
3501	}
3502
3503	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3504
3505	return expected;
3506}
3507
3508static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3509{
3510	while (iter->cur_sk < iter->end_sk)
3511		sock_put(iter->batch[iter->cur_sk++]);
3512}
3513
3514static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3515				       unsigned int new_batch_sz)
3516{
3517	struct sock **new_batch;
3518
3519	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3520			     GFP_USER | __GFP_NOWARN);
3521	if (!new_batch)
3522		return -ENOMEM;
3523
3524	bpf_iter_unix_put_batch(iter);
3525	kvfree(iter->batch);
3526	iter->batch = new_batch;
3527	iter->max_sk = new_batch_sz;
3528
3529	return 0;
3530}
3531
3532static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3533					loff_t *pos)
3534{
3535	struct bpf_unix_iter_state *iter = seq->private;
3536	unsigned int expected;
3537	bool resized = false;
3538	struct sock *sk;
3539
3540	if (iter->st_bucket_done)
3541		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3542
3543again:
3544	/* Get a new batch */
3545	iter->cur_sk = 0;
3546	iter->end_sk = 0;
3547
3548	sk = unix_get_first(seq, pos);
3549	if (!sk)
3550		return NULL; /* Done */
3551
3552	expected = bpf_iter_unix_hold_batch(seq, sk);
3553
3554	if (iter->end_sk == expected) {
3555		iter->st_bucket_done = true;
3556		return sk;
3557	}
3558
3559	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3560		resized = true;
3561		goto again;
3562	}
3563
3564	return sk;
3565}
3566
3567static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3568{
3569	if (!*pos)
3570		return SEQ_START_TOKEN;
3571
3572	/* bpf iter does not support lseek, so it always
3573	 * continues from where it was stop()-ped.
3574	 */
3575	return bpf_iter_unix_batch(seq, pos);
3576}
3577
3578static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3579{
3580	struct bpf_unix_iter_state *iter = seq->private;
3581	struct sock *sk;
3582
3583	/* Whenever seq_next() is called, the iter->cur_sk is
3584	 * done with seq_show(), so advance to the next sk in
3585	 * the batch.
3586	 */
3587	if (iter->cur_sk < iter->end_sk)
3588		sock_put(iter->batch[iter->cur_sk++]);
3589
3590	++*pos;
3591
3592	if (iter->cur_sk < iter->end_sk)
3593		sk = iter->batch[iter->cur_sk];
3594	else
3595		sk = bpf_iter_unix_batch(seq, pos);
3596
3597	return sk;
3598}
3599
3600static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3601{
3602	struct bpf_iter_meta meta;
3603	struct bpf_prog *prog;
3604	struct sock *sk = v;
3605	uid_t uid;
3606	bool slow;
3607	int ret;
3608
3609	if (v == SEQ_START_TOKEN)
3610		return 0;
3611
3612	slow = lock_sock_fast(sk);
3613
3614	if (unlikely(sk_unhashed(sk))) {
3615		ret = SEQ_SKIP;
3616		goto unlock;
3617	}
3618
3619	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3620	meta.seq = seq;
3621	prog = bpf_iter_get_info(&meta, false);
3622	ret = unix_prog_seq_show(prog, &meta, v, uid);
3623unlock:
3624	unlock_sock_fast(sk, slow);
3625	return ret;
3626}
3627
3628static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3629{
3630	struct bpf_unix_iter_state *iter = seq->private;
3631	struct bpf_iter_meta meta;
3632	struct bpf_prog *prog;
3633
3634	if (!v) {
3635		meta.seq = seq;
3636		prog = bpf_iter_get_info(&meta, true);
3637		if (prog)
3638			(void)unix_prog_seq_show(prog, &meta, v, 0);
3639	}
3640
3641	if (iter->cur_sk < iter->end_sk)
3642		bpf_iter_unix_put_batch(iter);
3643}
3644
3645static const struct seq_operations bpf_iter_unix_seq_ops = {
3646	.start	= bpf_iter_unix_seq_start,
3647	.next	= bpf_iter_unix_seq_next,
3648	.stop	= bpf_iter_unix_seq_stop,
3649	.show	= bpf_iter_unix_seq_show,
3650};
3651#endif
3652#endif
3653
3654static const struct net_proto_family unix_family_ops = {
3655	.family = PF_UNIX,
3656	.create = unix_create,
3657	.owner	= THIS_MODULE,
3658};
3659
3660
3661static int __net_init unix_net_init(struct net *net)
3662{
3663	int i;
3664
3665	net->unx.sysctl_max_dgram_qlen = 10;
3666	if (unix_sysctl_register(net))
3667		goto out;
3668
3669#ifdef CONFIG_PROC_FS
3670	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3671			     sizeof(struct seq_net_private)))
3672		goto err_sysctl;
3673#endif
3674
3675	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3676					      sizeof(spinlock_t), GFP_KERNEL);
3677	if (!net->unx.table.locks)
3678		goto err_proc;
3679
3680	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3681						sizeof(struct hlist_head),
3682						GFP_KERNEL);
3683	if (!net->unx.table.buckets)
3684		goto free_locks;
3685
3686	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3687		spin_lock_init(&net->unx.table.locks[i]);
3688		lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
3689		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3690	}
3691
3692	return 0;
3693
3694free_locks:
3695	kvfree(net->unx.table.locks);
3696err_proc:
3697#ifdef CONFIG_PROC_FS
3698	remove_proc_entry("unix", net->proc_net);
3699err_sysctl:
3700#endif
3701	unix_sysctl_unregister(net);
3702out:
3703	return -ENOMEM;
3704}
3705
3706static void __net_exit unix_net_exit(struct net *net)
3707{
3708	kvfree(net->unx.table.buckets);
3709	kvfree(net->unx.table.locks);
3710	unix_sysctl_unregister(net);
3711	remove_proc_entry("unix", net->proc_net);
3712}
3713
3714static struct pernet_operations unix_net_ops = {
3715	.init = unix_net_init,
3716	.exit = unix_net_exit,
3717};
3718
3719#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3720DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3721		     struct unix_sock *unix_sk, uid_t uid)
3722
3723#define INIT_BATCH_SZ 16
3724
3725static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3726{
3727	struct bpf_unix_iter_state *iter = priv_data;
3728	int err;
3729
3730	err = bpf_iter_init_seq_net(priv_data, aux);
3731	if (err)
3732		return err;
3733
3734	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3735	if (err) {
3736		bpf_iter_fini_seq_net(priv_data);
3737		return err;
3738	}
3739
3740	return 0;
3741}
3742
3743static void bpf_iter_fini_unix(void *priv_data)
3744{
3745	struct bpf_unix_iter_state *iter = priv_data;
3746
3747	bpf_iter_fini_seq_net(priv_data);
3748	kvfree(iter->batch);
3749}
3750
3751static const struct bpf_iter_seq_info unix_seq_info = {
3752	.seq_ops		= &bpf_iter_unix_seq_ops,
3753	.init_seq_private	= bpf_iter_init_unix,
3754	.fini_seq_private	= bpf_iter_fini_unix,
3755	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3756};
3757
3758static const struct bpf_func_proto *
3759bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3760			     const struct bpf_prog *prog)
3761{
3762	switch (func_id) {
3763	case BPF_FUNC_setsockopt:
3764		return &bpf_sk_setsockopt_proto;
3765	case BPF_FUNC_getsockopt:
3766		return &bpf_sk_getsockopt_proto;
3767	default:
3768		return NULL;
3769	}
3770}
3771
3772static struct bpf_iter_reg unix_reg_info = {
3773	.target			= "unix",
3774	.ctx_arg_info_size	= 1,
3775	.ctx_arg_info		= {
3776		{ offsetof(struct bpf_iter__unix, unix_sk),
3777		  PTR_TO_BTF_ID_OR_NULL },
3778	},
3779	.get_func_proto         = bpf_iter_unix_get_func_proto,
3780	.seq_info		= &unix_seq_info,
3781};
3782
3783static void __init bpf_iter_register(void)
3784{
3785	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3786	if (bpf_iter_reg_target(&unix_reg_info))
3787		pr_warn("Warning: could not register bpf iterator unix\n");
3788}
3789#endif
3790
3791static int __init af_unix_init(void)
3792{
3793	int i, rc = -1;
3794
3795	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3796
3797	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3798		spin_lock_init(&bsd_socket_locks[i]);
3799		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3800	}
3801
3802	rc = proto_register(&unix_dgram_proto, 1);
3803	if (rc != 0) {
3804		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3805		goto out;
3806	}
3807
3808	rc = proto_register(&unix_stream_proto, 1);
3809	if (rc != 0) {
3810		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3811		proto_unregister(&unix_dgram_proto);
3812		goto out;
3813	}
3814
3815	sock_register(&unix_family_ops);
3816	register_pernet_subsys(&unix_net_ops);
3817	unix_bpf_build_proto();
3818
3819#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3820	bpf_iter_register();
3821#endif
3822
3823out:
3824	return rc;
3825}
3826
3827/* Later than subsys_initcall() because we depend on stuff initialised there */
3828fs_initcall(af_unix_init);