   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * NET4:	Implementation of BSD Unix domain sockets.
   4 *
   5 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
   6 *
   7 * Fixes:
   8 *		Linus Torvalds	:	Assorted bug cures.
   9 *		Niibe Yutaka	:	async I/O support.
  10 *		Carsten Paeth	:	PF_UNIX check, address fixes.
  11 *		Alan Cox	:	Limit size of allocated blocks.
  12 *		Alan Cox	:	Fixed the stupid socketpair bug.
  13 *		Alan Cox	:	BSD compatibility fine tuning.
  14 *		Alan Cox	:	Fixed a bug in connect when interrupted.
  15 *		Alan Cox	:	Sorted out a proper draft version of
  16 *					file descriptor passing hacked up from
  17 *					Mike Shaver's work.
  18 *		Marty Leisner	:	Fixes to fd passing
  19 *		Nick Nevin	:	recvmsg bugfix.
  20 *		Alan Cox	:	Started proper garbage collector
   21 *		Heiko Eißfeldt	:	Missing verify_area check
  22 *		Alan Cox	:	Started POSIXisms
  23 *		Andreas Schwab	:	Replace inode by dentry for proper
  24 *					reference counting
  25 *		Kirk Petersen	:	Made this a module
  26 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
  27 *					Lots of bug fixes.
   28 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
   29 *					by the above two patches.
  30 *	     Andrea Arcangeli	:	If possible we block in connect(2)
  31 *					if the max backlog of the listen socket
   32 *					has been reached. This won't break
   33 *					old apps and it will avoid a huge amount
   34 *					of hashed socks (this is for unix_gc()
   35 *					performance reasons).
  36 *					Security fix that limits the max
  37 *					number of socks to 2*max_files and
  38 *					the number of skb queueable in the
  39 *					dgram receiver.
  40 *		Artur Skawina   :	Hash function optimizations
  41 *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
  42 *	      Malcolm Beattie   :	Set peercred for socketpair
  43 *	     Michal Ostrowski   :       Module initialization cleanup.
  44 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
  45 *	     				the core infrastructure is doing that
  46 *	     				for all net proto families now (2.5.69+)
  47 *
  48 * Known differences from reference BSD that was tested:
  49 *
  50 *	[TO FIX]
  51 *	ECONNREFUSED is not returned from one end of a connected() socket to the
  52 *		other the moment one end closes.
   53 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
  54 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
  55 *	[NOT TO FIX]
  56 *	accept() returns a path name even if the connecting socket has closed
  57 *		in the meantime (BSD loses the path and gives up).
  58 *	accept() returns 0 length path for an unbound connector. BSD returns 16
  59 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  60 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
  61 *	BSD af_unix apparently has connect forgetting to block properly.
  62 *		(need to check this with the POSIX spec in detail)
  63 *
  64 * Differences from 2.0.0-11-... (ANK)
  65 *	Bug fixes and improvements.
  66 *		- client shutdown killed server socket.
  67 *		- removed all useless cli/sti pairs.
  68 *
  69 *	Semantic changes/extensions.
  70 *		- generic control message passing.
  71 *		- SCM_CREDENTIALS control message.
  72 *		- "Abstract" (not FS based) socket bindings.
  73 *		  Abstract names are sequences of bytes (not zero terminated)
  74 *		  started by 0, so that this name space does not intersect
  75 *		  with BSD names.
  76 */
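
/* Illustrative userspace sketch (not part of this file), showing the
 * abstract binding described above.  The name begins with a NUL byte and
 * is *not* zero-terminated; its length is conveyed only via addrlen, so
 * the abstract name space never intersects with filesystem paths:
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memcpy(sun.sun_path, "\0example", 8);
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 */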
  77
  78#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  79
  80#include <linux/module.h>
  81#include <linux/kernel.h>
  82#include <linux/signal.h>
  83#include <linux/sched/signal.h>
  84#include <linux/errno.h>
  85#include <linux/string.h>
  86#include <linux/stat.h>
  87#include <linux/dcache.h>
  88#include <linux/namei.h>
  89#include <linux/socket.h>
  90#include <linux/un.h>
  91#include <linux/fcntl.h>
  92#include <linux/filter.h>
  93#include <linux/termios.h>
  94#include <linux/sockios.h>
  95#include <linux/net.h>
  96#include <linux/in.h>
  97#include <linux/fs.h>
  98#include <linux/slab.h>
  99#include <linux/uaccess.h>
 100#include <linux/skbuff.h>
 101#include <linux/netdevice.h>
 102#include <net/net_namespace.h>
 103#include <net/sock.h>
 104#include <net/tcp_states.h>
 105#include <net/af_unix.h>
 106#include <linux/proc_fs.h>
 107#include <linux/seq_file.h>
 108#include <net/scm.h>
 109#include <linux/init.h>
 110#include <linux/poll.h>
 111#include <linux/rtnetlink.h>
 112#include <linux/mount.h>
 113#include <net/checksum.h>
 114#include <linux/security.h>
 115#include <linux/splice.h>
 116#include <linux/freezer.h>
 117#include <linux/file.h>
 118#include <linux/btf_ids.h>
 119#include <linux/bpf-cgroup.h>
 120
 121static atomic_long_t unix_nr_socks;
 122static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
 123static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
 124
 125/* SMP locking strategy:
 126 *    hash table is protected with spinlock.
 127 *    each socket state is protected by separate spinlock.
 128 */
 129#ifdef CONFIG_PROVE_LOCKING
 130#define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))
 131
 132static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
 133				  const struct lockdep_map *b)
 134{
 135	return cmp_ptr(a, b);
 136}
 137
 138static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
 139				  const struct lockdep_map *_b)
 140{
 141	const struct unix_sock *a, *b;
 142
 143	a = container_of(_a, struct unix_sock, lock.dep_map);
 144	b = container_of(_b, struct unix_sock, lock.dep_map);
 145
 146	if (a->sk.sk_state == TCP_LISTEN) {
 147		/* unix_stream_connect(): Before the 2nd unix_state_lock(),
 148		 *
 149		 *   1. a is TCP_LISTEN.
 150		 *   2. b is not a.
 151		 *   3. concurrent connect(b -> a) must fail.
 152		 *
 153		 * Except for 2. & 3., the b's state can be any possible
 154		 * value due to concurrent connect() or listen().
 155		 *
 156		 * 2. is detected in debug_spin_lock_before(), and 3. cannot
 157		 * be expressed as lock_cmp_fn.
 158		 */
 159		switch (b->sk.sk_state) {
 160		case TCP_CLOSE:
 161		case TCP_ESTABLISHED:
 162		case TCP_LISTEN:
 163			return -1;
 164		default:
 165			/* Invalid case. */
 166			return 0;
 167		}
 168	}
 169
 170	/* Should never happen.  Just to be symmetric. */
 171	if (b->sk.sk_state == TCP_LISTEN) {
 172		switch (b->sk.sk_state) {
 173		case TCP_CLOSE:
 174		case TCP_ESTABLISHED:
 175			return 1;
 176		default:
 177			return 0;
 178		}
 179	}
 180
 181	/* unix_state_double_lock(): ascending address order. */
 182	return cmp_ptr(a, b);
 183}
 184
 185static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
 186				  const struct lockdep_map *_b)
 187{
 188	const struct sock *a, *b;
 189
 190	a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
 191	b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);
 192
 193	/* unix_collect_skb(): listener -> embryo order. */
 194	if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
 195		return -1;
 196
 197	/* Should never happen.  Just to be symmetric. */
 198	if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
 199		return 1;
 200
 201	return 0;
 202}
 203#endif
 204
 205static unsigned int unix_unbound_hash(struct sock *sk)
 206{
 207	unsigned long hash = (unsigned long)sk;
 208
 209	hash ^= hash >> 16;
 210	hash ^= hash >> 8;
 211	hash ^= sk->sk_type;
 212
 213	return hash & UNIX_HASH_MOD;
 214}
 215
 216static unsigned int unix_bsd_hash(struct inode *i)
 217{
 218	return i->i_ino & UNIX_HASH_MOD;
 219}
 220
 221static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
 222				       int addr_len, int type)
 223{
 224	__wsum csum = csum_partial(sunaddr, addr_len, 0);
 225	unsigned int hash;
 226
 227	hash = (__force unsigned int)csum_fold(csum);
 228	hash ^= hash >> 8;
 229	hash ^= type;
 230
 231	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
 232}
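
/* Sketch of the resulting hash-space layout (inferred from the three
 * helpers above): unbound and pathname (BSD) sockets always hash into
 * the lower half [0, UNIX_HASH_MOD] of net->unx.table, while abstract
 * sockets are offset by UNIX_HASH_MOD + 1 into the upper half.  A lookup
 * therefore never has to disambiguate an abstract name from a pathname
 * within a single bucket.
 */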
 233
 234static void unix_table_double_lock(struct net *net,
 235				   unsigned int hash1, unsigned int hash2)
 236{
 237	if (hash1 == hash2) {
 238		spin_lock(&net->unx.table.locks[hash1]);
 239		return;
 240	}
 241
 242	if (hash1 > hash2)
 243		swap(hash1, hash2);
 244
 245	spin_lock(&net->unx.table.locks[hash1]);
 246	spin_lock(&net->unx.table.locks[hash2]);
 247}
 248
 249static void unix_table_double_unlock(struct net *net,
 250				     unsigned int hash1, unsigned int hash2)
 251{
 252	if (hash1 == hash2) {
 253		spin_unlock(&net->unx.table.locks[hash1]);
 254		return;
 255	}
 256
 257	spin_unlock(&net->unx.table.locks[hash1]);
 258	spin_unlock(&net->unx.table.locks[hash2]);
 259}
 260
 261#ifdef CONFIG_SECURITY_NETWORK
 262static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 263{
 264	UNIXCB(skb).secid = scm->secid;
 265}
 266
 267static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 268{
 269	scm->secid = UNIXCB(skb).secid;
 270}
 271
 272static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 273{
 274	return (scm->secid == UNIXCB(skb).secid);
 275}
 276#else
 277static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 278{ }
 279
 280static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 281{ }
 282
 283static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 284{
 285	return true;
 286}
 287#endif /* CONFIG_SECURITY_NETWORK */
 288
 289static inline int unix_our_peer(struct sock *sk, struct sock *osk)
 290{
 291	return unix_peer(osk) == sk;
 292}
 293
 294static inline int unix_may_send(struct sock *sk, struct sock *osk)
 295{
 296	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 297}
 298
 299static inline int unix_recvq_full_lockless(const struct sock *sk)
 300{
 301	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 302}
 303
 304struct sock *unix_peer_get(struct sock *s)
 305{
 306	struct sock *peer;
 307
 308	unix_state_lock(s);
 309	peer = unix_peer(s);
 310	if (peer)
 311		sock_hold(peer);
 312	unix_state_unlock(s);
 313	return peer;
 314}
 315EXPORT_SYMBOL_GPL(unix_peer_get);
 316
 317static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
 318					     int addr_len)
 319{
 320	struct unix_address *addr;
 321
 322	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
 323	if (!addr)
 324		return NULL;
 325
 326	refcount_set(&addr->refcnt, 1);
 327	addr->len = addr_len;
 328	memcpy(addr->name, sunaddr, addr_len);
 329
 330	return addr;
 331}
 332
 333static inline void unix_release_addr(struct unix_address *addr)
 334{
 335	if (refcount_dec_and_test(&addr->refcnt))
 336		kfree(addr);
 337}
 338
 339/*
 340 *	Check unix socket name:
  341 *		- it should not be zero length.
  342 *	        - if it starts with a non-zero byte, it is a NUL-terminated FS object name.
  343 *		- if it starts with a zero byte, it is an abstract name.
 344 */
 345
 346static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
 347{
 348	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
 349	    addr_len > sizeof(*sunaddr))
 350		return -EINVAL;
 351
 352	if (sunaddr->sun_family != AF_UNIX)
 353		return -EINVAL;
 354
 355	return 0;
 356}
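
/* Examples of what passes this check (a sketch; offsets assume the usual
 * 2-byte sun_family ABI):
 *
 *   addr_len == 2, sun_family only  -> rejected here, but unix_bind()
 *                                      treats it as autobind beforehand.
 *   "\0name",  addr_len == 2 + 5    -> abstract name.
 *   "/tmp/sk", addr_len == 2 + 8    -> filesystem name, NUL-terminated
 *                                      later by unix_mkname_bsd().
 */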
 357
 358static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
 359{
 360	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
 361	short offset = offsetof(struct sockaddr_storage, __data);
 362
 363	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
 364
 365	/* This may look like an off by one error but it is a bit more
 366	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
 367	 * sun_path[108] doesn't as such exist.  However in kernel space
 368	 * we are guaranteed that it is a valid memory location in our
 369	 * kernel address buffer because syscall functions always pass
 370	 * a pointer of struct sockaddr_storage which has a bigger buffer
 371	 * than 108.  Also, we must terminate sun_path for strlen() in
 372	 * getname_kernel().
 373	 */
 374	addr->__data[addr_len - offset] = 0;
 375
 376	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, 108 will
 377	 * cause panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
 378	 * know the actual buffer.
 379	 */
 380	return strlen(addr->__data) + offset + 1;
 381}
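
/* Worked example (sketch): for a maximal, non-terminated 108-byte path
 * with addr_len == 110, the store above writes the NUL to __data[108],
 * one byte past sun_path[] but still inside sockaddr_storage, and the
 * function returns 108 + 2 + 1 == 111 as the terminated length.
 */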
 382
 383static void __unix_remove_socket(struct sock *sk)
 384{
 385	sk_del_node_init(sk);
 386}
 387
 388static void __unix_insert_socket(struct net *net, struct sock *sk)
 389{
 390	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 391	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
 392}
 393
 394static void __unix_set_addr_hash(struct net *net, struct sock *sk,
 395				 struct unix_address *addr, unsigned int hash)
 396{
 397	__unix_remove_socket(sk);
 398	smp_store_release(&unix_sk(sk)->addr, addr);
 399
 400	sk->sk_hash = hash;
 401	__unix_insert_socket(net, sk);
 402}
 403
 404static void unix_remove_socket(struct net *net, struct sock *sk)
 405{
 406	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 407	__unix_remove_socket(sk);
 408	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 409}
 410
 411static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
 412{
 413	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 414	__unix_insert_socket(net, sk);
 415	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 416}
 417
 418static void unix_insert_bsd_socket(struct sock *sk)
 419{
 420	spin_lock(&bsd_socket_locks[sk->sk_hash]);
 421	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
 422	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 423}
 424
 425static void unix_remove_bsd_socket(struct sock *sk)
 426{
 427	if (!hlist_unhashed(&sk->sk_bind_node)) {
 428		spin_lock(&bsd_socket_locks[sk->sk_hash]);
 429		__sk_del_bind_node(sk);
 430		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 431
 432		sk_node_init(&sk->sk_bind_node);
 433	}
 434}
 435
 436static struct sock *__unix_find_socket_byname(struct net *net,
 437					      struct sockaddr_un *sunname,
 438					      int len, unsigned int hash)
 439{
 440	struct sock *s;
 441
 442	sk_for_each(s, &net->unx.table.buckets[hash]) {
 443		struct unix_sock *u = unix_sk(s);
 444
 445		if (u->addr->len == len &&
 446		    !memcmp(u->addr->name, sunname, len))
 447			return s;
 448	}
 449	return NULL;
 450}
 451
 452static inline struct sock *unix_find_socket_byname(struct net *net,
 453						   struct sockaddr_un *sunname,
 454						   int len, unsigned int hash)
 455{
 456	struct sock *s;
 457
 458	spin_lock(&net->unx.table.locks[hash]);
 459	s = __unix_find_socket_byname(net, sunname, len, hash);
 460	if (s)
 461		sock_hold(s);
 462	spin_unlock(&net->unx.table.locks[hash]);
 463	return s;
 464}
 465
 466static struct sock *unix_find_socket_byinode(struct inode *i)
 467{
 468	unsigned int hash = unix_bsd_hash(i);
 469	struct sock *s;
 470
 471	spin_lock(&bsd_socket_locks[hash]);
 472	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
 473		struct dentry *dentry = unix_sk(s)->path.dentry;
 474
 475		if (dentry && d_backing_inode(dentry) == i) {
 476			sock_hold(s);
 477			spin_unlock(&bsd_socket_locks[hash]);
 478			return s;
 479		}
 480	}
 481	spin_unlock(&bsd_socket_locks[hash]);
 482	return NULL;
 483}
 484
 485/* Support code for asymmetrically connected dgram sockets
 486 *
 487 * If a datagram socket is connected to a socket not itself connected
  488 * to the first socket (e.g., /dev/log), clients may only enqueue more
 489 * messages if the present receive queue of the server socket is not
 490 * "too large". This means there's a second writeability condition
 491 * poll and sendmsg need to test. The dgram recv code will do a wake
 492 * up on the peer_wait wait queue of a socket upon reception of a
 493 * datagram which needs to be propagated to sleeping would-be writers
 494 * since these might not have sent anything so far. This can't be
 495 * accomplished via poll_wait because the lifetime of the server
 496 * socket might be less than that of its clients if these break their
 497 * association with it or if the server socket is closed while clients
 498 * are still connected to it and there's no way to inform "a polling
  499 * implementation" that it should let go of a certain wait queue.
 500 *
 501 * In order to propagate a wake up, a wait_queue_entry_t of the client
 502 * socket is enqueued on the peer_wait queue of the server socket
 503 * whose wake function does a wake_up on the ordinary client socket
 504 * wait queue. This connection is established whenever a write (or
  505 * poll for write) hits the flow control condition, and is broken when the
 506 * association to the server socket is dissolved or after a wake up
 507 * was relayed.
 508 */
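
/* Userspace view of the above (illustrative sketch only): a client
 * connect()ed to a busy datagram server, e.g. a syslog-style socket, may
 * see POLLOUT withheld purely because the *server's* receive queue is
 * full.  The peer_wait relay below is what eventually makes this poll()
 * return once the server drains its queue:
 *
 *	struct pollfd pfd = { .fd = client_fd, .events = POLLOUT };
 *
 *	poll(&pfd, 1, -1);
 */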
 509
 510static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 511				      void *key)
 512{
 513	struct unix_sock *u;
 514	wait_queue_head_t *u_sleep;
 515
 516	u = container_of(q, struct unix_sock, peer_wake);
 517
 518	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 519			    q);
 520	u->peer_wake.private = NULL;
 521
 522	/* relaying can only happen while the wq still exists */
 523	u_sleep = sk_sleep(&u->sk);
 524	if (u_sleep)
 525		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
 526
 527	return 0;
 528}
 529
 530static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 531{
 532	struct unix_sock *u, *u_other;
 533	int rc;
 534
 535	u = unix_sk(sk);
 536	u_other = unix_sk(other);
 537	rc = 0;
 538	spin_lock(&u_other->peer_wait.lock);
 539
 540	if (!u->peer_wake.private) {
 541		u->peer_wake.private = other;
 542		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 543
 544		rc = 1;
 545	}
 546
 547	spin_unlock(&u_other->peer_wait.lock);
 548	return rc;
 549}
 550
 551static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 552					    struct sock *other)
 553{
 554	struct unix_sock *u, *u_other;
 555
 556	u = unix_sk(sk);
 557	u_other = unix_sk(other);
 558	spin_lock(&u_other->peer_wait.lock);
 559
 560	if (u->peer_wake.private == other) {
 561		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 562		u->peer_wake.private = NULL;
 563	}
 564
 565	spin_unlock(&u_other->peer_wait.lock);
 566}
 567
 568static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 569						   struct sock *other)
 570{
 571	unix_dgram_peer_wake_disconnect(sk, other);
 572	wake_up_interruptible_poll(sk_sleep(sk),
 573				   EPOLLOUT |
 574				   EPOLLWRNORM |
 575				   EPOLLWRBAND);
 576}
 577
 578/* preconditions:
 579 *	- unix_peer(sk) == other
 580 *	- association is stable
 581 */
 582static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 583{
 584	int connected;
 585
 586	connected = unix_dgram_peer_wake_connect(sk, other);
 587
 588	/* If other is SOCK_DEAD, we want to make sure we signal
 589	 * POLLOUT, such that a subsequent write() can get a
 590	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
  591 * to other and it's full, we will hang waiting for POLLOUT.
 592	 */
 593	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
 594		return 1;
 595
 596	if (connected)
 597		unix_dgram_peer_wake_disconnect(sk, other);
 598
 599	return 0;
 600}
 601
 602static int unix_writable(const struct sock *sk, unsigned char state)
 603{
 604	return state != TCP_LISTEN &&
 605		(refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
 606}
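
/* The shift above makes a socket writable only while outstanding write
 * memory stays at or below a quarter of sk_sndbuf.  E.g. (sketch), with
 * a common default sndbuf of 212992 bytes, wmem_alloc may grow to 53248
 * bytes before POLLOUT is withheld.
 */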
 607
 608static void unix_write_space(struct sock *sk)
 609{
 610	struct socket_wq *wq;
 611
 612	rcu_read_lock();
 613	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
 614		wq = rcu_dereference(sk->sk_wq);
 615		if (skwq_has_sleeper(wq))
 616			wake_up_interruptible_sync_poll(&wq->wait,
 617				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 618		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
 619	}
 620	rcu_read_unlock();
 621}
 622
 623/* When dgram socket disconnects (or changes its peer), we clear its receive
  624 * queue of packets that arrived from the previous peer. First, this allows
  625 * flow control based only on wmem_alloc; second, a sk connected to a peer
  626 * may receive messages only from that peer. */
 627static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 628{
 629	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 630		skb_queue_purge(&sk->sk_receive_queue);
 631		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 632
 633		/* If one link of bidirectional dgram pipe is disconnected,
  634		 * we signal an error. Messages are lost. Do not do this
  635		 * when the peer was not connected to us.
 636		 */
 637		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
 638			WRITE_ONCE(other->sk_err, ECONNRESET);
 639			sk_error_report(other);
 640		}
 641	}
 642}
 643
 644static void unix_sock_destructor(struct sock *sk)
 645{
 646	struct unix_sock *u = unix_sk(sk);
 647
 648	skb_queue_purge(&sk->sk_receive_queue);
 649
 650	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
 651	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 652	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
 653	if (!sock_flag(sk, SOCK_DEAD)) {
 654		pr_info("Attempt to release alive unix socket: %p\n", sk);
 655		return;
 656	}
 657
 658	if (u->addr)
 659		unix_release_addr(u->addr);
 660
 661	atomic_long_dec(&unix_nr_socks);
 662	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 663#ifdef UNIX_REFCNT_DEBUG
 664	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
 665		atomic_long_read(&unix_nr_socks));
 666#endif
 667}
 668
 669static void unix_release_sock(struct sock *sk, int embrion)
 670{
 671	struct unix_sock *u = unix_sk(sk);
 672	struct sock *skpair;
 673	struct sk_buff *skb;
 674	struct path path;
 675	int state;
 676
 677	unix_remove_socket(sock_net(sk), sk);
 678	unix_remove_bsd_socket(sk);
 679
 680	/* Clear state */
 681	unix_state_lock(sk);
 682	sock_orphan(sk);
 683	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 684	path	     = u->path;
 685	u->path.dentry = NULL;
 686	u->path.mnt = NULL;
 687	state = sk->sk_state;
 688	WRITE_ONCE(sk->sk_state, TCP_CLOSE);
 689
 690	skpair = unix_peer(sk);
 691	unix_peer(sk) = NULL;
 692
 693	unix_state_unlock(sk);
 694
 695#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 696	u->oob_skb = NULL;
 697#endif
 698
 699	wake_up_interruptible_all(&u->peer_wait);
 700
 701	if (skpair != NULL) {
 702		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 703			unix_state_lock(skpair);
 704			/* No more writes */
 705			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
 706			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
 707				WRITE_ONCE(skpair->sk_err, ECONNRESET);
 708			unix_state_unlock(skpair);
 709			skpair->sk_state_change(skpair);
 710			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 711		}
 712
 713		unix_dgram_peer_wake_disconnect(sk, skpair);
 714		sock_put(skpair); /* It may now die */
 715	}
 716
 717	/* Try to flush out this socket. Throw out buffers at least */
 718
 719	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 720		if (state == TCP_LISTEN)
 721			unix_release_sock(skb->sk, 1);
 722
 723		/* passed fds are erased in the kfree_skb hook	      */
 724		kfree_skb(skb);
 725	}
 726
 727	if (path.dentry)
 728		path_put(&path);
 729
 730	sock_put(sk);
 731
 732	/* ---- Socket is dead now and most probably destroyed ---- */
 733
 734	/*
 735	 * Fixme: BSD difference: In BSD all sockets connected to us get
 736	 *	  ECONNRESET and we die on the spot. In Linux we behave
 737	 *	  like files and pipes do and wait for the last
 738	 *	  dereference.
 739	 *
 740	 * Can't we simply set sock->err?
 741	 *
  742	 *	  What does the above comment talk about? --ANK(980817)
 743	 */
 744
 745	if (READ_ONCE(unix_tot_inflight))
 746		unix_gc();		/* Garbage collect fds */
 747}
 748
 749static void init_peercred(struct sock *sk)
 750{
 751	sk->sk_peer_pid = get_pid(task_tgid(current));
 752	sk->sk_peer_cred = get_current_cred();
 753}
 754
 755static void update_peercred(struct sock *sk)
 756{
 757	const struct cred *old_cred;
 758	struct pid *old_pid;
 759
 760	spin_lock(&sk->sk_peer_lock);
 761	old_pid = sk->sk_peer_pid;
 762	old_cred = sk->sk_peer_cred;
 763	init_peercred(sk);
 764	spin_unlock(&sk->sk_peer_lock);
 765
 766	put_pid(old_pid);
 767	put_cred(old_cred);
 768}
 769
 770static void copy_peercred(struct sock *sk, struct sock *peersk)
 771{
 772	lockdep_assert_held(&unix_sk(peersk)->lock);
 773
 774	spin_lock(&sk->sk_peer_lock);
 775	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
 776	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 777	spin_unlock(&sk->sk_peer_lock);
 778}
 779
 780static int unix_listen(struct socket *sock, int backlog)
 781{
 782	int err;
 783	struct sock *sk = sock->sk;
 784	struct unix_sock *u = unix_sk(sk);
 785
 786	err = -EOPNOTSUPP;
 787	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 788		goto out;	/* Only stream/seqpacket sockets accept */
 789	err = -EINVAL;
 790	if (!READ_ONCE(u->addr))
 791		goto out;	/* No listens on an unbound socket */
 792	unix_state_lock(sk);
 793	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
 794		goto out_unlock;
 795	if (backlog > sk->sk_max_ack_backlog)
 796		wake_up_interruptible_all(&u->peer_wait);
 797	sk->sk_max_ack_backlog	= backlog;
 798	WRITE_ONCE(sk->sk_state, TCP_LISTEN);
 799
 800	/* set credentials so connect can copy them */
 801	update_peercred(sk);
 802	err = 0;
 803
 804out_unlock:
 805	unix_state_unlock(sk);
 806out:
 807	return err;
 808}
 809
 810static int unix_release(struct socket *);
 811static int unix_bind(struct socket *, struct sockaddr *, int);
 812static int unix_stream_connect(struct socket *, struct sockaddr *,
 813			       int addr_len, int flags);
 814static int unix_socketpair(struct socket *, struct socket *);
 815static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
 816static int unix_getname(struct socket *, struct sockaddr *, int);
 817static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
 818static __poll_t unix_dgram_poll(struct file *, struct socket *,
 819				    poll_table *);
 820static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 821#ifdef CONFIG_COMPAT
 822static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 823#endif
 824static int unix_shutdown(struct socket *, int);
 825static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 826static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
 827static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
 828				       struct pipe_inode_info *, size_t size,
 829				       unsigned int flags);
 830static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 831static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 832static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 833static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 834static int unix_dgram_connect(struct socket *, struct sockaddr *,
 835			      int, int);
 836static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
 837static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
 838				  int);
 839
 840#ifdef CONFIG_PROC_FS
 841static int unix_count_nr_fds(struct sock *sk)
 842{
 843	struct sk_buff *skb;
 844	struct unix_sock *u;
 845	int nr_fds = 0;
 846
 847	spin_lock(&sk->sk_receive_queue.lock);
 848	skb = skb_peek(&sk->sk_receive_queue);
 849	while (skb) {
 850		u = unix_sk(skb->sk);
 851		nr_fds += atomic_read(&u->scm_stat.nr_fds);
 852		skb = skb_peek_next(skb, &sk->sk_receive_queue);
 853	}
 854	spin_unlock(&sk->sk_receive_queue.lock);
 855
 856	return nr_fds;
 857}
 858
 859static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 860{
 861	struct sock *sk = sock->sk;
 862	unsigned char s_state;
 863	struct unix_sock *u;
 864	int nr_fds = 0;
 865
 866	if (sk) {
 867		s_state = READ_ONCE(sk->sk_state);
 868		u = unix_sk(sk);
 869
 870		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
 871		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
 872		 * SOCK_DGRAM is ordinary. So, no lock is needed.
 873		 */
 874		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
 875			nr_fds = atomic_read(&u->scm_stat.nr_fds);
 876		else if (s_state == TCP_LISTEN)
 877			nr_fds = unix_count_nr_fds(sk);
 878
 879		seq_printf(m, "scm_fds: %u\n", nr_fds);
 880	}
 881}
 882#else
 883#define unix_show_fdinfo NULL
 884#endif
 885
 886static const struct proto_ops unix_stream_ops = {
 887	.family =	PF_UNIX,
 888	.owner =	THIS_MODULE,
 889	.release =	unix_release,
 890	.bind =		unix_bind,
 891	.connect =	unix_stream_connect,
 892	.socketpair =	unix_socketpair,
 893	.accept =	unix_accept,
 894	.getname =	unix_getname,
 895	.poll =		unix_poll,
 896	.ioctl =	unix_ioctl,
 897#ifdef CONFIG_COMPAT
 898	.compat_ioctl =	unix_compat_ioctl,
 899#endif
 900	.listen =	unix_listen,
 901	.shutdown =	unix_shutdown,
 902	.sendmsg =	unix_stream_sendmsg,
 903	.recvmsg =	unix_stream_recvmsg,
 904	.read_skb =	unix_stream_read_skb,
 905	.mmap =		sock_no_mmap,
 906	.splice_read =	unix_stream_splice_read,
 907	.set_peek_off =	sk_set_peek_off,
 908	.show_fdinfo =	unix_show_fdinfo,
 909};
 910
 911static const struct proto_ops unix_dgram_ops = {
 912	.family =	PF_UNIX,
 913	.owner =	THIS_MODULE,
 914	.release =	unix_release,
 915	.bind =		unix_bind,
 916	.connect =	unix_dgram_connect,
 917	.socketpair =	unix_socketpair,
 918	.accept =	sock_no_accept,
 919	.getname =	unix_getname,
 920	.poll =		unix_dgram_poll,
 921	.ioctl =	unix_ioctl,
 922#ifdef CONFIG_COMPAT
 923	.compat_ioctl =	unix_compat_ioctl,
 924#endif
 925	.listen =	sock_no_listen,
 926	.shutdown =	unix_shutdown,
 927	.sendmsg =	unix_dgram_sendmsg,
 928	.read_skb =	unix_read_skb,
 929	.recvmsg =	unix_dgram_recvmsg,
 930	.mmap =		sock_no_mmap,
 931	.set_peek_off =	sk_set_peek_off,
 932	.show_fdinfo =	unix_show_fdinfo,
 933};
 934
 935static const struct proto_ops unix_seqpacket_ops = {
 936	.family =	PF_UNIX,
 937	.owner =	THIS_MODULE,
 938	.release =	unix_release,
 939	.bind =		unix_bind,
 940	.connect =	unix_stream_connect,
 941	.socketpair =	unix_socketpair,
 942	.accept =	unix_accept,
 943	.getname =	unix_getname,
 944	.poll =		unix_dgram_poll,
 945	.ioctl =	unix_ioctl,
 946#ifdef CONFIG_COMPAT
 947	.compat_ioctl =	unix_compat_ioctl,
 948#endif
 949	.listen =	unix_listen,
 950	.shutdown =	unix_shutdown,
 951	.sendmsg =	unix_seqpacket_sendmsg,
 952	.recvmsg =	unix_seqpacket_recvmsg,
 953	.mmap =		sock_no_mmap,
 954	.set_peek_off =	sk_set_peek_off,
 955	.show_fdinfo =	unix_show_fdinfo,
 956};
 957
 958static void unix_close(struct sock *sk, long timeout)
 959{
 960	/* Nothing to do here, unix socket does not need a ->close().
 961	 * This is merely for sockmap.
 962	 */
 963}
 964
 965static void unix_unhash(struct sock *sk)
 966{
 967	/* Nothing to do here, unix socket does not need a ->unhash().
 968	 * This is merely for sockmap.
 969	 */
 970}
 971
 972static bool unix_bpf_bypass_getsockopt(int level, int optname)
 973{
 974	if (level == SOL_SOCKET) {
 975		switch (optname) {
 976		case SO_PEERPIDFD:
 977			return true;
 978		default:
 979			return false;
 980		}
 981	}
 982
 983	return false;
 984}
 985
 986struct proto unix_dgram_proto = {
 987	.name			= "UNIX",
 988	.owner			= THIS_MODULE,
 989	.obj_size		= sizeof(struct unix_sock),
 990	.close			= unix_close,
 991	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 992#ifdef CONFIG_BPF_SYSCALL
 993	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
 994#endif
 995};
 996
 997struct proto unix_stream_proto = {
 998	.name			= "UNIX-STREAM",
 999	.owner			= THIS_MODULE,
1000	.obj_size		= sizeof(struct unix_sock),
1001	.close			= unix_close,
1002	.unhash			= unix_unhash,
1003	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
1004#ifdef CONFIG_BPF_SYSCALL
1005	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
1006#endif
1007};
1008
1009static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
1010{
1011	struct unix_sock *u;
1012	struct sock *sk;
1013	int err;
1014
1015	atomic_long_inc(&unix_nr_socks);
1016	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
1017		err = -ENFILE;
1018		goto err;
1019	}
1020
1021	if (type == SOCK_STREAM)
1022		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
 1023	else /* dgram and seqpacket */
1024		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
1025
1026	if (!sk) {
1027		err = -ENOMEM;
1028		goto err;
1029	}
1030
1031	sock_init_data(sock, sk);
1032
1033	sk->sk_hash		= unix_unbound_hash(sk);
1034	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
1035	sk->sk_write_space	= unix_write_space;
1036	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
1037	sk->sk_destruct		= unix_sock_destructor;
1038	lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);
1039
1040	u = unix_sk(sk);
1041	u->listener = NULL;
1042	u->vertex = NULL;
1043	u->path.dentry = NULL;
1044	u->path.mnt = NULL;
1045	spin_lock_init(&u->lock);
1046	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
1047	mutex_init(&u->iolock); /* single task reading lock */
1048	mutex_init(&u->bindlock); /* single task binding lock */
1049	init_waitqueue_head(&u->peer_wait);
1050	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
1051	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
1052	unix_insert_unbound_socket(net, sk);
1053
1054	sock_prot_inuse_add(net, sk->sk_prot, 1);
1055
1056	return sk;
1057
1058err:
1059	atomic_long_dec(&unix_nr_socks);
1060	return ERR_PTR(err);
1061}
1062
1063static int unix_create(struct net *net, struct socket *sock, int protocol,
1064		       int kern)
1065{
1066	struct sock *sk;
1067
1068	if (protocol && protocol != PF_UNIX)
1069		return -EPROTONOSUPPORT;
1070
1071	sock->state = SS_UNCONNECTED;
1072
1073	switch (sock->type) {
1074	case SOCK_STREAM:
1075		sock->ops = &unix_stream_ops;
1076		break;
1077		/*
1078		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
1079		 *	nothing uses it.
1080		 */
1081	case SOCK_RAW:
1082		sock->type = SOCK_DGRAM;
1083		fallthrough;
1084	case SOCK_DGRAM:
1085		sock->ops = &unix_dgram_ops;
1086		break;
1087	case SOCK_SEQPACKET:
1088		sock->ops = &unix_seqpacket_ops;
1089		break;
1090	default:
1091		return -ESOCKTNOSUPPORT;
1092	}
1093
1094	sk = unix_create1(net, sock, kern, sock->type);
1095	if (IS_ERR(sk))
1096		return PTR_ERR(sk);
1097
1098	return 0;
1099}
1100
1101static int unix_release(struct socket *sock)
1102{
1103	struct sock *sk = sock->sk;
1104
1105	if (!sk)
1106		return 0;
1107
1108	sk->sk_prot->close(sk, 0);
1109	unix_release_sock(sk, 0);
1110	sock->sk = NULL;
1111
1112	return 0;
1113}
1114
1115static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1116				  int type)
1117{
1118	struct inode *inode;
1119	struct path path;
1120	struct sock *sk;
1121	int err;
1122
1123	unix_mkname_bsd(sunaddr, addr_len);
1124	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1125	if (err)
1126		goto fail;
1127
1128	err = path_permission(&path, MAY_WRITE);
1129	if (err)
1130		goto path_put;
1131
1132	err = -ECONNREFUSED;
1133	inode = d_backing_inode(path.dentry);
1134	if (!S_ISSOCK(inode->i_mode))
1135		goto path_put;
1136
1137	sk = unix_find_socket_byinode(inode);
1138	if (!sk)
1139		goto path_put;
1140
1141	err = -EPROTOTYPE;
1142	if (sk->sk_type == type)
1143		touch_atime(&path);
1144	else
1145		goto sock_put;
1146
1147	path_put(&path);
1148
1149	return sk;
1150
1151sock_put:
1152	sock_put(sk);
1153path_put:
1154	path_put(&path);
1155fail:
1156	return ERR_PTR(err);
1157}
1158
1159static struct sock *unix_find_abstract(struct net *net,
1160				       struct sockaddr_un *sunaddr,
1161				       int addr_len, int type)
1162{
1163	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1164	struct dentry *dentry;
1165	struct sock *sk;
1166
1167	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1168	if (!sk)
1169		return ERR_PTR(-ECONNREFUSED);
1170
1171	dentry = unix_sk(sk)->path.dentry;
1172	if (dentry)
1173		touch_atime(&unix_sk(sk)->path);
1174
1175	return sk;
1176}
1177
1178static struct sock *unix_find_other(struct net *net,
1179				    struct sockaddr_un *sunaddr,
1180				    int addr_len, int type)
1181{
1182	struct sock *sk;
1183
1184	if (sunaddr->sun_path[0])
1185		sk = unix_find_bsd(sunaddr, addr_len, type);
1186	else
1187		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1188
1189	return sk;
1190}
1191
1192static int unix_autobind(struct sock *sk)
1193{
1194	struct unix_sock *u = unix_sk(sk);
1195	unsigned int new_hash, old_hash;
1196	struct net *net = sock_net(sk);
1197	struct unix_address *addr;
1198	u32 lastnum, ordernum;
1199	int err;
1200
1201	err = mutex_lock_interruptible(&u->bindlock);
1202	if (err)
1203		return err;
1204
1205	if (u->addr)
1206		goto out;
1207
1208	err = -ENOMEM;
1209	addr = kzalloc(sizeof(*addr) +
1210		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1211	if (!addr)
1212		goto out;
1213
1214	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1215	addr->name->sun_family = AF_UNIX;
1216	refcount_set(&addr->refcnt, 1);
1217
1218	old_hash = sk->sk_hash;
1219	ordernum = get_random_u32();
1220	lastnum = ordernum & 0xFFFFF;
1221retry:
1222	ordernum = (ordernum + 1) & 0xFFFFF;
1223	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1224
1225	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1226	unix_table_double_lock(net, old_hash, new_hash);
1227
1228	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1229		unix_table_double_unlock(net, old_hash, new_hash);
1230
 1231		/* __unix_find_socket_byname() may take a long time if many names
1232		 * are already in use.
1233		 */
1234		cond_resched();
1235
1236		if (ordernum == lastnum) {
 1237			/* Give up if all names seem to be in use. */
1238			err = -ENOSPC;
1239			unix_release_addr(addr);
1240			goto out;
1241		}
1242
1243		goto retry;
1244	}
1245
1246	__unix_set_addr_hash(net, sk, addr, new_hash);
1247	unix_table_double_unlock(net, old_hash, new_hash);
1248	err = 0;
1249
1250out:	mutex_unlock(&u->bindlock);
1251	return err;
1252}
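
/* The autobound address is abstract: a leading NUL followed by five hex
 * digits taken from a 20-bit counter.  E.g. (sketch), getsockname() on
 * an autobound socket might return sun_path "\0a03f2" with an address
 * length of offsetof(struct sockaddr_un, sun_path) + 6.
 */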
1253
1254static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1255			 int addr_len)
1256{
1257	umode_t mode = S_IFSOCK |
1258	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1259	struct unix_sock *u = unix_sk(sk);
1260	unsigned int new_hash, old_hash;
1261	struct net *net = sock_net(sk);
1262	struct mnt_idmap *idmap;
1263	struct unix_address *addr;
1264	struct dentry *dentry;
1265	struct path parent;
1266	int err;
1267
1268	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1269	addr = unix_create_addr(sunaddr, addr_len);
1270	if (!addr)
1271		return -ENOMEM;
1272
1273	/*
1274	 * Get the parent directory, calculate the hash for last
1275	 * component.
1276	 */
1277	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1278	if (IS_ERR(dentry)) {
1279		err = PTR_ERR(dentry);
1280		goto out;
1281	}
1282
1283	/*
1284	 * All right, let's create it.
1285	 */
1286	idmap = mnt_idmap(parent.mnt);
1287	err = security_path_mknod(&parent, dentry, mode, 0);
1288	if (!err)
1289		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1290	if (err)
1291		goto out_path;
1292	err = mutex_lock_interruptible(&u->bindlock);
1293	if (err)
1294		goto out_unlink;
1295	if (u->addr)
1296		goto out_unlock;
1297
1298	old_hash = sk->sk_hash;
1299	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1300	unix_table_double_lock(net, old_hash, new_hash);
1301	u->path.mnt = mntget(parent.mnt);
1302	u->path.dentry = dget(dentry);
1303	__unix_set_addr_hash(net, sk, addr, new_hash);
1304	unix_table_double_unlock(net, old_hash, new_hash);
1305	unix_insert_bsd_socket(sk);
1306	mutex_unlock(&u->bindlock);
1307	done_path_create(&parent, dentry);
1308	return 0;
1309
1310out_unlock:
1311	mutex_unlock(&u->bindlock);
1312	err = -EINVAL;
1313out_unlink:
1314	/* failed after successful mknod?  unlink what we'd created... */
1315	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1316out_path:
1317	done_path_create(&parent, dentry);
1318out:
1319	unix_release_addr(addr);
1320	return err == -EEXIST ? -EADDRINUSE : err;
1321}
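
/* Userspace consequence (sketch): the socket inode created by the mknod
 * above outlives the socket, so a stale path from a previous run makes
 * bind() fail with EADDRINUSE (mapped from -EEXIST above) until it is
 * removed:
 *
 *	unlink("/tmp/demo.sock");	(ignore ENOENT)
 *	bind(fd, (struct sockaddr *)&sun, len);
 */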
1322
1323static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1324			      int addr_len)
1325{
1326	struct unix_sock *u = unix_sk(sk);
1327	unsigned int new_hash, old_hash;
1328	struct net *net = sock_net(sk);
1329	struct unix_address *addr;
1330	int err;
1331
1332	addr = unix_create_addr(sunaddr, addr_len);
1333	if (!addr)
1334		return -ENOMEM;
1335
1336	err = mutex_lock_interruptible(&u->bindlock);
1337	if (err)
1338		goto out;
1339
1340	if (u->addr) {
1341		err = -EINVAL;
1342		goto out_mutex;
1343	}
1344
1345	old_hash = sk->sk_hash;
1346	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1347	unix_table_double_lock(net, old_hash, new_hash);
1348
1349	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1350		goto out_spin;
1351
1352	__unix_set_addr_hash(net, sk, addr, new_hash);
1353	unix_table_double_unlock(net, old_hash, new_hash);
1354	mutex_unlock(&u->bindlock);
1355	return 0;
1356
1357out_spin:
1358	unix_table_double_unlock(net, old_hash, new_hash);
1359	err = -EADDRINUSE;
1360out_mutex:
1361	mutex_unlock(&u->bindlock);
1362out:
1363	unix_release_addr(addr);
1364	return err;
1365}
1366
1367static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1368{
1369	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1370	struct sock *sk = sock->sk;
1371	int err;
1372
1373	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1374	    sunaddr->sun_family == AF_UNIX)
1375		return unix_autobind(sk);
1376
1377	err = unix_validate_addr(sunaddr, addr_len);
1378	if (err)
1379		return err;
1380
1381	if (sunaddr->sun_path[0])
1382		err = unix_bind_bsd(sk, sunaddr, addr_len);
1383	else
1384		err = unix_bind_abstract(sk, sunaddr, addr_len);
1385
1386	return err;
1387}
1388
1389static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1390{
1391	if (unlikely(sk1 == sk2) || !sk2) {
1392		unix_state_lock(sk1);
1393		return;
1394	}
1395
1396	if (sk1 > sk2)
1397		swap(sk1, sk2);
1398
1399	unix_state_lock(sk1);
1400	unix_state_lock(sk2);
1401}
1402
1403static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1404{
1405	if (unlikely(sk1 == sk2) || !sk2) {
1406		unix_state_unlock(sk1);
1407		return;
1408	}
1409	unix_state_unlock(sk1);
1410	unix_state_unlock(sk2);
1411}
1412
1413static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1414			      int alen, int flags)
1415{
1416	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1417	struct sock *sk = sock->sk;
1418	struct sock *other;
1419	int err;
1420
1421	err = -EINVAL;
1422	if (alen < offsetofend(struct sockaddr, sa_family))
1423		goto out;
1424
1425	if (addr->sa_family != AF_UNSPEC) {
1426		err = unix_validate_addr(sunaddr, alen);
1427		if (err)
1428			goto out;
1429
1430		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1431		if (err)
1432			goto out;
1433
1434		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1435		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1436		    !READ_ONCE(unix_sk(sk)->addr)) {
1437			err = unix_autobind(sk);
1438			if (err)
1439				goto out;
1440		}
1441
1442restart:
1443		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1444		if (IS_ERR(other)) {
1445			err = PTR_ERR(other);
1446			goto out;
1447		}
1448
1449		unix_state_double_lock(sk, other);
1450
1451		/* Apparently VFS overslept socket death. Retry. */
1452		if (sock_flag(other, SOCK_DEAD)) {
1453			unix_state_double_unlock(sk, other);
1454			sock_put(other);
1455			goto restart;
1456		}
1457
1458		err = -EPERM;
1459		if (!unix_may_send(sk, other))
1460			goto out_unlock;
1461
1462		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1463		if (err)
1464			goto out_unlock;
1465
1466		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1467		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
1468	} else {
1469		/*
1470		 *	1003.1g breaking connected state with AF_UNSPEC
1471		 */
1472		other = NULL;
1473		unix_state_double_lock(sk, other);
1474	}
1475
1476	/*
1477	 * If it was connected, reconnect.
1478	 */
1479	if (unix_peer(sk)) {
1480		struct sock *old_peer = unix_peer(sk);
1481
1482		unix_peer(sk) = other;
1483		if (!other)
1484			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
1485		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1486
1487		unix_state_double_unlock(sk, other);
1488
1489		if (other != old_peer) {
1490			unix_dgram_disconnected(sk, old_peer);
1491
1492			unix_state_lock(old_peer);
1493			if (!unix_peer(old_peer))
1494				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
1495			unix_state_unlock(old_peer);
1496		}
1497
1498		sock_put(old_peer);
1499	} else {
1500		unix_peer(sk) = other;
1501		unix_state_double_unlock(sk, other);
1502	}
1503
1504	return 0;
1505
1506out_unlock:
1507	unix_state_double_unlock(sk, other);
1508	sock_put(other);
1509out:
1510	return err;
1511}
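
/* Illustrative sketch of the 1003.1g case handled above: connecting a
 * datagram socket to an address with sa_family == AF_UNSPEC dissolves
 * the current association instead of forming a new one:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	(sk is now disconnected)
 */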
1512
1513static long unix_wait_for_peer(struct sock *other, long timeo)
1514	__releases(&unix_sk(other)->lock)
1515{
1516	struct unix_sock *u = unix_sk(other);
1517	int sched;
1518	DEFINE_WAIT(wait);
1519
1520	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1521
1522	sched = !sock_flag(other, SOCK_DEAD) &&
1523		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1524		unix_recvq_full_lockless(other);
1525
1526	unix_state_unlock(other);
1527
1528	if (sched)
1529		timeo = schedule_timeout(timeo);
1530
1531	finish_wait(&u->peer_wait, &wait);
1532	return timeo;
1533}
1534
1535static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1536			       int addr_len, int flags)
1537{
1538	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1539	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1540	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1541	struct net *net = sock_net(sk);
1542	struct sk_buff *skb = NULL;
1543	unsigned char state;
1544	long timeo;
1545	int err;
1546
1547	err = unix_validate_addr(sunaddr, addr_len);
1548	if (err)
1549		goto out;
1550
1551	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1552	if (err)
1553		goto out;
1554
1555	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1556	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1557	    !READ_ONCE(u->addr)) {
1558		err = unix_autobind(sk);
1559		if (err)
1560			goto out;
1561	}
1562
1563	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1564
1565	/* First of all allocate resources.
 1566	   If we do it after the state is locked,
 1567	   we will have to recheck everything again in any case.
1568	 */
1569
1570	/* create new sock for complete connection */
1571	newsk = unix_create1(net, NULL, 0, sock->type);
1572	if (IS_ERR(newsk)) {
1573		err = PTR_ERR(newsk);
1574		newsk = NULL;
1575		goto out;
1576	}
1577
1578	err = -ENOMEM;
1579
1580	/* Allocate skb for sending to listening sock */
1581	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1582	if (skb == NULL)
1583		goto out;
1584
1585restart:
1586	/*  Find listening sock. */
1587	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1588	if (IS_ERR(other)) {
1589		err = PTR_ERR(other);
1590		other = NULL;
1591		goto out;
1592	}
1593
1594	unix_state_lock(other);
1595
1596	/* Apparently VFS overslept socket death. Retry. */
1597	if (sock_flag(other, SOCK_DEAD)) {
1598		unix_state_unlock(other);
1599		sock_put(other);
1600		goto restart;
1601	}
1602
1603	err = -ECONNREFUSED;
1604	if (other->sk_state != TCP_LISTEN)
1605		goto out_unlock;
1606	if (other->sk_shutdown & RCV_SHUTDOWN)
1607		goto out_unlock;
1608
1609	if (unix_recvq_full_lockless(other)) {
1610		err = -EAGAIN;
1611		if (!timeo)
1612			goto out_unlock;
1613
1614		timeo = unix_wait_for_peer(other, timeo);
1615
1616		err = sock_intr_errno(timeo);
1617		if (signal_pending(current))
1618			goto out;
1619		sock_put(other);
1620		goto restart;
1621	}
1622
1623	/* self connect and simultaneous connect are eliminated
1624	 * by rejecting TCP_LISTEN socket to avoid deadlock.
1625	 */
1626	state = READ_ONCE(sk->sk_state);
1627	if (unlikely(state != TCP_CLOSE)) {
1628		err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
1629		goto out_unlock;
1630	}
1631
1632	unix_state_lock(sk);
1633
1634	if (unlikely(sk->sk_state != TCP_CLOSE)) {
1635		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
1636		unix_state_unlock(sk);
1637		goto out_unlock;
1638	}
1639
1640	err = security_unix_stream_connect(sk, other, newsk);
1641	if (err) {
1642		unix_state_unlock(sk);
1643		goto out_unlock;
1644	}
1645
 1646	/* The way is open! Quickly set all the necessary fields... */
1647
1648	sock_hold(sk);
1649	unix_peer(newsk)	= sk;
1650	newsk->sk_state		= TCP_ESTABLISHED;
1651	newsk->sk_type		= sk->sk_type;
1652	init_peercred(newsk);
1653	newu = unix_sk(newsk);
1654	newu->listener = other;
1655	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1656	otheru = unix_sk(other);
1657
1658	/* copy address information from listening to new sock
1659	 *
1660	 * The contents of *(otheru->addr) and otheru->path
1661	 * are seen fully set up here, since we have found
1662	 * otheru in hash under its lock.  Insertion into the
1663	 * hash chain we'd found it in had been done in an
1664	 * earlier critical area protected by the chain's lock,
1665	 * the same one where we'd set *(otheru->addr) contents,
1666	 * as well as otheru->path and otheru->addr itself.
1667	 *
1668	 * Using smp_store_release() here to set newu->addr
1669	 * is enough to make those stores, as well as stores
1670	 * to newu->path visible to anyone who gets newu->addr
 1671	 * by smp_load_acquire().  IOW, the same guarantees
1672	 * as for unix_sock instances bound in unix_bind() or
1673	 * in unix_autobind().
1674	 */
1675	if (otheru->path.dentry) {
1676		path_get(&otheru->path);
1677		newu->path = otheru->path;
1678	}
1679	refcount_inc(&otheru->addr->refcnt);
1680	smp_store_release(&newu->addr, otheru->addr);
1681
1682	/* Set credentials */
1683	copy_peercred(sk, other);
1684
1685	sock->state	= SS_CONNECTED;
1686	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1687	sock_hold(newsk);
1688
1689	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1690	unix_peer(sk)	= newsk;
1691
1692	unix_state_unlock(sk);
1693
1694	/* take ten and send info to listening sock */
1695	spin_lock(&other->sk_receive_queue.lock);
1696	__skb_queue_tail(&other->sk_receive_queue, skb);
1697	spin_unlock(&other->sk_receive_queue.lock);
1698	unix_state_unlock(other);
1699	other->sk_data_ready(other);
1700	sock_put(other);
1701	return 0;
1702
1703out_unlock:
1704	if (other)
1705		unix_state_unlock(other);
1706
1707out:
1708	kfree_skb(skb);
1709	if (newsk)
1710		unix_release_sock(newsk, 0);
1711	if (other)
1712		sock_put(other);
1713	return err;
1714}
1715
1716static int unix_socketpair(struct socket *socka, struct socket *sockb)
1717{
1718	struct sock *ska = socka->sk, *skb = sockb->sk;
1719
1720	/* Join our sockets back to back */
1721	sock_hold(ska);
1722	sock_hold(skb);
1723	unix_peer(ska) = skb;
1724	unix_peer(skb) = ska;
1725	init_peercred(ska);
1726	init_peercred(skb);
1727
1728	ska->sk_state = TCP_ESTABLISHED;
1729	skb->sk_state = TCP_ESTABLISHED;
1730	socka->state  = SS_CONNECTED;
1731	sockb->state  = SS_CONNECTED;
1732	return 0;
1733}
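
/* Userspace counterpart (sketch): this function is the whole fast path
 * behind
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *
 * Both ends are born connected, so no bind/listen/accept round trip or
 * address is ever involved.
 */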
1734
1735static void unix_sock_inherit_flags(const struct socket *old,
1736				    struct socket *new)
1737{
1738	if (test_bit(SOCK_PASSCRED, &old->flags))
1739		set_bit(SOCK_PASSCRED, &new->flags);
1740	if (test_bit(SOCK_PASSPIDFD, &old->flags))
1741		set_bit(SOCK_PASSPIDFD, &new->flags);
1742	if (test_bit(SOCK_PASSSEC, &old->flags))
1743		set_bit(SOCK_PASSSEC, &new->flags);
1744}
1745
1746static int unix_accept(struct socket *sock, struct socket *newsock,
1747		       struct proto_accept_arg *arg)
1748{
1749	struct sock *sk = sock->sk;
1750	struct sk_buff *skb;
1751	struct sock *tsk;
1752
1753	arg->err = -EOPNOTSUPP;
1754	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1755		goto out;
1756
1757	arg->err = -EINVAL;
1758	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
1759		goto out;
1760
1761	/* If socket state is TCP_LISTEN it cannot change (for now...),
1762	 * so that no locks are necessary.
1763	 */
1764
1765	skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1766				&arg->err);
1767	if (!skb) {
1768		/* This means receive shutdown. */
1769		if (arg->err == 0)
1770			arg->err = -EINVAL;
1771		goto out;
1772	}
1773
1774	tsk = skb->sk;
1775	skb_free_datagram(sk, skb);
1776	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1777
1778	/* attach accepted sock to socket */
1779	unix_state_lock(tsk);
1780	unix_update_edges(unix_sk(tsk));
1781	newsock->state = SS_CONNECTED;
1782	unix_sock_inherit_flags(sock, newsock);
1783	sock_graft(tsk, newsock);
1784	unix_state_unlock(tsk);
1785	return 0;
1786
1787out:
1788	return arg->err;
1789}
1790
1791
1792static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1793{
1794	struct sock *sk = sock->sk;
1795	struct unix_address *addr;
1796	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1797	int err = 0;
1798
1799	if (peer) {
1800		sk = unix_peer_get(sk);
1801
1802		err = -ENOTCONN;
1803		if (!sk)
1804			goto out;
1805		err = 0;
1806	} else {
1807		sock_hold(sk);
1808	}
1809
1810	addr = smp_load_acquire(&unix_sk(sk)->addr);
1811	if (!addr) {
1812		sunaddr->sun_family = AF_UNIX;
1813		sunaddr->sun_path[0] = 0;
1814		err = offsetof(struct sockaddr_un, sun_path);
1815	} else {
1816		err = addr->len;
1817		memcpy(sunaddr, addr->name, addr->len);
1818
1819		if (peer)
1820			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1821					       CGROUP_UNIX_GETPEERNAME);
1822		else
1823			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1824					       CGROUP_UNIX_GETSOCKNAME);
1825	}
1826	sock_put(sk);
1827out:
1828	return err;
1829}
1830
1831/* The "user->unix_inflight" variable is protected by the garbage
1832 * collection lock, and we just read it locklessly here. If you go
1833 * over the limit, there might be a tiny race in actually noticing
1834 * it across threads. Tough.
1835 */
1836static inline bool too_many_unix_fds(struct task_struct *p)
1837{
1838	struct user_struct *user = current_user();
1839
1840	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1841		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1842	return false;
1843}
1844
1845static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1846{
1847	if (too_many_unix_fds(current))
1848		return -ETOOMANYREFS;
1849
1850	UNIXCB(skb).fp = scm->fp;
1851	scm->fp = NULL;
1852
1853	if (unix_prepare_fpl(UNIXCB(skb).fp))
1854		return -ENOMEM;
1855
1856	return 0;
1857}
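
/* Userspace side of fd passing (illustrative sketch; sock_fd and
 * fd_to_send are hypothetical names): the descriptors attached above
 * travel as an SCM_RIGHTS control message, and each in-flight fd is
 * charged to the sending user, which is what too_many_unix_fds() checks
 * against RLIMIT_NOFILE:
 *
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	char buf[CMSG_SPACE(sizeof(int))] = {};
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */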
1858
1859static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1860{
1861	scm->fp = UNIXCB(skb).fp;
1862	UNIXCB(skb).fp = NULL;
1863
1864	unix_destroy_fpl(scm->fp);
1865}
1866
1867static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1868{
1869	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1870}
1871
1872static void unix_destruct_scm(struct sk_buff *skb)
1873{
1874	struct scm_cookie scm;
1875
1876	memset(&scm, 0, sizeof(scm));
1877	scm.pid  = UNIXCB(skb).pid;
1878	if (UNIXCB(skb).fp)
1879		unix_detach_fds(&scm, skb);
1880
1881	/* Alas, it calls VFS */
1882	/* So fscking what? fput() had been SMP-safe since the last Summer */
1883	scm_destroy(&scm);
1884	sock_wfree(skb);
1885}
1886
1887static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1888{
1889	int err = 0;
1890
1891	UNIXCB(skb).pid  = get_pid(scm->pid);
1892	UNIXCB(skb).uid = scm->creds.uid;
1893	UNIXCB(skb).gid = scm->creds.gid;
1894	UNIXCB(skb).fp = NULL;
1895	unix_get_secdata(scm, skb);
1896	if (scm->fp && send_fds)
1897		err = unix_attach_fds(scm, skb);
1898
1899	skb->destructor = unix_destruct_scm;
1900	return err;
1901}
1902
1903static bool unix_passcred_enabled(const struct socket *sock,
1904				  const struct sock *other)
1905{
1906	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1907	       test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1908	       !other->sk_socket ||
1909	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1910	       test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1911}
1912
1913/*
 1914 * Some apps rely on write() giving SCM_CREDENTIALS.
 1915 * We include credentials if the source or destination socket
 1916 * asserted SOCK_PASSCRED or SOCK_PASSPIDFD.
1917 */
1918static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1919			    const struct sock *other)
1920{
1921	if (UNIXCB(skb).pid)
1922		return;
1923	if (unix_passcred_enabled(sock, other)) {
1924		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1925		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1926	}
1927}
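/*
 * Hedged usage sketch: the receiver opts in with SO_PASSCRED (or
 * SO_PASSPIDFD), after which even a plain write() from the peer reaches
 * maybe_add_creds() above and is delivered with an SCM_CREDENTIALS cmsg:
 *
 *	int on = 1;
 *
 *	setsockopt(rfd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	- recvmsg() on rfd now yields the sender's pid/uid/gid
 */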
1928
1929static bool unix_skb_scm_eq(struct sk_buff *skb,
1930			    struct scm_cookie *scm)
1931{
1932	return UNIXCB(skb).pid == scm->pid &&
1933	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1934	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1935	       unix_secdata_eq(scm, skb);
1936}
1937
1938static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1939{
1940	struct scm_fp_list *fp = UNIXCB(skb).fp;
1941	struct unix_sock *u = unix_sk(sk);
1942
1943	if (unlikely(fp && fp->count)) {
1944		atomic_add(fp->count, &u->scm_stat.nr_fds);
1945		unix_add_edges(fp, u);
1946	}
1947}
1948
1949static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1950{
1951	struct scm_fp_list *fp = UNIXCB(skb).fp;
1952	struct unix_sock *u = unix_sk(sk);
1953
1954	if (unlikely(fp && fp->count)) {
1955		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1956		unix_del_edges(fp);
1957	}
1958}
1959
1960/*
1961 *	Send AF_UNIX data.
1962 */
1963
1964static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1965			      size_t len)
1966{
1967	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1968	struct sock *sk = sock->sk, *other = NULL;
1969	struct unix_sock *u = unix_sk(sk);
1970	struct scm_cookie scm;
1971	struct sk_buff *skb;
1972	int data_len = 0;
1973	int sk_locked;
1974	long timeo;
1975	int err;
1976
1977	err = scm_send(sock, msg, &scm, false);
1978	if (err < 0)
1979		return err;
1980
1981	wait_for_unix_gc(scm.fp);
1982
1983	err = -EOPNOTSUPP;
1984	if (msg->msg_flags&MSG_OOB)
1985		goto out;
1986
1987	if (msg->msg_namelen) {
1988		err = unix_validate_addr(sunaddr, msg->msg_namelen);
1989		if (err)
1990			goto out;
1991
1992		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1993							    msg->msg_name,
1994							    &msg->msg_namelen,
1995							    NULL);
1996		if (err)
1997			goto out;
1998	} else {
1999		sunaddr = NULL;
2000		err = -ENOTCONN;
2001		other = unix_peer_get(sk);
2002		if (!other)
2003			goto out;
2004	}
2005
2006	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
2007	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
2008	    !READ_ONCE(u->addr)) {
2009		err = unix_autobind(sk);
2010		if (err)
2011			goto out;
2012	}
2013
2014	err = -EMSGSIZE;
2015	if (len > READ_ONCE(sk->sk_sndbuf) - 32)
2016		goto out;
2017
2018	if (len > SKB_MAX_ALLOC) {
2019		data_len = min_t(size_t,
2020				 len - SKB_MAX_ALLOC,
2021				 MAX_SKB_FRAGS * PAGE_SIZE);
2022		data_len = PAGE_ALIGN(data_len);
2023
2024		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2025	}
2026
2027	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2028				   msg->msg_flags & MSG_DONTWAIT, &err,
2029				   PAGE_ALLOC_COSTLY_ORDER);
2030	if (skb == NULL)
2031		goto out;
2032
2033	err = unix_scm_to_skb(&scm, skb, true);
2034	if (err < 0)
2035		goto out_free;
2036
2037	skb_put(skb, len - data_len);
2038	skb->data_len = data_len;
2039	skb->len = len;
2040	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2041	if (err)
2042		goto out_free;
2043
2044	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2045
2046restart:
2047	if (!other) {
2048		err = -ECONNRESET;
2049		if (sunaddr == NULL)
2050			goto out_free;
2051
2052		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
2053					sk->sk_type);
2054		if (IS_ERR(other)) {
2055			err = PTR_ERR(other);
2056			other = NULL;
2057			goto out_free;
2058		}
2059	}
2060
2061	if (sk_filter(other, skb) < 0) {
2062		/* Toss the packet but do not return any error to the sender */
2063		err = len;
2064		goto out_free;
2065	}
2066
2067	sk_locked = 0;
2068	unix_state_lock(other);
2069restart_locked:
2070	err = -EPERM;
2071	if (!unix_may_send(sk, other))
2072		goto out_unlock;
2073
2074	if (unlikely(sock_flag(other, SOCK_DEAD))) {
2075		/*
2076		 *	Check with POSIX 1003.1g - what should
2077		 *	a datagram error return here?
2078		 */
2079		unix_state_unlock(other);
2080		sock_put(other);
2081
2082		if (!sk_locked)
2083			unix_state_lock(sk);
2084
2085		err = 0;
2086		if (sk->sk_type == SOCK_SEQPACKET) {
2087		/* We get here only when racing with unix_release_sock(),
2088		 * which is clearing @other. Never change the state to
2089		 * TCP_CLOSE, unlike SOCK_DGRAM wants.
2090		 */
2091			unix_state_unlock(sk);
2092			err = -EPIPE;
2093		} else if (unix_peer(sk) == other) {
2094			unix_peer(sk) = NULL;
2095			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2096
2097			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
2098			unix_state_unlock(sk);
2099
2100			unix_dgram_disconnected(sk, other);
2101			sock_put(other);
2102			err = -ECONNREFUSED;
2103		} else {
2104			unix_state_unlock(sk);
2105		}
2106
2107		other = NULL;
2108		if (err)
2109			goto out_free;
2110		goto restart;
2111	}
2112
2113	err = -EPIPE;
2114	if (other->sk_shutdown & RCV_SHUTDOWN)
2115		goto out_unlock;
2116
2117	if (sk->sk_type != SOCK_SEQPACKET) {
2118		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2119		if (err)
2120			goto out_unlock;
2121	}
2122
2123	/* other == sk && unix_peer(other) != sk if
2124	 * - unix_peer(sk) == NULL: the destination address is bound to sk
2125	 * - unix_peer(sk) == sk at lookup time but disconnected before the lock
2126	 */
2127	if (other != sk &&
2128	    unlikely(unix_peer(other) != sk &&
2129	    unix_recvq_full_lockless(other))) {
2130		if (timeo) {
2131			timeo = unix_wait_for_peer(other, timeo);
2132
2133			err = sock_intr_errno(timeo);
2134			if (signal_pending(current))
2135				goto out_free;
2136
2137			goto restart;
2138		}
2139
2140		if (!sk_locked) {
2141			unix_state_unlock(other);
2142			unix_state_double_lock(sk, other);
2143		}
2144
2145		if (unix_peer(sk) != other ||
2146		    unix_dgram_peer_wake_me(sk, other)) {
2147			err = -EAGAIN;
2148			sk_locked = 1;
2149			goto out_unlock;
2150		}
2151
2152		if (!sk_locked) {
2153			sk_locked = 1;
2154			goto restart_locked;
2155		}
2156	}
2157
2158	if (unlikely(sk_locked))
2159		unix_state_unlock(sk);
2160
2161	if (sock_flag(other, SOCK_RCVTSTAMP))
2162		__net_timestamp(skb);
2163	maybe_add_creds(skb, sock, other);
2164	scm_stat_add(other, skb);
2165	skb_queue_tail(&other->sk_receive_queue, skb);
2166	unix_state_unlock(other);
2167	other->sk_data_ready(other);
2168	sock_put(other);
2169	scm_destroy(&scm);
2170	return len;
2171
2172out_unlock:
2173	if (sk_locked)
2174		unix_state_unlock(sk);
2175	unix_state_unlock(other);
2176out_free:
2177	kfree_skb(skb);
2178out:
2179	if (other)
2180		sock_put(other);
2181	scm_destroy(&scm);
2182	return err;
2183}
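/*
 * Minimal userspace sketch of unix_dgram_sendmsg() above (illustration
 * only; error handling elided, and "/tmp/server.sock" is a made-up path):
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *	strcpy(sun.sun_path, "/tmp/server.sock");
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&sun, sizeof(sun));
 *
 * On a non-blocking socket, a full receiver queue surfaces as -EAGAIN
 * from the backpressure logic above.
 */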
2184
2185/* We use paged skbs for stream sockets, limiting occupancy to 32768
2186 * bytes with a minimum of a full page.
2187 */
2188#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
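/*
 * Worked out (assuming 4 KiB pages): get_order(32768) == 3, so
 * UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768.  With 64 KiB pages,
 * get_order(32768) == 0 and the value rounds up to a single full
 * page (65536), honouring the "minimum of a full page" rule above.
 */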
2189
2190#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2191static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2192		     struct scm_cookie *scm, bool fds_sent)
2193{
2194	struct unix_sock *ousk = unix_sk(other);
2195	struct sk_buff *skb;
2196	int err = 0;
2197
2198	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2199
2200	if (!skb)
2201		return err;
2202
2203	err = unix_scm_to_skb(scm, skb, !fds_sent);
2204	if (err < 0) {
2205		kfree_skb(skb);
2206		return err;
2207	}
2208	skb_put(skb, 1);
2209	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2210
2211	if (err) {
2212		kfree_skb(skb);
2213		return err;
2214	}
2215
2216	unix_state_lock(other);
2217
2218	if (sock_flag(other, SOCK_DEAD) ||
2219	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2220		unix_state_unlock(other);
2221		kfree_skb(skb);
2222		return -EPIPE;
2223	}
2224
2225	maybe_add_creds(skb, sock, other);
2226	scm_stat_add(other, skb);
2227
2228	spin_lock(&other->sk_receive_queue.lock);
2229	WRITE_ONCE(ousk->oob_skb, skb);
2230	__skb_queue_tail(&other->sk_receive_queue, skb);
2231	spin_unlock(&other->sk_receive_queue.lock);
2232
2233	sk_send_sigurg(other);
2234	unix_state_unlock(other);
2235	other->sk_data_ready(other);
2236
2237	return err;
2238}
2239#endif
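/*
 * Hedged usage sketch: with CONFIG_AF_UNIX_OOB, a stream sender marks the
 * last byte of a send as out-of-band; queue_oob() above queues that single
 * byte and raises SIGURG at the receiver:
 *
 *	send(fd, "ab", 2, MSG_OOB);	- "a" is inline data, "b" is OOB
 */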
2240
2241static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2242			       size_t len)
2243{
2244	struct sock *sk = sock->sk;
2245	struct sock *other = NULL;
2246	int err, size;
2247	struct sk_buff *skb;
2248	int sent = 0;
2249	struct scm_cookie scm;
2250	bool fds_sent = false;
2251	int data_len;
2252
2253	err = scm_send(sock, msg, &scm, false);
2254	if (err < 0)
2255		return err;
2256
2257	wait_for_unix_gc(scm.fp);
2258
2259	err = -EOPNOTSUPP;
2260	if (msg->msg_flags & MSG_OOB) {
2261#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2262		if (len)
2263			len--;
2264		else
2265#endif
2266			goto out_err;
2267	}
2268
2269	if (msg->msg_namelen) {
2270		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2271		goto out_err;
2272	} else {
2273		err = -ENOTCONN;
2274		other = unix_peer(sk);
2275		if (!other)
2276			goto out_err;
2277	}
2278
2279	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2280		goto pipe_err;
2281
2282	while (sent < len) {
2283		size = len - sent;
2284
2285		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2286			skb = sock_alloc_send_pskb(sk, 0, 0,
2287						   msg->msg_flags & MSG_DONTWAIT,
2288						   &err, 0);
2289		} else {
2290			/* Keep two messages in the pipe so it schedules better */
2291			size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
2292
2293			/* allow fallback to order-0 allocations */
2294			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2295
2296			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2297
2298			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2299
2300			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2301						   msg->msg_flags & MSG_DONTWAIT, &err,
2302						   get_order(UNIX_SKB_FRAGS_SZ));
2303		}
2304		if (!skb)
2305			goto out_err;
2306
2307		/* Only send the fds in the first buffer */
2308		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2309		if (err < 0) {
2310			kfree_skb(skb);
2311			goto out_err;
2312		}
2313		fds_sent = true;
2314
2315		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2316			skb->ip_summed = CHECKSUM_UNNECESSARY;
2317			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2318						   sk->sk_allocation);
2319			if (err < 0) {
2320				kfree_skb(skb);
2321				goto out_err;
2322			}
2323			size = err;
2324			refcount_add(size, &sk->sk_wmem_alloc);
2325		} else {
2326			skb_put(skb, size - data_len);
2327			skb->data_len = data_len;
2328			skb->len = size;
2329			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2330			if (err) {
2331				kfree_skb(skb);
2332				goto out_err;
2333			}
2334		}
2335
2336		unix_state_lock(other);
2337
2338		if (sock_flag(other, SOCK_DEAD) ||
2339		    (other->sk_shutdown & RCV_SHUTDOWN))
2340			goto pipe_err_free;
2341
2342		maybe_add_creds(skb, sock, other);
2343		scm_stat_add(other, skb);
2344		skb_queue_tail(&other->sk_receive_queue, skb);
2345		unix_state_unlock(other);
2346		other->sk_data_ready(other);
2347		sent += size;
2348	}
2349
2350#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2351	if (msg->msg_flags & MSG_OOB) {
2352		err = queue_oob(sock, msg, other, &scm, fds_sent);
2353		if (err)
2354			goto out_err;
2355		sent++;
2356	}
2357#endif
2358
2359	scm_destroy(&scm);
2360
2361	return sent;
2362
2363pipe_err_free:
2364	unix_state_unlock(other);
2365	kfree_skb(skb);
2366pipe_err:
2367	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2368		send_sig(SIGPIPE, current, 0);
2369	err = -EPIPE;
2370out_err:
2371	scm_destroy(&scm);
2372	return sent ? : err;
2373}
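/*
 * Illustrative note (a sketch; the helper name is made up): writing to a
 * peer that has shut down normally raises SIGPIPE; MSG_NOSIGNAL turns
 * that into a plain error, matching the pipe_err handling above:
 *
 *	if (send(fd, buf, len, MSG_NOSIGNAL) < 0 && errno == EPIPE)
 *		handle_peer_gone();	- no signal delivered
 */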
2374
2375static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2376				  size_t len)
2377{
2378	int err;
2379	struct sock *sk = sock->sk;
2380
2381	err = sock_error(sk);
2382	if (err)
2383		return err;
2384
2385	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2386		return -ENOTCONN;
2387
2388	if (msg->msg_namelen)
2389		msg->msg_namelen = 0;
2390
2391	return unix_dgram_sendmsg(sock, msg, len);
2392}
2393
2394static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2395				  size_t size, int flags)
2396{
2397	struct sock *sk = sock->sk;
2398
2399	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2400		return -ENOTCONN;
2401
2402	return unix_dgram_recvmsg(sock, msg, size, flags);
2403}
2404
2405static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2406{
2407	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2408
2409	if (addr) {
2410		msg->msg_namelen = addr->len;
2411		memcpy(msg->msg_name, addr->name, addr->len);
2412	}
2413}
2414
2415int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2416			 int flags)
2417{
2418	struct scm_cookie scm;
2419	struct socket *sock = sk->sk_socket;
2420	struct unix_sock *u = unix_sk(sk);
2421	struct sk_buff *skb, *last;
2422	long timeo;
2423	int skip;
2424	int err;
2425
2426	err = -EOPNOTSUPP;
2427	if (flags&MSG_OOB)
2428		goto out;
2429
2430	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2431
2432	do {
2433		mutex_lock(&u->iolock);
2434
2435		skip = sk_peek_offset(sk, flags);
2436		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2437					      &skip, &err, &last);
2438		if (skb) {
2439			if (!(flags & MSG_PEEK))
2440				scm_stat_del(sk, skb);
2441			break;
2442		}
2443
2444		mutex_unlock(&u->iolock);
2445
2446		if (err != -EAGAIN)
2447			break;
2448	} while (timeo &&
2449		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2450					      &err, &timeo, last));
2451
2452	if (!skb) { /* implies iolock unlocked */
2453		unix_state_lock(sk);
2454		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2455		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2456		    (sk->sk_shutdown & RCV_SHUTDOWN))
2457			err = 0;
2458		unix_state_unlock(sk);
2459		goto out;
2460	}
2461
2462	if (wq_has_sleeper(&u->peer_wait))
2463		wake_up_interruptible_sync_poll(&u->peer_wait,
2464						EPOLLOUT | EPOLLWRNORM |
2465						EPOLLWRBAND);
2466
2467	if (msg->msg_name) {
2468		unix_copy_addr(msg, skb->sk);
2469
2470		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2471						      msg->msg_name,
2472						      &msg->msg_namelen);
2473	}
2474
2475	if (size > skb->len - skip)
2476		size = skb->len - skip;
2477	else if (size < skb->len - skip)
2478		msg->msg_flags |= MSG_TRUNC;
2479
2480	err = skb_copy_datagram_msg(skb, skip, msg, size);
2481	if (err)
2482		goto out_free;
2483
2484	if (sock_flag(sk, SOCK_RCVTSTAMP))
2485		__sock_recv_timestamp(msg, sk, skb);
2486
2487	memset(&scm, 0, sizeof(scm));
2488
2489	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2490	unix_set_secdata(&scm, skb);
2491
2492	if (!(flags & MSG_PEEK)) {
2493		if (UNIXCB(skb).fp)
2494			unix_detach_fds(&scm, skb);
2495
2496		sk_peek_offset_bwd(sk, skb->len);
2497	} else {
2498		/* It is questionable what to do on MSG_PEEK; we could:
2499		   - not return fds - good, but too simple 8)
2500		   - return fds, but not return them again on read (the old
2501		     strategy, apparently wrong)
2502		   - clone fds (chosen here, as it is the most universal
2503		     solution)
2504
2505		   POSIX 1003.1g does not actually define this clearly
2506		   at all - but then it doesn't define a lot of things
2507		   clearly!
2508
2509		*/
2510
2511		sk_peek_offset_fwd(sk, size);
2512
2513		if (UNIXCB(skb).fp)
2514			unix_peek_fds(&scm, skb);
2515	}
2516	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2517
2518	scm_recv_unix(sock, msg, &scm, flags);
2519
2520out_free:
2521	skb_free_datagram(sk, skb);
2522	mutex_unlock(&u->iolock);
2523out:
2524	return err;
2525}
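/*
 * Hedged example of the MSG_PEEK strategy discussed above: peeking a
 * datagram that carries SCM_RIGHTS installs cloned descriptors, and the
 * later non-peek read installs them again, so userspace ends up with two
 * descriptors referring to the same open files:
 *
 *	recvmsg(fd, &msg, MSG_PEEK);	- cmsg delivers fd A
 *	recvmsg(fd, &msg, 0);		- cmsg delivers fd B, same file
 */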
2526
2527static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2528			      int flags)
2529{
2530	struct sock *sk = sock->sk;
2531
2532#ifdef CONFIG_BPF_SYSCALL
2533	const struct proto *prot = READ_ONCE(sk->sk_prot);
2534
2535	if (prot != &unix_dgram_proto)
2536		return prot->recvmsg(sk, msg, size, flags, NULL);
2537#endif
2538	return __unix_dgram_recvmsg(sk, msg, size, flags);
2539}
2540
2541static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2542{
2543	struct unix_sock *u = unix_sk(sk);
2544	struct sk_buff *skb;
2545	int err;
2546
2547	mutex_lock(&u->iolock);
2548	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2549	mutex_unlock(&u->iolock);
2550	if (!skb)
2551		return err;
2552
2553	return recv_actor(sk, skb);
2554}
2555
2556/*
2557 *	Sleep until more data has arrived. But check for races.
2558 */
2559static long unix_stream_data_wait(struct sock *sk, long timeo,
2560				  struct sk_buff *last, unsigned int last_len,
2561				  bool freezable)
2562{
2563	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2564	struct sk_buff *tail;
2565	DEFINE_WAIT(wait);
2566
2567	unix_state_lock(sk);
2568
2569	for (;;) {
2570		prepare_to_wait(sk_sleep(sk), &wait, state);
2571
2572		tail = skb_peek_tail(&sk->sk_receive_queue);
2573		if (tail != last ||
2574		    (tail && tail->len != last_len) ||
2575		    sk->sk_err ||
2576		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2577		    signal_pending(current) ||
2578		    !timeo)
2579			break;
2580
2581		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2582		unix_state_unlock(sk);
2583		timeo = schedule_timeout(timeo);
2584		unix_state_lock(sk);
2585
2586		if (sock_flag(sk, SOCK_DEAD))
2587			break;
2588
2589		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2590	}
2591
2592	finish_wait(sk_sleep(sk), &wait);
2593	unix_state_unlock(sk);
2594	return timeo;
2595}
2596
2597static unsigned int unix_skb_len(const struct sk_buff *skb)
2598{
2599	return skb->len - UNIXCB(skb).consumed;
2600}
2601
2602struct unix_stream_read_state {
2603	int (*recv_actor)(struct sk_buff *, int, int,
2604			  struct unix_stream_read_state *);
2605	struct socket *socket;
2606	struct msghdr *msg;
2607	struct pipe_inode_info *pipe;
2608	size_t size;
2609	int flags;
2610	unsigned int splice_flags;
2611};
2612
2613#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2614static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2615{
2616	struct socket *sock = state->socket;
2617	struct sock *sk = sock->sk;
2618	struct unix_sock *u = unix_sk(sk);
2619	int chunk = 1;
2620	struct sk_buff *oob_skb;
2621
2622	mutex_lock(&u->iolock);
2623	unix_state_lock(sk);
2624	spin_lock(&sk->sk_receive_queue.lock);
2625
2626	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2627		spin_unlock(&sk->sk_receive_queue.lock);
2628		unix_state_unlock(sk);
2629		mutex_unlock(&u->iolock);
2630		return -EINVAL;
2631	}
2632
2633	oob_skb = u->oob_skb;
2634
2635	if (!(state->flags & MSG_PEEK))
2636		WRITE_ONCE(u->oob_skb, NULL);
2637
2638	spin_unlock(&sk->sk_receive_queue.lock);
2639	unix_state_unlock(sk);
2640
2641	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2642
2643	if (!(state->flags & MSG_PEEK))
2644		UNIXCB(oob_skb).consumed += 1;
2645
2646	mutex_unlock(&u->iolock);
2647
2648	if (chunk < 0)
2649		return -EFAULT;
2650
2651	state->msg->msg_flags |= MSG_OOB;
2652	return 1;
2653}
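/*
 * Userspace sketch (illustrative): the pending OOB byte is fetched with
 * MSG_OOB unless SO_OOBINLINE is set, in which case -EINVAL is returned
 * above and the byte is read from the normal stream instead:
 *
 *	char c;
 *
 *	if (recv(fd, &c, 1, MSG_OOB) == 1)
 *		- c is the out-of-band byte
 */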
2654
2655static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2656				  int flags, int copied)
2657{
2658	struct sk_buff *read_skb = NULL, *unread_skb = NULL;
2659	struct unix_sock *u = unix_sk(sk);
2660
2661	if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
2662		return skb;
2663
2664	spin_lock(&sk->sk_receive_queue.lock);
2665
2666	if (!unix_skb_len(skb)) {
2667		if (copied && (!u->oob_skb || skb == u->oob_skb)) {
2668			skb = NULL;
2669		} else if (flags & MSG_PEEK) {
2670			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2671		} else {
2672			read_skb = skb;
2673			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2674			__skb_unlink(read_skb, &sk->sk_receive_queue);
2675		}
2676
2677		if (!skb)
2678			goto unlock;
2679	}
2680
2681	if (skb != u->oob_skb)
2682		goto unlock;
2683
2684	if (copied) {
2685		skb = NULL;
2686	} else if (!(flags & MSG_PEEK)) {
2687		WRITE_ONCE(u->oob_skb, NULL);
2688
2689		if (!sock_flag(sk, SOCK_URGINLINE)) {
2690			__skb_unlink(skb, &sk->sk_receive_queue);
2691			unread_skb = skb;
2692			skb = skb_peek(&sk->sk_receive_queue);
2693		}
2694	} else if (!sock_flag(sk, SOCK_URGINLINE)) {
2695		skb = skb_peek_next(skb, &sk->sk_receive_queue);
2696	}
2697
2698unlock:
2699	spin_unlock(&sk->sk_receive_queue.lock);
2700
2701	consume_skb(read_skb);
2702	kfree_skb(unread_skb);
2703
2704	return skb;
2705}
2706#endif
2707
2708static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2709{
2710	struct unix_sock *u = unix_sk(sk);
2711	struct sk_buff *skb;
2712	int err;
2713
2714	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
2715		return -ENOTCONN;
2716
2717	mutex_lock(&u->iolock);
2718	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2719	mutex_unlock(&u->iolock);
2720	if (!skb)
2721		return err;
2722
2723#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2724	if (unlikely(skb == READ_ONCE(u->oob_skb))) {
2725		bool drop = false;
2726
2727		unix_state_lock(sk);
2728
2729		if (sock_flag(sk, SOCK_DEAD)) {
2730			unix_state_unlock(sk);
2731			kfree_skb(skb);
2732			return -ECONNRESET;
2733		}
2734
2735		spin_lock(&sk->sk_receive_queue.lock);
2736		if (likely(skb == u->oob_skb)) {
2737			WRITE_ONCE(u->oob_skb, NULL);
2738			drop = true;
2739		}
2740		spin_unlock(&sk->sk_receive_queue.lock);
2741
2742		unix_state_unlock(sk);
2743
2744		if (drop) {
2745			kfree_skb(skb);
2746			return -EAGAIN;
2747		}
2748	}
2749#endif
2750
2751	return recv_actor(sk, skb);
2752}
2753
2754static int unix_stream_read_generic(struct unix_stream_read_state *state,
2755				    bool freezable)
2756{
2757	struct scm_cookie scm;
2758	struct socket *sock = state->socket;
2759	struct sock *sk = sock->sk;
2760	struct unix_sock *u = unix_sk(sk);
2761	int copied = 0;
2762	int flags = state->flags;
2763	int noblock = flags & MSG_DONTWAIT;
2764	bool check_creds = false;
2765	int target;
2766	int err = 0;
2767	long timeo;
2768	int skip;
2769	size_t size = state->size;
2770	unsigned int last_len;
2771
2772	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
2773		err = -EINVAL;
2774		goto out;
2775	}
2776
2777	if (unlikely(flags & MSG_OOB)) {
2778		err = -EOPNOTSUPP;
2779#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2780		err = unix_stream_recv_urg(state);
2781#endif
2782		goto out;
2783	}
2784
2785	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2786	timeo = sock_rcvtimeo(sk, noblock);
2787
2788	memset(&scm, 0, sizeof(scm));
2789
2790	/* Lock the socket to prevent queue disordering
2791	 * while we sleep in memcpy_to_msg()
2792	 */
2793	mutex_lock(&u->iolock);
2794
2795	skip = max(sk_peek_offset(sk, flags), 0);
2796
2797	do {
2798		struct sk_buff *skb, *last;
2799		int chunk;
2800
2801redo:
2802		unix_state_lock(sk);
2803		if (sock_flag(sk, SOCK_DEAD)) {
2804			err = -ECONNRESET;
2805			goto unlock;
2806		}
2807		last = skb = skb_peek(&sk->sk_receive_queue);
2808		last_len = last ? last->len : 0;
2809
2810again:
2811#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2812		if (skb) {
2813			skb = manage_oob(skb, sk, flags, copied);
2814			if (!skb && copied) {
2815				unix_state_unlock(sk);
2816				break;
2817			}
2818		}
2819#endif
2820		if (skb == NULL) {
2821			if (copied >= target)
2822				goto unlock;
2823
2824			/*
2825			 *	POSIX 1003.1g mandates this order.
2826			 */
2827
2828			err = sock_error(sk);
2829			if (err)
2830				goto unlock;
2831			if (sk->sk_shutdown & RCV_SHUTDOWN)
2832				goto unlock;
2833
2834			unix_state_unlock(sk);
2835			if (!timeo) {
2836				err = -EAGAIN;
2837				break;
2838			}
2839
2840			mutex_unlock(&u->iolock);
2841
2842			timeo = unix_stream_data_wait(sk, timeo, last,
2843						      last_len, freezable);
2844
2845			if (signal_pending(current)) {
2846				err = sock_intr_errno(timeo);
2847				scm_destroy(&scm);
2848				goto out;
2849			}
2850
2851			mutex_lock(&u->iolock);
2852			goto redo;
2853unlock:
2854			unix_state_unlock(sk);
2855			break;
2856		}
2857
2858		while (skip >= unix_skb_len(skb)) {
2859			skip -= unix_skb_len(skb);
2860			last = skb;
2861			last_len = skb->len;
2862			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2863			if (!skb)
2864				goto again;
2865		}
2866
2867		unix_state_unlock(sk);
2868
2869		if (check_creds) {
2870			/* Never glue messages from different writers */
2871			if (!unix_skb_scm_eq(skb, &scm))
2872				break;
2873		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2874			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2875			/* Copy credentials */
2876			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2877			unix_set_secdata(&scm, skb);
2878			check_creds = true;
2879		}
2880
2881		/* Copy address just once */
2882		if (state->msg && state->msg->msg_name) {
2883			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2884					 state->msg->msg_name);
2885			unix_copy_addr(state->msg, skb->sk);
2886
2887			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2888							      state->msg->msg_name,
2889							      &state->msg->msg_namelen);
2890
2891			sunaddr = NULL;
2892		}
2893
2894		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2895		chunk = state->recv_actor(skb, skip, chunk, state);
2896		if (chunk < 0) {
2897			if (copied == 0)
2898				copied = -EFAULT;
2899			break;
2900		}
2901		copied += chunk;
2902		size -= chunk;
2903
2904		/* Mark read part of skb as used */
2905		if (!(flags & MSG_PEEK)) {
2906			UNIXCB(skb).consumed += chunk;
2907
2908			sk_peek_offset_bwd(sk, chunk);
2909
2910			if (UNIXCB(skb).fp) {
2911				scm_stat_del(sk, skb);
2912				unix_detach_fds(&scm, skb);
2913			}
2914
2915			if (unix_skb_len(skb))
2916				break;
2917
2918			skb_unlink(skb, &sk->sk_receive_queue);
2919			consume_skb(skb);
2920
2921			if (scm.fp)
2922				break;
2923		} else {
2924			/* It is questionable; see the note in unix_dgram_recvmsg().
2925			 */
2926			if (UNIXCB(skb).fp)
2927				unix_peek_fds(&scm, skb);
2928
2929			sk_peek_offset_fwd(sk, chunk);
2930
2931			if (UNIXCB(skb).fp)
2932				break;
2933
2934			skip = 0;
2935			last = skb;
2936			last_len = skb->len;
2937			unix_state_lock(sk);
2938			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2939			if (skb)
2940				goto again;
2941			unix_state_unlock(sk);
2942			break;
2943		}
2944	} while (size);
2945
2946	mutex_unlock(&u->iolock);
2947	if (state->msg)
2948		scm_recv_unix(sock, state->msg, &scm, flags);
2949	else
2950		scm_destroy(&scm);
2951out:
2952	return copied ? : err;
2953}
2954
2955static int unix_stream_read_actor(struct sk_buff *skb,
2956				  int skip, int chunk,
2957				  struct unix_stream_read_state *state)
2958{
2959	int ret;
2960
2961	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2962				    state->msg, chunk);
2963	return ret ?: chunk;
2964}
2965
2966int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2967			  size_t size, int flags)
2968{
2969	struct unix_stream_read_state state = {
2970		.recv_actor = unix_stream_read_actor,
2971		.socket = sk->sk_socket,
2972		.msg = msg,
2973		.size = size,
2974		.flags = flags
2975	};
2976
2977	return unix_stream_read_generic(&state, true);
2978}
2979
2980static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2981			       size_t size, int flags)
2982{
2983	struct unix_stream_read_state state = {
2984		.recv_actor = unix_stream_read_actor,
2985		.socket = sock,
2986		.msg = msg,
2987		.size = size,
2988		.flags = flags
2989	};
2990
2991#ifdef CONFIG_BPF_SYSCALL
2992	struct sock *sk = sock->sk;
2993	const struct proto *prot = READ_ONCE(sk->sk_prot);
2994
2995	if (prot != &unix_stream_proto)
2996		return prot->recvmsg(sk, msg, size, flags, NULL);
2997#endif
2998	return unix_stream_read_generic(&state, true);
2999}
3000
3001static int unix_stream_splice_actor(struct sk_buff *skb,
3002				    int skip, int chunk,
3003				    struct unix_stream_read_state *state)
3004{
3005	return skb_splice_bits(skb, state->socket->sk,
3006			       UNIXCB(skb).consumed + skip,
3007			       state->pipe, chunk, state->splice_flags);
3008}
3009
3010static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
3011				       struct pipe_inode_info *pipe,
3012				       size_t size, unsigned int flags)
3013{
3014	struct unix_stream_read_state state = {
3015		.recv_actor = unix_stream_splice_actor,
3016		.socket = sock,
3017		.pipe = pipe,
3018		.size = size,
3019		.splice_flags = flags,
3020	};
3021
3022	if (unlikely(*ppos))
3023		return -ESPIPE;
3024
3025	if (sock->file->f_flags & O_NONBLOCK ||
3026	    flags & SPLICE_F_NONBLOCK)
3027		state.flags = MSG_DONTWAIT;
3028
3029	return unix_stream_read_generic(&state, false);
3030}
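/*
 * Hedged usage sketch: unix_stream_splice_read() backs splice(2) from a
 * unix stream socket into a pipe, avoiding a userspace copy:
 *
 *	ssize_t n = splice(sockfd, NULL, pipefd[1], NULL, 65536,
 *			   SPLICE_F_NONBLOCK);
 *
 * A non-zero socket offset is rejected with -ESPIPE, as above.
 */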
3031
3032static int unix_shutdown(struct socket *sock, int mode)
3033{
3034	struct sock *sk = sock->sk;
3035	struct sock *other;
3036
3037	if (mode < SHUT_RD || mode > SHUT_RDWR)
3038		return -EINVAL;
3039	/* This maps:
3040	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
3041	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
3042	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3043	 */
3044	++mode;
3045
3046	unix_state_lock(sk);
3047	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3048	other = unix_peer(sk);
3049	if (other)
3050		sock_hold(other);
3051	unix_state_unlock(sk);
3052	sk->sk_state_change(sk);
3053
3054	if (other &&
3055		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3056
3057		int peer_mode = 0;
3058		const struct proto *prot = READ_ONCE(other->sk_prot);
3059
3060		if (prot->unhash)
3061			prot->unhash(other);
3062		if (mode&RCV_SHUTDOWN)
3063			peer_mode |= SEND_SHUTDOWN;
3064		if (mode&SEND_SHUTDOWN)
3065			peer_mode |= RCV_SHUTDOWN;
3066		unix_state_lock(other);
3067		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3068		unix_state_unlock(other);
3069		other->sk_state_change(other);
3070		if (peer_mode == SHUTDOWN_MASK)
3071			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3072		else if (peer_mode & RCV_SHUTDOWN)
3073			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3074	}
3075	if (other)
3076		sock_put(other);
3077
3078	return 0;
3079}
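/*
 * Illustrative sketch of the peer notification above: after one end does
 *
 *	shutdown(fd, SHUT_WR);
 *
 * the peer's pending read() returns 0 (EOF), because SEND_SHUTDOWN on
 * this side is mirrored as RCV_SHUTDOWN on the other.
 */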
3080
3081long unix_inq_len(struct sock *sk)
3082{
3083	struct sk_buff *skb;
3084	long amount = 0;
3085
3086	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
3087		return -EINVAL;
3088
3089	spin_lock(&sk->sk_receive_queue.lock);
3090	if (sk->sk_type == SOCK_STREAM ||
3091	    sk->sk_type == SOCK_SEQPACKET) {
3092		skb_queue_walk(&sk->sk_receive_queue, skb)
3093			amount += unix_skb_len(skb);
3094	} else {
3095		skb = skb_peek(&sk->sk_receive_queue);
3096		if (skb)
3097			amount = skb->len;
3098	}
3099	spin_unlock(&sk->sk_receive_queue.lock);
3100
3101	return amount;
3102}
3103EXPORT_SYMBOL_GPL(unix_inq_len);
3104
3105long unix_outq_len(struct sock *sk)
3106{
3107	return sk_wmem_alloc_get(sk);
3108}
3109EXPORT_SYMBOL_GPL(unix_outq_len);
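/*
 * Hedged usage sketch: the two helpers above back the SIOCINQ/SIOCOUTQ
 * ioctls handled in unix_ioctl() below:
 *
 *	int inq, outq;
 *
 *	ioctl(fd, SIOCINQ, &inq);	- bytes queued for reading
 *	ioctl(fd, SIOCOUTQ, &outq);	- bytes not yet consumed by the peer
 */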
3110
3111static int unix_open_file(struct sock *sk)
3112{
3113	struct path path;
3114	struct file *f;
3115	int fd;
3116
3117	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3118		return -EPERM;
3119
3120	if (!smp_load_acquire(&unix_sk(sk)->addr))
3121		return -ENOENT;
3122
3123	path = unix_sk(sk)->path;
3124	if (!path.dentry)
3125		return -ENOENT;
3126
3127	path_get(&path);
3128
3129	fd = get_unused_fd_flags(O_CLOEXEC);
3130	if (fd < 0)
3131		goto out;
3132
3133	f = dentry_open(&path, O_PATH, current_cred());
3134	if (IS_ERR(f)) {
3135		put_unused_fd(fd);
3136		fd = PTR_ERR(f);
3137		goto out;
3138	}
3139
3140	fd_install(fd, f);
3141out:
3142	path_put(&path);
3143
3144	return fd;
3145}
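/*
 * Illustrative sketch: SIOCUNIXFILE (dispatched below) returns an O_PATH
 * file descriptor for the filesystem object the socket is bound to; note
 * the CAP_NET_ADMIN check above:
 *
 *	int pathfd = ioctl(sockfd, SIOCUNIXFILE, 0);
 *
 *	if (pathfd >= 0)
 *		- e.g. fstatat(pathfd, "", &st, AT_EMPTY_PATH)
 */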
3146
3147static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3148{
3149	struct sock *sk = sock->sk;
3150	long amount = 0;
3151	int err;
3152
3153	switch (cmd) {
3154	case SIOCOUTQ:
3155		amount = unix_outq_len(sk);
3156		err = put_user(amount, (int __user *)arg);
3157		break;
3158	case SIOCINQ:
3159		amount = unix_inq_len(sk);
3160		if (amount < 0)
3161			err = amount;
3162		else
3163			err = put_user(amount, (int __user *)arg);
3164		break;
3165	case SIOCUNIXFILE:
3166		err = unix_open_file(sk);
3167		break;
3168#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3169	case SIOCATMARK:
3170		{
3171			struct unix_sock *u = unix_sk(sk);
3172			struct sk_buff *skb;
3173			int answ = 0;
3174
3175			mutex_lock(&u->iolock);
3176
3177			skb = skb_peek(&sk->sk_receive_queue);
3178			if (skb) {
3179				struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
3180				struct sk_buff *next_skb;
3181
3182				next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
3183
3184				if (skb == oob_skb ||
3185				    (!unix_skb_len(skb) &&
3186				     (!oob_skb || next_skb == oob_skb)))
3187					answ = 1;
3188			}
3189
3190			mutex_unlock(&u->iolock);
3191
3192			err = put_user(answ, (int __user *)arg);
3193		}
3194		break;
3195#endif
3196	default:
3197		err = -ENOIOCTLCMD;
3198		break;
3199	}
3200	return err;
3201}
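/*
 * Hedged sketch for the SIOCATMARK case above: as with TCP, it reports
 * whether the next read would start at the out-of-band mark:
 *
 *	int atmark = 0;
 *
 *	ioctl(fd, SIOCATMARK, &atmark);
 *	if (atmark)
 *		recv(fd, &c, 1, MSG_OOB);
 */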
3202
3203#ifdef CONFIG_COMPAT
3204static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3205{
3206	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3207}
3208#endif
3209
3210static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3211{
3212	struct sock *sk = sock->sk;
3213	unsigned char state;
3214	__poll_t mask;
3215	u8 shutdown;
3216
3217	sock_poll_wait(file, sock, wait);
3218	mask = 0;
3219	shutdown = READ_ONCE(sk->sk_shutdown);
3220	state = READ_ONCE(sk->sk_state);
3221
3222	/* exceptional events? */
3223	if (READ_ONCE(sk->sk_err))
3224		mask |= EPOLLERR;
3225	if (shutdown == SHUTDOWN_MASK)
3226		mask |= EPOLLHUP;
3227	if (shutdown & RCV_SHUTDOWN)
3228		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3229
3230	/* readable? */
3231	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3232		mask |= EPOLLIN | EPOLLRDNORM;
3233	if (sk_is_readable(sk))
3234		mask |= EPOLLIN | EPOLLRDNORM;
3235#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3236	if (READ_ONCE(unix_sk(sk)->oob_skb))
3237		mask |= EPOLLPRI;
3238#endif
3239
3240	/* Connection-based sockets need to check for termination and startup */
3241	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3242	    state == TCP_CLOSE)
3243		mask |= EPOLLHUP;
3244
3245	/*
3246	 * We also report the socket as writable when the other side has shut
3247	 * down the connection. This prevents stuck sockets.
3248	 */
3249	if (unix_writable(sk, state))
3250		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3251
3252	return mask;
3253}
3254
3255static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3256				    poll_table *wait)
3257{
3258	struct sock *sk = sock->sk, *other;
3259	unsigned int writable;
3260	unsigned char state;
3261	__poll_t mask;
3262	u8 shutdown;
3263
3264	sock_poll_wait(file, sock, wait);
3265	mask = 0;
3266	shutdown = READ_ONCE(sk->sk_shutdown);
3267	state = READ_ONCE(sk->sk_state);
3268
3269	/* exceptional events? */
3270	if (READ_ONCE(sk->sk_err) ||
3271	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3272		mask |= EPOLLERR |
3273			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3274
3275	if (shutdown & RCV_SHUTDOWN)
3276		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3277	if (shutdown == SHUTDOWN_MASK)
3278		mask |= EPOLLHUP;
3279
3280	/* readable? */
3281	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3282		mask |= EPOLLIN | EPOLLRDNORM;
3283	if (sk_is_readable(sk))
3284		mask |= EPOLLIN | EPOLLRDNORM;
3285
3286	/* Connection-based sockets need to check for termination and startup */
3287	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
3288		mask |= EPOLLHUP;
3289
3290	/* No write status requested, avoid expensive OUT tests. */
3291	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3292		return mask;
3293
3294	writable = unix_writable(sk, state);
3295	if (writable) {
3296		unix_state_lock(sk);
3297
3298		other = unix_peer(sk);
3299		if (other && unix_peer(other) != sk &&
3300		    unix_recvq_full_lockless(other) &&
3301		    unix_dgram_peer_wake_me(sk, other))
3302			writable = 0;
3303
3304		unix_state_unlock(sk);
3305	}
3306
3307	if (writable)
3308		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3309	else
3310		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3311
3312	return mask;
3313}
3314
3315#ifdef CONFIG_PROC_FS
3316
3317#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3318
3319#define get_bucket(x) ((x) >> BUCKET_SPACE)
3320#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3321#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
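/*
 * Worked example (assuming 64-bit longs and UNIX_HASH_BITS == 9): the
 * seq_file position packs the bucket into the top bits and a 1-based
 * in-bucket offset into the low BUCKET_SPACE == 64 - 10 - 1 == 53 bits,
 * so set_bucket_offset(3, 1) == (3UL << 53) | 1.
 */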
3322
3323static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3324{
3325	unsigned long offset = get_offset(*pos);
3326	unsigned long bucket = get_bucket(*pos);
3327	unsigned long count = 0;
3328	struct sock *sk;
3329
3330	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3331	     sk; sk = sk_next(sk)) {
3332		if (++count == offset)
3333			break;
3334	}
3335
3336	return sk;
3337}
3338
3339static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3340{
3341	unsigned long bucket = get_bucket(*pos);
3342	struct net *net = seq_file_net(seq);
3343	struct sock *sk;
3344
3345	while (bucket < UNIX_HASH_SIZE) {
3346		spin_lock(&net->unx.table.locks[bucket]);
3347
3348		sk = unix_from_bucket(seq, pos);
3349		if (sk)
3350			return sk;
3351
3352		spin_unlock(&net->unx.table.locks[bucket]);
3353
3354		*pos = set_bucket_offset(++bucket, 1);
3355	}
3356
3357	return NULL;
3358}
3359
3360static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3361				  loff_t *pos)
3362{
3363	unsigned long bucket = get_bucket(*pos);
3364
3365	sk = sk_next(sk);
3366	if (sk)
3367		return sk;
3368
3369
3370	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3371
3372	*pos = set_bucket_offset(++bucket, 1);
3373
3374	return unix_get_first(seq, pos);
3375}
3376
3377static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3378{
3379	if (!*pos)
3380		return SEQ_START_TOKEN;
3381
3382	return unix_get_first(seq, pos);
3383}
3384
3385static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3386{
3387	++*pos;
3388
3389	if (v == SEQ_START_TOKEN)
3390		return unix_get_first(seq, pos);
3391
3392	return unix_get_next(seq, v, pos);
3393}
3394
3395static void unix_seq_stop(struct seq_file *seq, void *v)
3396{
3397	struct sock *sk = v;
3398
3399	if (sk)
3400		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3401}
3402
3403static int unix_seq_show(struct seq_file *seq, void *v)
3404{
3405
3406	if (v == SEQ_START_TOKEN)
3407		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3408			 "Inode Path\n");
3409	else {
3410		struct sock *s = v;
3411		struct unix_sock *u = unix_sk(s);
3412		unix_state_lock(s);
3413
3414		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3415			s,
3416			refcount_read(&s->sk_refcnt),
3417			0,
3418			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3419			s->sk_type,
3420			s->sk_socket ?
3421			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3422			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3423			sock_i_ino(s));
3424
3425		if (u->addr) {	/* under a hash table lock here */
3426			int i, len;
3427			seq_putc(seq, ' ');
3428
3429			i = 0;
3430			len = u->addr->len -
3431				offsetof(struct sockaddr_un, sun_path);
3432			if (u->addr->name->sun_path[0]) {
3433				len--;
3434			} else {
3435				seq_putc(seq, '@');
3436				i++;
3437			}
3438			for ( ; i < len; i++)
3439				seq_putc(seq, u->addr->name->sun_path[i] ?:
3440					 '@');
3441		}
3442		unix_state_unlock(s);
3443		seq_putc(seq, '\n');
3444	}
3445
3446	return 0;
3447}
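/*
 * Illustrative /proc/net/unix line produced by unix_seq_show() above
 * (all values made up; %pK typically prints a hashed or zeroed pointer):
 *
 *	0000000000000000: 00000002 00000000 00010000 0001 01 28271 /run/systemd/journal/stdout
 *
 * i.e. refcount 2, protocol 0, flags __SO_ACCEPTCON (listening), type
 * SOCK_STREAM, state SS_UNCONNECTED, inode 28271, then the bound path.
 */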
3448
3449static const struct seq_operations unix_seq_ops = {
3450	.start  = unix_seq_start,
3451	.next   = unix_seq_next,
3452	.stop   = unix_seq_stop,
3453	.show   = unix_seq_show,
3454};
3455
3456#ifdef CONFIG_BPF_SYSCALL
3457struct bpf_unix_iter_state {
3458	struct seq_net_private p;
3459	unsigned int cur_sk;
3460	unsigned int end_sk;
3461	unsigned int max_sk;
3462	struct sock **batch;
3463	bool st_bucket_done;
3464};
3465
3466struct bpf_iter__unix {
3467	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3468	__bpf_md_ptr(struct unix_sock *, unix_sk);
3469	uid_t uid __aligned(8);
3470};
3471
3472static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3473			      struct unix_sock *unix_sk, uid_t uid)
3474{
3475	struct bpf_iter__unix ctx;
3476
3477	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3478	ctx.meta = meta;
3479	ctx.unix_sk = unix_sk;
3480	ctx.uid = uid;
3481	return bpf_iter_run_prog(prog, &ctx);
3482}
3483
3484static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3485
3486{
3487	struct bpf_unix_iter_state *iter = seq->private;
3488	unsigned int expected = 1;
3489	struct sock *sk;
3490
3491	sock_hold(start_sk);
3492	iter->batch[iter->end_sk++] = start_sk;
3493
3494	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3495		if (iter->end_sk < iter->max_sk) {
3496			sock_hold(sk);
3497			iter->batch[iter->end_sk++] = sk;
3498		}
3499
3500		expected++;
3501	}
3502
3503	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3504
3505	return expected;
3506}
3507
3508static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3509{
3510	while (iter->cur_sk < iter->end_sk)
3511		sock_put(iter->batch[iter->cur_sk++]);
3512}
3513
3514static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3515				       unsigned int new_batch_sz)
3516{
3517	struct sock **new_batch;
3518
3519	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3520			     GFP_USER | __GFP_NOWARN);
3521	if (!new_batch)
3522		return -ENOMEM;
3523
3524	bpf_iter_unix_put_batch(iter);
3525	kvfree(iter->batch);
3526	iter->batch = new_batch;
3527	iter->max_sk = new_batch_sz;
3528
3529	return 0;
3530}
3531
3532static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3533					loff_t *pos)
3534{
3535	struct bpf_unix_iter_state *iter = seq->private;
3536	unsigned int expected;
3537	bool resized = false;
3538	struct sock *sk;
3539
3540	if (iter->st_bucket_done)
3541		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3542
3543again:
3544	/* Get a new batch */
3545	iter->cur_sk = 0;
3546	iter->end_sk = 0;
3547
3548	sk = unix_get_first(seq, pos);
3549	if (!sk)
3550		return NULL; /* Done */
3551
3552	expected = bpf_iter_unix_hold_batch(seq, sk);
3553
3554	if (iter->end_sk == expected) {
3555		iter->st_bucket_done = true;
3556		return sk;
3557	}
3558
3559	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3560		resized = true;
3561		goto again;
3562	}
3563
3564	return sk;
3565}
3566
3567static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3568{
3569	if (!*pos)
3570		return SEQ_START_TOKEN;
3571
3572	/* bpf iter does not support lseek, so it always
3573	 * continues from where it was stop()-ped.
3574	 */
3575	return bpf_iter_unix_batch(seq, pos);
3576}
3577
3578static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3579{
3580	struct bpf_unix_iter_state *iter = seq->private;
3581	struct sock *sk;
3582
3583	/* Whenever seq_next() is called, the iter->cur_sk is
3584	 * done with seq_show(), so advance to the next sk in
3585	 * the batch.
3586	 */
3587	if (iter->cur_sk < iter->end_sk)
3588		sock_put(iter->batch[iter->cur_sk++]);
3589
3590	++*pos;
3591
3592	if (iter->cur_sk < iter->end_sk)
3593		sk = iter->batch[iter->cur_sk];
3594	else
3595		sk = bpf_iter_unix_batch(seq, pos);
3596
3597	return sk;
3598}
3599
3600static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3601{
3602	struct bpf_iter_meta meta;
3603	struct bpf_prog *prog;
3604	struct sock *sk = v;
3605	uid_t uid;
3606	bool slow;
3607	int ret;
3608
3609	if (v == SEQ_START_TOKEN)
3610		return 0;
3611
3612	slow = lock_sock_fast(sk);
3613
3614	if (unlikely(sk_unhashed(sk))) {
3615		ret = SEQ_SKIP;
3616		goto unlock;
3617	}
3618
3619	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3620	meta.seq = seq;
3621	prog = bpf_iter_get_info(&meta, false);
3622	ret = unix_prog_seq_show(prog, &meta, v, uid);
3623unlock:
3624	unlock_sock_fast(sk, slow);
3625	return ret;
3626}
3627
3628static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3629{
3630	struct bpf_unix_iter_state *iter = seq->private;
3631	struct bpf_iter_meta meta;
3632	struct bpf_prog *prog;
3633
3634	if (!v) {
3635		meta.seq = seq;
3636		prog = bpf_iter_get_info(&meta, true);
3637		if (prog)
3638			(void)unix_prog_seq_show(prog, &meta, v, 0);
3639	}
3640
3641	if (iter->cur_sk < iter->end_sk)
3642		bpf_iter_unix_put_batch(iter);
3643}
3644
3645static const struct seq_operations bpf_iter_unix_seq_ops = {
3646	.start	= bpf_iter_unix_seq_start,
3647	.next	= bpf_iter_unix_seq_next,
3648	.stop	= bpf_iter_unix_seq_stop,
3649	.show	= bpf_iter_unix_seq_show,
3650};
3651#endif
3652#endif
3653
3654static const struct net_proto_family unix_family_ops = {
3655	.family = PF_UNIX,
3656	.create = unix_create,
3657	.owner	= THIS_MODULE,
3658};
3659
3660
3661static int __net_init unix_net_init(struct net *net)
3662{
3663	int i;
3664
3665	net->unx.sysctl_max_dgram_qlen = 10;
3666	if (unix_sysctl_register(net))
3667		goto out;
3668
3669#ifdef CONFIG_PROC_FS
3670	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3671			     sizeof(struct seq_net_private)))
3672		goto err_sysctl;
3673#endif
3674
3675	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3676					      sizeof(spinlock_t), GFP_KERNEL);
3677	if (!net->unx.table.locks)
3678		goto err_proc;
3679
3680	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3681						sizeof(struct hlist_head),
3682						GFP_KERNEL);
3683	if (!net->unx.table.buckets)
3684		goto free_locks;
3685
3686	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3687		spin_lock_init(&net->unx.table.locks[i]);
3688		lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
3689		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3690	}
3691
3692	return 0;
3693
3694free_locks:
3695	kvfree(net->unx.table.locks);
3696err_proc:
3697#ifdef CONFIG_PROC_FS
3698	remove_proc_entry("unix", net->proc_net);
3699err_sysctl:
3700#endif
3701	unix_sysctl_unregister(net);
3702out:
3703	return -ENOMEM;
3704}
3705
3706static void __net_exit unix_net_exit(struct net *net)
3707{
3708	kvfree(net->unx.table.buckets);
3709	kvfree(net->unx.table.locks);
3710	unix_sysctl_unregister(net);
3711	remove_proc_entry("unix", net->proc_net);
3712}
3713
3714static struct pernet_operations unix_net_ops = {
3715	.init = unix_net_init,
3716	.exit = unix_net_exit,
3717};
3718
3719#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3720DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3721		     struct unix_sock *unix_sk, uid_t uid)
3722
3723#define INIT_BATCH_SZ 16
3724
3725static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3726{
3727	struct bpf_unix_iter_state *iter = priv_data;
3728	int err;
3729
3730	err = bpf_iter_init_seq_net(priv_data, aux);
3731	if (err)
3732		return err;
3733
3734	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3735	if (err) {
3736		bpf_iter_fini_seq_net(priv_data);
3737		return err;
3738	}
3739
3740	return 0;
3741}
3742
3743static void bpf_iter_fini_unix(void *priv_data)
3744{
3745	struct bpf_unix_iter_state *iter = priv_data;
3746
3747	bpf_iter_fini_seq_net(priv_data);
3748	kvfree(iter->batch);
3749}
3750
3751static const struct bpf_iter_seq_info unix_seq_info = {
3752	.seq_ops		= &bpf_iter_unix_seq_ops,
3753	.init_seq_private	= bpf_iter_init_unix,
3754	.fini_seq_private	= bpf_iter_fini_unix,
3755	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3756};
3757
3758static const struct bpf_func_proto *
3759bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3760			     const struct bpf_prog *prog)
3761{
3762	switch (func_id) {
3763	case BPF_FUNC_setsockopt:
3764		return &bpf_sk_setsockopt_proto;
3765	case BPF_FUNC_getsockopt:
3766		return &bpf_sk_getsockopt_proto;
3767	default:
3768		return NULL;
3769	}
3770}
3771
3772static struct bpf_iter_reg unix_reg_info = {
3773	.target			= "unix",
3774	.ctx_arg_info_size	= 1,
3775	.ctx_arg_info		= {
3776		{ offsetof(struct bpf_iter__unix, unix_sk),
3777		  PTR_TO_BTF_ID_OR_NULL },
3778	},
3779	.get_func_proto         = bpf_iter_unix_get_func_proto,
3780	.seq_info		= &unix_seq_info,
3781};
3782
3783static void __init bpf_iter_register(void)
3784{
3785	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3786	if (bpf_iter_reg_target(&unix_reg_info))
3787		pr_warn("Warning: could not register bpf iterator unix\n");
3788}
3789#endif
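/*
 * Hedged usage sketch: once the "unix" target is registered, a BPF
 * iterator program can be pinned and read like a file, e.g. with bpftool
 * (object name and pin path below are made up):
 *
 *	bpftool iter pin ./unix_iter.bpf.o /sys/fs/bpf/unix_iter
 *	cat /sys/fs/bpf/unix_iter
 */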
3790
3791static int __init af_unix_init(void)
3792{
3793	int i, rc = -1;
3794
3795	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3796
3797	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3798		spin_lock_init(&bsd_socket_locks[i]);
3799		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3800	}
3801
3802	rc = proto_register(&unix_dgram_proto, 1);
3803	if (rc != 0) {
3804		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3805		goto out;
3806	}
3807
3808	rc = proto_register(&unix_stream_proto, 1);
3809	if (rc != 0) {
3810		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3811		proto_unregister(&unix_dgram_proto);
3812		goto out;
3813	}
3814
3815	sock_register(&unix_family_ops);
3816	register_pernet_subsys(&unix_net_ops);
3817	unix_bpf_build_proto();
3818
3819#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3820	bpf_iter_register();
3821#endif
3822
3823out:
3824	return rc;
3825}
3826
3827/* Later than subsys_initcall() because we depend on stuff initialised there */
3828fs_initcall(af_unix_init);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * NET4:	Implementation of BSD Unix domain sockets.
   4 *
   5 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
   6 *
   7 * Fixes:
   8 *		Linus Torvalds	:	Assorted bug cures.
   9 *		Niibe Yutaka	:	async I/O support.
  10 *		Carsten Paeth	:	PF_UNIX check, address fixes.
  11 *		Alan Cox	:	Limit size of allocated blocks.
  12 *		Alan Cox	:	Fixed the stupid socketpair bug.
  13 *		Alan Cox	:	BSD compatibility fine tuning.
  14 *		Alan Cox	:	Fixed a bug in connect when interrupted.
  15 *		Alan Cox	:	Sorted out a proper draft version of
  16 *					file descriptor passing hacked up from
  17 *					Mike Shaver's work.
  18 *		Marty Leisner	:	Fixes to fd passing
  19 *		Nick Nevin	:	recvmsg bugfix.
  20 *		Alan Cox	:	Started proper garbage collector
  21 *		Heiko EiBfeldt	:	Missing verify_area check
  22 *		Alan Cox	:	Started POSIXisms
  23 *		Andreas Schwab	:	Replace inode by dentry for proper
  24 *					reference counting
  25 *		Kirk Petersen	:	Made this a module
  26 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
  27 *					Lots of bug fixes.
  28 *	     Alexey Kuznetosv	:	Repaired (I hope) bugs introduces
  29 *					by above two patches.
  30 *	     Andrea Arcangeli	:	If possible we block in connect(2)
  31 *					if the max backlog of the listen socket
  32 *					is been reached. This won't break
  33 *					old apps and it will avoid huge amount
  34 *					of socks hashed (this for unix_gc()
  35 *					performances reasons).
  36 *					Security fix that limits the max
  37 *					number of socks to 2*max_files and
  38 *					the number of skb queueable in the
  39 *					dgram receiver.
  40 *		Artur Skawina   :	Hash function optimizations
  41 *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
  42 *	      Malcolm Beattie   :	Set peercred for socketpair
  43 *	     Michal Ostrowski   :       Module initialization cleanup.
  44 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
  45 *	     				the core infrastructure is doing that
  46 *	     				for all net proto families now (2.5.69+)
  47 *
  48 * Known differences from reference BSD that was tested:
  49 *
  50 *	[TO FIX]
  51 *	ECONNREFUSED is not returned from one end of a connected() socket to the
  52 *		other the moment one end closes.
  53 *	fstat() doesn't return st_dev=0, and give the blksize as high water mark
  54 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
  55 *	[NOT TO FIX]
  56 *	accept() returns a path name even if the connecting socket has closed
  57 *		in the meantime (BSD loses the path and gives up).
  58 *	accept() returns 0 length path for an unbound connector. BSD returns 16
  59 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  60 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
  61 *	BSD af_unix apparently has connect forgetting to block properly.
  62 *		(need to check this with the POSIX spec in detail)
  63 *
  64 * Differences from 2.0.0-11-... (ANK)
  65 *	Bug fixes and improvements.
  66 *		- client shutdown killed server socket.
  67 *		- removed all useless cli/sti pairs.
  68 *
  69 *	Semantic changes/extensions.
  70 *		- generic control message passing.
  71 *		- SCM_CREDENTIALS control message.
  72 *		- "Abstract" (not FS based) socket bindings.
  73 *		  Abstract names are sequences of bytes (not zero terminated)
  74 *		  started by 0, so that this name space does not intersect
  75 *		  with BSD names.
  76 */
  77
  78#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  79
  80#include <linux/module.h>
  81#include <linux/kernel.h>
  82#include <linux/signal.h>
  83#include <linux/sched/signal.h>
  84#include <linux/errno.h>
  85#include <linux/string.h>
  86#include <linux/stat.h>
  87#include <linux/dcache.h>
  88#include <linux/namei.h>
  89#include <linux/socket.h>
  90#include <linux/un.h>
  91#include <linux/fcntl.h>
  92#include <linux/filter.h>
  93#include <linux/termios.h>
  94#include <linux/sockios.h>
  95#include <linux/net.h>
  96#include <linux/in.h>
  97#include <linux/fs.h>
  98#include <linux/slab.h>
  99#include <linux/uaccess.h>
 100#include <linux/skbuff.h>
 101#include <linux/netdevice.h>
 102#include <net/net_namespace.h>
 103#include <net/sock.h>
 104#include <net/tcp_states.h>
 105#include <net/af_unix.h>
 106#include <linux/proc_fs.h>
 107#include <linux/seq_file.h>
 108#include <net/scm.h>
 109#include <linux/init.h>
 110#include <linux/poll.h>
 111#include <linux/rtnetlink.h>
 112#include <linux/mount.h>
 113#include <net/checksum.h>
 114#include <linux/security.h>
 115#include <linux/splice.h>
 116#include <linux/freezer.h>
 117#include <linux/file.h>
 118#include <linux/btf_ids.h>
 119#include <linux/bpf-cgroup.h>
 120
 121static atomic_long_t unix_nr_socks;
 122static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
 123static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
 124
 125/* SMP locking strategy:
 126 *    hash table is protected with spinlock.
 127 *    each socket state is protected by separate spinlock.
 128 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 129
 130static unsigned int unix_unbound_hash(struct sock *sk)
 131{
 132	unsigned long hash = (unsigned long)sk;
 133
 134	hash ^= hash >> 16;
 135	hash ^= hash >> 8;
 136	hash ^= sk->sk_type;
 137
 138	return hash & UNIX_HASH_MOD;
 139}
 140
 141static unsigned int unix_bsd_hash(struct inode *i)
 142{
 143	return i->i_ino & UNIX_HASH_MOD;
 144}
 145
 146static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
 147				       int addr_len, int type)
 148{
 149	__wsum csum = csum_partial(sunaddr, addr_len, 0);
 150	unsigned int hash;
 151
 152	hash = (__force unsigned int)csum_fold(csum);
 153	hash ^= hash >> 8;
 154	hash ^= type;
 155
 156	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
 157}
 158
 159static void unix_table_double_lock(struct net *net,
 160				   unsigned int hash1, unsigned int hash2)
 161{
 162	if (hash1 == hash2) {
 163		spin_lock(&net->unx.table.locks[hash1]);
 164		return;
 165	}
 166
 167	if (hash1 > hash2)
 168		swap(hash1, hash2);
 169
 170	spin_lock(&net->unx.table.locks[hash1]);
 171	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
 172}
 173
 174static void unix_table_double_unlock(struct net *net,
 175				     unsigned int hash1, unsigned int hash2)
 176{
 177	if (hash1 == hash2) {
 178		spin_unlock(&net->unx.table.locks[hash1]);
 179		return;
 180	}
 181
 182	spin_unlock(&net->unx.table.locks[hash1]);
 183	spin_unlock(&net->unx.table.locks[hash2]);
 184}
 185
 186#ifdef CONFIG_SECURITY_NETWORK
 187static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 188{
 189	UNIXCB(skb).secid = scm->secid;
 190}
 191
 192static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 193{
 194	scm->secid = UNIXCB(skb).secid;
 195}
 196
 197static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 198{
 199	return (scm->secid == UNIXCB(skb).secid);
 200}
 201#else
 202static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 203{ }
 204
 205static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 206{ }
 207
 208static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
 209{
 210	return true;
 211}
 212#endif /* CONFIG_SECURITY_NETWORK */
 213
 214static inline int unix_our_peer(struct sock *sk, struct sock *osk)
 215{
 216	return unix_peer(osk) == sk;
 217}
 218
 219static inline int unix_may_send(struct sock *sk, struct sock *osk)
 220{
 221	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 222}
 223
 224static inline int unix_recvq_full(const struct sock *sk)
 225{
 226	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 227}
 228
 229static inline int unix_recvq_full_lockless(const struct sock *sk)
 230{
 231	return skb_queue_len_lockless(&sk->sk_receive_queue) >
 232		READ_ONCE(sk->sk_max_ack_backlog);
 233}
 234
 235struct sock *unix_peer_get(struct sock *s)
 236{
 237	struct sock *peer;
 238
 239	unix_state_lock(s);
 240	peer = unix_peer(s);
 241	if (peer)
 242		sock_hold(peer);
 243	unix_state_unlock(s);
 244	return peer;
 245}
 246EXPORT_SYMBOL_GPL(unix_peer_get);
 247
 248static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
 249					     int addr_len)
 250{
 251	struct unix_address *addr;
 252
 253	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
 254	if (!addr)
 255		return NULL;
 256
 257	refcount_set(&addr->refcnt, 1);
 258	addr->len = addr_len;
 259	memcpy(addr->name, sunaddr, addr_len);
 260
 261	return addr;
 262}
 263
 264static inline void unix_release_addr(struct unix_address *addr)
 265{
 266	if (refcount_dec_and_test(&addr->refcnt))
 267		kfree(addr);
 268}
 269
 270/*
 271 *	Check unix socket name:
  272 *		- it should not be zero length.
  273 *		- if it does not start with a zero byte, it must be NUL terminated (FS object)
  274 *		- if it starts with a zero byte, it is an abstract name.
 275 */
 276
 277static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
 278{
 279	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
 280	    addr_len > sizeof(*sunaddr))
 281		return -EINVAL;
 282
 283	if (sunaddr->sun_family != AF_UNIX)
 284		return -EINVAL;
 285
 286	return 0;
 287}
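     /* For illustration, the three address forms as userspace would
      * pass them (a hedged sketch; the names are made up):
      *
      *	struct sockaddr_un a = { .sun_family = AF_UNIX };
      *
      * pathname - NUL-terminated string in sun_path:
      *	strcpy(a.sun_path, "/tmp/example.sock");
      *	bind(fd, (struct sockaddr *)&a, sizeof(a));
      *
      * abstract - sun_path[0] == 0, addr_len delimits the name:
      *	memcpy(a.sun_path, "\0example", 8);
      *	bind(fd, (struct sockaddr *)&a,
      *	     offsetof(struct sockaddr_un, sun_path) + 8);
      *
      * autobind - pass the bare family, the kernel picks a name:
      *	bind(fd, (struct sockaddr *)&a,
      *	     offsetof(struct sockaddr_un, sun_path));
      */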
 288
 289static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
 290{
 291	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
 292	short offset = offsetof(struct sockaddr_storage, __data);
 293
 294	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
 295
  296	/* This may look like an off-by-one error, but it is a bit more
  297	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
  298	 * sun_path[108] does not, strictly speaking, exist.  However, in
  299	 * kernel space we are guaranteed that it is a valid memory
  300	 * location in our buffer, because syscall functions always pass
  301	 * a pointer to a struct sockaddr_storage, which has a bigger
  302	 * buffer than 108 bytes.  Also, we must terminate sun_path for
  303	 * strlen() in getname_kernel().
  304	 */
 305	addr->__data[addr_len - offset] = 0;
 306
  307	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, a 108-byte
  308	 * path would cause a panic if CONFIG_FORTIFY_SOURCE=y.  Let
  309	 * __fortify_strlen() know the actual buffer size.
  310	 */
 311	return strlen(addr->__data) + offset + 1;
 312}
 313
 314static void __unix_remove_socket(struct sock *sk)
 315{
 316	sk_del_node_init(sk);
 317}
 318
 319static void __unix_insert_socket(struct net *net, struct sock *sk)
 320{
 321	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 322	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
 323}
 324
 325static void __unix_set_addr_hash(struct net *net, struct sock *sk,
 326				 struct unix_address *addr, unsigned int hash)
 327{
 328	__unix_remove_socket(sk);
 329	smp_store_release(&unix_sk(sk)->addr, addr);
 330
 331	sk->sk_hash = hash;
 332	__unix_insert_socket(net, sk);
 333}
 334
 335static void unix_remove_socket(struct net *net, struct sock *sk)
 336{
 337	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 338	__unix_remove_socket(sk);
 339	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 340}
 341
 342static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
 343{
 344	spin_lock(&net->unx.table.locks[sk->sk_hash]);
 345	__unix_insert_socket(net, sk);
 346	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
 347}
 348
 349static void unix_insert_bsd_socket(struct sock *sk)
 350{
 351	spin_lock(&bsd_socket_locks[sk->sk_hash]);
 352	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
 353	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 354}
 355
 356static void unix_remove_bsd_socket(struct sock *sk)
 357{
 358	if (!hlist_unhashed(&sk->sk_bind_node)) {
 359		spin_lock(&bsd_socket_locks[sk->sk_hash]);
 360		__sk_del_bind_node(sk);
 361		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
 362
 363		sk_node_init(&sk->sk_bind_node);
 364	}
 365}
 366
 367static struct sock *__unix_find_socket_byname(struct net *net,
 368					      struct sockaddr_un *sunname,
 369					      int len, unsigned int hash)
 370{
 371	struct sock *s;
 372
 373	sk_for_each(s, &net->unx.table.buckets[hash]) {
 374		struct unix_sock *u = unix_sk(s);
 375
 376		if (u->addr->len == len &&
 377		    !memcmp(u->addr->name, sunname, len))
 378			return s;
 379	}
 380	return NULL;
 381}
 382
 383static inline struct sock *unix_find_socket_byname(struct net *net,
 384						   struct sockaddr_un *sunname,
 385						   int len, unsigned int hash)
 386{
 387	struct sock *s;
 388
 389	spin_lock(&net->unx.table.locks[hash]);
 390	s = __unix_find_socket_byname(net, sunname, len, hash);
 391	if (s)
 392		sock_hold(s);
 393	spin_unlock(&net->unx.table.locks[hash]);
 394	return s;
 395}
 396
 397static struct sock *unix_find_socket_byinode(struct inode *i)
 398{
 399	unsigned int hash = unix_bsd_hash(i);
 400	struct sock *s;
 401
 402	spin_lock(&bsd_socket_locks[hash]);
 403	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
 404		struct dentry *dentry = unix_sk(s)->path.dentry;
 405
 406		if (dentry && d_backing_inode(dentry) == i) {
 407			sock_hold(s);
 408			spin_unlock(&bsd_socket_locks[hash]);
 409			return s;
 410		}
 411	}
 412	spin_unlock(&bsd_socket_locks[hash]);
 413	return NULL;
 414}
 415
  416/* Support code for asymmetrically connected dgram sockets
  417 *
  418 * If a datagram socket is connected to a socket not itself connected
  419 * to the first socket (e.g., /dev/log), clients may only enqueue
  420 * more messages if the current receive queue of the server socket is
  421 * not "too large". This means there is a second writeability
  422 * condition that poll and sendmsg need to test. The dgram recv code
  423 * does a wake up on the peer_wait wait queue of a socket upon
  424 * reception of a datagram, which needs to be propagated to sleeping
  425 * would-be writers since these might not have sent anything so far.
  426 * This can't be accomplished via poll_wait because the lifetime of
  427 * the server socket might be less than that of its clients if they
  428 * break their association with it, or if the server socket is closed
  429 * while clients are still connected to it, and there is no way to
  430 * tell "a polling implementation" to let go of a certain wait queue.
  431 *
  432 * In order to propagate a wake up, a wait_queue_entry_t of the client
  433 * socket is enqueued on the peer_wait queue of the server socket,
  434 * whose wake function does a wake_up on the ordinary client socket
  435 * wait queue. This connection is established whenever a write (or
  436 * poll for write) hits the flow control condition, and is broken
  437 * when the association with the server socket is dissolved or after
  438 * a wake up has been relayed.
  439 */
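     /* The userspace-visible effect is that POLLOUT on a dgram socket
      * connected to such a server also tracks the receiver's queue.
      * A hedged sketch (client_fd is a placeholder for a socket
      * connected as above):
      *
      *	struct pollfd pfd = { .fd = client_fd, .events = POLLOUT };
      *
      *	poll(&pfd, 1, -1);	wakes once there is sndbuf room and
      *				the peer's receive queue has drained
      *	send(client_fd, buf, len, MSG_DONTWAIT);
      */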
 440
 441static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 442				      void *key)
 443{
 444	struct unix_sock *u;
 445	wait_queue_head_t *u_sleep;
 446
 447	u = container_of(q, struct unix_sock, peer_wake);
 448
 449	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 450			    q);
 451	u->peer_wake.private = NULL;
 452
 453	/* relaying can only happen while the wq still exists */
 454	u_sleep = sk_sleep(&u->sk);
 455	if (u_sleep)
 456		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
 457
 458	return 0;
 459}
 460
 461static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 462{
 463	struct unix_sock *u, *u_other;
 464	int rc;
 465
 466	u = unix_sk(sk);
 467	u_other = unix_sk(other);
 468	rc = 0;
 469	spin_lock(&u_other->peer_wait.lock);
 470
 471	if (!u->peer_wake.private) {
 472		u->peer_wake.private = other;
 473		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 474
 475		rc = 1;
 476	}
 477
 478	spin_unlock(&u_other->peer_wait.lock);
 479	return rc;
 480}
 481
 482static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 483					    struct sock *other)
 484{
 485	struct unix_sock *u, *u_other;
 486
 487	u = unix_sk(sk);
 488	u_other = unix_sk(other);
 489	spin_lock(&u_other->peer_wait.lock);
 490
 491	if (u->peer_wake.private == other) {
 492		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 493		u->peer_wake.private = NULL;
 494	}
 495
 496	spin_unlock(&u_other->peer_wait.lock);
 497}
 498
 499static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 500						   struct sock *other)
 501{
 502	unix_dgram_peer_wake_disconnect(sk, other);
 503	wake_up_interruptible_poll(sk_sleep(sk),
 504				   EPOLLOUT |
 505				   EPOLLWRNORM |
 506				   EPOLLWRBAND);
 507}
 508
 509/* preconditions:
 510 *	- unix_peer(sk) == other
 511 *	- association is stable
 512 */
 513static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 514{
 515	int connected;
 516
 517	connected = unix_dgram_peer_wake_connect(sk, other);
 518
 519	/* If other is SOCK_DEAD, we want to make sure we signal
 520	 * POLLOUT, such that a subsequent write() can get a
 521	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
 522	 * to other and its full, we will hang waiting for POLLOUT.
 523	 */
 524	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
 525		return 1;
 526
 527	if (connected)
 528		unix_dgram_peer_wake_disconnect(sk, other);
 529
 530	return 0;
 531}
 532
 533static int unix_writable(const struct sock *sk)
 534{
 535	return sk->sk_state != TCP_LISTEN &&
 536	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 537}
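     /* The shift means a socket counts as writable only while its
      * outstanding write memory (sk_wmem_alloc) is at most about a
      * quarter of sk_sndbuf; e.g., assuming a sk_sndbuf of 212992 (a
      * common default), the threshold is 212992 / 4 == 53248 bytes in
      * flight.
      */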
 538
 539static void unix_write_space(struct sock *sk)
 540{
 541	struct socket_wq *wq;
 542
 543	rcu_read_lock();
 544	if (unix_writable(sk)) {
 545		wq = rcu_dereference(sk->sk_wq);
 546		if (skwq_has_sleeper(wq))
 547			wake_up_interruptible_sync_poll(&wq->wait,
 548				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 549		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 550	}
 551	rcu_read_unlock();
 552}
 553
  554/* When a dgram socket disconnects (or changes its peer), we clear its
  555 * receive queue of packets that arrived from the previous peer. First,
  556 * this allows flow control based only on wmem_alloc; second, an sk
  557 * connected to a peer may receive messages only from that peer. */
 558static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 559{
 560	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 561		skb_queue_purge(&sk->sk_receive_queue);
 562		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 563
  564		/* If one link of a bidirectional dgram pipe is disconnected,
  565		 * we signal an error: messages are lost. Do not do this
  566		 * when the peer was not connected to us.
  567		 */
 568		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
 569			WRITE_ONCE(other->sk_err, ECONNRESET);
 570			sk_error_report(other);
 571		}
 572	}
 573	other->sk_state = TCP_CLOSE;
 574}
 575
 576static void unix_sock_destructor(struct sock *sk)
 577{
 578	struct unix_sock *u = unix_sk(sk);
 579
 580	skb_queue_purge(&sk->sk_receive_queue);
 581
 582	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
 583	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 584	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
 585	if (!sock_flag(sk, SOCK_DEAD)) {
 586		pr_info("Attempt to release alive unix socket: %p\n", sk);
 587		return;
 588	}
 589
 590	if (u->addr)
 591		unix_release_addr(u->addr);
 592
 593	atomic_long_dec(&unix_nr_socks);
 594	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 595#ifdef UNIX_REFCNT_DEBUG
 596	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
 597		atomic_long_read(&unix_nr_socks));
 598#endif
 599}
 600
 601static void unix_release_sock(struct sock *sk, int embrion)
 602{
 603	struct unix_sock *u = unix_sk(sk);
 604	struct sock *skpair;
 605	struct sk_buff *skb;
 606	struct path path;
 607	int state;
 608
 609	unix_remove_socket(sock_net(sk), sk);
 610	unix_remove_bsd_socket(sk);
 611
 612	/* Clear state */
 613	unix_state_lock(sk);
 614	sock_orphan(sk);
 615	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 616	path	     = u->path;
 617	u->path.dentry = NULL;
 618	u->path.mnt = NULL;
 619	state = sk->sk_state;
 620	sk->sk_state = TCP_CLOSE;
 621
 622	skpair = unix_peer(sk);
 623	unix_peer(sk) = NULL;
 624
 625	unix_state_unlock(sk);
 626
 627#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 628	if (u->oob_skb) {
 629		kfree_skb(u->oob_skb);
 630		u->oob_skb = NULL;
 631	}
 632#endif
 633
 634	wake_up_interruptible_all(&u->peer_wait);
 635
 636	if (skpair != NULL) {
 637		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 638			unix_state_lock(skpair);
 639			/* No more writes */
 640			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
 641			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
 642				WRITE_ONCE(skpair->sk_err, ECONNRESET);
 643			unix_state_unlock(skpair);
 644			skpair->sk_state_change(skpair);
 645			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 646		}
 647
 648		unix_dgram_peer_wake_disconnect(sk, skpair);
 649		sock_put(skpair); /* It may now die */
 650	}
 651
 652	/* Try to flush out this socket. Throw out buffers at least */
 653
 654	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 655		if (state == TCP_LISTEN)
 656			unix_release_sock(skb->sk, 1);
 657		/* passed fds are erased in the kfree_skb hook	      */
 658		UNIXCB(skb).consumed = skb->len;
 659		kfree_skb(skb);
 660	}
 661
 662	if (path.dentry)
 663		path_put(&path);
 664
 665	sock_put(sk);
 666
 667	/* ---- Socket is dead now and most probably destroyed ---- */
 668
 669	/*
 670	 * Fixme: BSD difference: In BSD all sockets connected to us get
 671	 *	  ECONNRESET and we die on the spot. In Linux we behave
 672	 *	  like files and pipes do and wait for the last
 673	 *	  dereference.
 674	 *
 675	 * Can't we simply set sock->err?
 676	 *
  677	 *	  What is the above comment talking about? --ANK(980817)
 678	 */
 679
 680	if (READ_ONCE(unix_tot_inflight))
 681		unix_gc();		/* Garbage collect fds */
 682}
 683
 684static void init_peercred(struct sock *sk)
 685{
 686	const struct cred *old_cred;
 687	struct pid *old_pid;
 688
 689	spin_lock(&sk->sk_peer_lock);
 690	old_pid = sk->sk_peer_pid;
 691	old_cred = sk->sk_peer_cred;
 692	sk->sk_peer_pid  = get_pid(task_tgid(current));
 693	sk->sk_peer_cred = get_current_cred();
 694	spin_unlock(&sk->sk_peer_lock);
 695
 696	put_pid(old_pid);
 697	put_cred(old_cred);
 698}
 699
 700static void copy_peercred(struct sock *sk, struct sock *peersk)
 701{
 702	const struct cred *old_cred;
 703	struct pid *old_pid;
 704
 705	if (sk < peersk) {
 706		spin_lock(&sk->sk_peer_lock);
 707		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 708	} else {
 709		spin_lock(&peersk->sk_peer_lock);
 710		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 711	}
 712	old_pid = sk->sk_peer_pid;
 713	old_cred = sk->sk_peer_cred;
 714	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
 715	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 716
 717	spin_unlock(&sk->sk_peer_lock);
 718	spin_unlock(&peersk->sk_peer_lock);
 719
 720	put_pid(old_pid);
 721	put_cred(old_cred);
 722}
 723
 724static int unix_listen(struct socket *sock, int backlog)
 725{
 726	int err;
 727	struct sock *sk = sock->sk;
 728	struct unix_sock *u = unix_sk(sk);
 729
 730	err = -EOPNOTSUPP;
 731	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 732		goto out;	/* Only stream/seqpacket sockets accept */
 733	err = -EINVAL;
 734	if (!READ_ONCE(u->addr))
 735		goto out;	/* No listens on an unbound socket */
 736	unix_state_lock(sk);
 737	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
 738		goto out_unlock;
 739	if (backlog > sk->sk_max_ack_backlog)
 740		wake_up_interruptible_all(&u->peer_wait);
 741	sk->sk_max_ack_backlog	= backlog;
 742	sk->sk_state		= TCP_LISTEN;
 743	/* set credentials so connect can copy them */
 744	init_peercred(sk);
 745	err = 0;
 746
 747out_unlock:
 748	unix_state_unlock(sk);
 749out:
 750	return err;
 751}
 752
 753static int unix_release(struct socket *);
 754static int unix_bind(struct socket *, struct sockaddr *, int);
 755static int unix_stream_connect(struct socket *, struct sockaddr *,
 756			       int addr_len, int flags);
 757static int unix_socketpair(struct socket *, struct socket *);
 758static int unix_accept(struct socket *, struct socket *, int, bool);
 759static int unix_getname(struct socket *, struct sockaddr *, int);
 760static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
 761static __poll_t unix_dgram_poll(struct file *, struct socket *,
 762				    poll_table *);
 763static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 764#ifdef CONFIG_COMPAT
 765static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 766#endif
 767static int unix_shutdown(struct socket *, int);
 768static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 769static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
 770static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
 771				       struct pipe_inode_info *, size_t size,
 772				       unsigned int flags);
 773static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 774static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 775static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 776static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 777static int unix_dgram_connect(struct socket *, struct sockaddr *,
 778			      int, int);
 779static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
 780static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
 781				  int);
 782
 783#ifdef CONFIG_PROC_FS
 784static int unix_count_nr_fds(struct sock *sk)
 785{
 786	struct sk_buff *skb;
 787	struct unix_sock *u;
 788	int nr_fds = 0;
 789
 790	spin_lock(&sk->sk_receive_queue.lock);
 791	skb = skb_peek(&sk->sk_receive_queue);
 792	while (skb) {
 793		u = unix_sk(skb->sk);
 794		nr_fds += atomic_read(&u->scm_stat.nr_fds);
 795		skb = skb_peek_next(skb, &sk->sk_receive_queue);
 796	}
 797	spin_unlock(&sk->sk_receive_queue.lock);
 798
 799	return nr_fds;
 800}
 801
 802static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 803{
 804	struct sock *sk = sock->sk;
 805	unsigned char s_state;
 806	struct unix_sock *u;
 807	int nr_fds = 0;
 808
 809	if (sk) {
 810		s_state = READ_ONCE(sk->sk_state);
 811		u = unix_sk(sk);
 812
 813		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
 814		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
 815		 * SOCK_DGRAM is ordinary. So, no lock is needed.
 816		 */
 817		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
 818			nr_fds = atomic_read(&u->scm_stat.nr_fds);
 819		else if (s_state == TCP_LISTEN)
 820			nr_fds = unix_count_nr_fds(sk);
 821
 822		seq_printf(m, "scm_fds: %u\n", nr_fds);
 823	}
 824}
 825#else
 826#define unix_show_fdinfo NULL
 827#endif
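     /* The counter is exposed through procfs as one "scm_fds: <n>"
      * line in the fdinfo of the socket fd; e.g. (illustrative fd and
      * count):
      *
      *	$ cat /proc/self/fdinfo/3
      *	scm_fds: 2
      */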
 828
 829static const struct proto_ops unix_stream_ops = {
 830	.family =	PF_UNIX,
 831	.owner =	THIS_MODULE,
 832	.release =	unix_release,
 833	.bind =		unix_bind,
 834	.connect =	unix_stream_connect,
 835	.socketpair =	unix_socketpair,
 836	.accept =	unix_accept,
 837	.getname =	unix_getname,
 838	.poll =		unix_poll,
 839	.ioctl =	unix_ioctl,
 840#ifdef CONFIG_COMPAT
 841	.compat_ioctl =	unix_compat_ioctl,
 842#endif
 843	.listen =	unix_listen,
 844	.shutdown =	unix_shutdown,
 845	.sendmsg =	unix_stream_sendmsg,
 846	.recvmsg =	unix_stream_recvmsg,
 847	.read_skb =	unix_stream_read_skb,
 848	.mmap =		sock_no_mmap,
 849	.splice_read =	unix_stream_splice_read,
 850	.set_peek_off =	sk_set_peek_off,
 851	.show_fdinfo =	unix_show_fdinfo,
 852};
 853
 854static const struct proto_ops unix_dgram_ops = {
 855	.family =	PF_UNIX,
 856	.owner =	THIS_MODULE,
 857	.release =	unix_release,
 858	.bind =		unix_bind,
 859	.connect =	unix_dgram_connect,
 860	.socketpair =	unix_socketpair,
 861	.accept =	sock_no_accept,
 862	.getname =	unix_getname,
 863	.poll =		unix_dgram_poll,
 864	.ioctl =	unix_ioctl,
 865#ifdef CONFIG_COMPAT
 866	.compat_ioctl =	unix_compat_ioctl,
 867#endif
 868	.listen =	sock_no_listen,
 869	.shutdown =	unix_shutdown,
 870	.sendmsg =	unix_dgram_sendmsg,
 871	.read_skb =	unix_read_skb,
 872	.recvmsg =	unix_dgram_recvmsg,
 873	.mmap =		sock_no_mmap,
 874	.set_peek_off =	sk_set_peek_off,
 875	.show_fdinfo =	unix_show_fdinfo,
 876};
 877
 878static const struct proto_ops unix_seqpacket_ops = {
 879	.family =	PF_UNIX,
 880	.owner =	THIS_MODULE,
 881	.release =	unix_release,
 882	.bind =		unix_bind,
 883	.connect =	unix_stream_connect,
 884	.socketpair =	unix_socketpair,
 885	.accept =	unix_accept,
 886	.getname =	unix_getname,
 887	.poll =		unix_dgram_poll,
 888	.ioctl =	unix_ioctl,
 889#ifdef CONFIG_COMPAT
 890	.compat_ioctl =	unix_compat_ioctl,
 891#endif
 892	.listen =	unix_listen,
 893	.shutdown =	unix_shutdown,
 894	.sendmsg =	unix_seqpacket_sendmsg,
 895	.recvmsg =	unix_seqpacket_recvmsg,
 896	.mmap =		sock_no_mmap,
 897	.set_peek_off =	sk_set_peek_off,
 898	.show_fdinfo =	unix_show_fdinfo,
 899};
 900
 901static void unix_close(struct sock *sk, long timeout)
 902{
 903	/* Nothing to do here, unix socket does not need a ->close().
 904	 * This is merely for sockmap.
 905	 */
 906}
 907
 908static void unix_unhash(struct sock *sk)
 909{
 910	/* Nothing to do here, unix socket does not need a ->unhash().
 911	 * This is merely for sockmap.
 912	 */
 913}
 914
 915static bool unix_bpf_bypass_getsockopt(int level, int optname)
 916{
 917	if (level == SOL_SOCKET) {
 918		switch (optname) {
 919		case SO_PEERPIDFD:
 920			return true;
 921		default:
 922			return false;
 923		}
 924	}
 925
 926	return false;
 927}
 928
 929struct proto unix_dgram_proto = {
 930	.name			= "UNIX",
 931	.owner			= THIS_MODULE,
 932	.obj_size		= sizeof(struct unix_sock),
 933	.close			= unix_close,
 934	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 935#ifdef CONFIG_BPF_SYSCALL
 936	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
 937#endif
 938};
 939
 940struct proto unix_stream_proto = {
 941	.name			= "UNIX-STREAM",
 942	.owner			= THIS_MODULE,
 943	.obj_size		= sizeof(struct unix_sock),
 944	.close			= unix_close,
 945	.unhash			= unix_unhash,
 946	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
 947#ifdef CONFIG_BPF_SYSCALL
 948	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
 949#endif
 950};
 951
 952static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
 953{
 954	struct unix_sock *u;
 955	struct sock *sk;
 956	int err;
 957
 958	atomic_long_inc(&unix_nr_socks);
 959	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
 960		err = -ENFILE;
 961		goto err;
 962	}
 963
 964	if (type == SOCK_STREAM)
 965		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
  966		else /* dgram and seqpacket */
 967		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
 968
 969	if (!sk) {
 970		err = -ENOMEM;
 971		goto err;
 972	}
 973
 974	sock_init_data(sock, sk);
 975
 976	sk->sk_hash		= unix_unbound_hash(sk);
 977	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
 978	sk->sk_write_space	= unix_write_space;
 979	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
 980	sk->sk_destruct		= unix_sock_destructor;
 981	u = unix_sk(sk);
 982	u->inflight = 0;
 983	u->path.dentry = NULL;
 984	u->path.mnt = NULL;
 985	spin_lock_init(&u->lock);
 986	INIT_LIST_HEAD(&u->link);
 987	mutex_init(&u->iolock); /* single task reading lock */
 988	mutex_init(&u->bindlock); /* single task binding lock */
 989	init_waitqueue_head(&u->peer_wait);
 990	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 991	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
 992	unix_insert_unbound_socket(net, sk);
 993
 994	sock_prot_inuse_add(net, sk->sk_prot, 1);
 995
 996	return sk;
 997
 998err:
 999	atomic_long_dec(&unix_nr_socks);
1000	return ERR_PTR(err);
1001}
1002
1003static int unix_create(struct net *net, struct socket *sock, int protocol,
1004		       int kern)
1005{
1006	struct sock *sk;
1007
1008	if (protocol && protocol != PF_UNIX)
1009		return -EPROTONOSUPPORT;
1010
1011	sock->state = SS_UNCONNECTED;
1012
1013	switch (sock->type) {
1014	case SOCK_STREAM:
1015		sock->ops = &unix_stream_ops;
1016		break;
1017		/*
 1018		 *	Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
1019		 *	nothing uses it.
1020		 */
1021	case SOCK_RAW:
1022		sock->type = SOCK_DGRAM;
1023		fallthrough;
1024	case SOCK_DGRAM:
1025		sock->ops = &unix_dgram_ops;
1026		break;
1027	case SOCK_SEQPACKET:
1028		sock->ops = &unix_seqpacket_ops;
1029		break;
1030	default:
1031		return -ESOCKTNOSUPPORT;
1032	}
1033
1034	sk = unix_create1(net, sock, kern, sock->type);
1035	if (IS_ERR(sk))
1036		return PTR_ERR(sk);
1037
1038	return 0;
1039}
1040
1041static int unix_release(struct socket *sock)
1042{
1043	struct sock *sk = sock->sk;
1044
1045	if (!sk)
1046		return 0;
1047
1048	sk->sk_prot->close(sk, 0);
1049	unix_release_sock(sk, 0);
1050	sock->sk = NULL;
1051
1052	return 0;
1053}
1054
1055static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1056				  int type)
1057{
1058	struct inode *inode;
1059	struct path path;
1060	struct sock *sk;
1061	int err;
1062
1063	unix_mkname_bsd(sunaddr, addr_len);
1064	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1065	if (err)
1066		goto fail;
1067
1068	err = path_permission(&path, MAY_WRITE);
1069	if (err)
1070		goto path_put;
1071
1072	err = -ECONNREFUSED;
1073	inode = d_backing_inode(path.dentry);
1074	if (!S_ISSOCK(inode->i_mode))
1075		goto path_put;
1076
1077	sk = unix_find_socket_byinode(inode);
1078	if (!sk)
1079		goto path_put;
1080
1081	err = -EPROTOTYPE;
1082	if (sk->sk_type == type)
1083		touch_atime(&path);
1084	else
1085		goto sock_put;
1086
1087	path_put(&path);
1088
1089	return sk;
1090
1091sock_put:
1092	sock_put(sk);
1093path_put:
1094	path_put(&path);
1095fail:
1096	return ERR_PTR(err);
1097}
1098
1099static struct sock *unix_find_abstract(struct net *net,
1100				       struct sockaddr_un *sunaddr,
1101				       int addr_len, int type)
1102{
1103	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1104	struct dentry *dentry;
1105	struct sock *sk;
1106
1107	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1108	if (!sk)
1109		return ERR_PTR(-ECONNREFUSED);
1110
1111	dentry = unix_sk(sk)->path.dentry;
1112	if (dentry)
1113		touch_atime(&unix_sk(sk)->path);
1114
1115	return sk;
1116}
1117
1118static struct sock *unix_find_other(struct net *net,
1119				    struct sockaddr_un *sunaddr,
1120				    int addr_len, int type)
1121{
1122	struct sock *sk;
1123
1124	if (sunaddr->sun_path[0])
1125		sk = unix_find_bsd(sunaddr, addr_len, type);
1126	else
1127		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1128
1129	return sk;
1130}
1131
1132static int unix_autobind(struct sock *sk)
1133{
1134	struct unix_sock *u = unix_sk(sk);
1135	unsigned int new_hash, old_hash;
1136	struct net *net = sock_net(sk);
1137	struct unix_address *addr;
1138	u32 lastnum, ordernum;
1139	int err;
1140
1141	err = mutex_lock_interruptible(&u->bindlock);
1142	if (err)
1143		return err;
1144
1145	if (u->addr)
1146		goto out;
1147
1148	err = -ENOMEM;
1149	addr = kzalloc(sizeof(*addr) +
1150		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1151	if (!addr)
1152		goto out;
1153
1154	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1155	addr->name->sun_family = AF_UNIX;
1156	refcount_set(&addr->refcnt, 1);
1157
1158	old_hash = sk->sk_hash;
1159	ordernum = get_random_u32();
1160	lastnum = ordernum & 0xFFFFF;
1161retry:
1162	ordernum = (ordernum + 1) & 0xFFFFF;
1163	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1164
1165	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1166	unix_table_double_lock(net, old_hash, new_hash);
1167
1168	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1169		unix_table_double_unlock(net, old_hash, new_hash);
1170
 1171		/* __unix_find_socket_byname() may take a long time if many names
1172		 * are already in use.
1173		 */
1174		cond_resched();
1175
1176		if (ordernum == lastnum) {
 1177			/* Give up if all names seem to be in use. */
1178			err = -ENOSPC;
1179			unix_release_addr(addr);
1180			goto out;
1181		}
1182
1183		goto retry;
1184	}
1185
1186	__unix_set_addr_hash(net, sk, addr, new_hash);
1187	unix_table_double_unlock(net, old_hash, new_hash);
1188	err = 0;
1189
1190out:	mutex_unlock(&u->bindlock);
1191	return err;
1192}
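     /* An autobound address is a zero byte followed by five lowercase
      * hex digits, so getsockname() on such a socket reports addr_len
      * == offsetof(struct sockaddr_un, sun_path) + 6.  Illustrative
      * value (the digits come from get_random_u32()):
      *
      *	sun_path == "\0""2af31", addr_len == 2 + 6 == 8
      */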
1193
1194static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1195			 int addr_len)
1196{
1197	umode_t mode = S_IFSOCK |
1198	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1199	struct unix_sock *u = unix_sk(sk);
1200	unsigned int new_hash, old_hash;
1201	struct net *net = sock_net(sk);
1202	struct mnt_idmap *idmap;
1203	struct unix_address *addr;
1204	struct dentry *dentry;
1205	struct path parent;
1206	int err;
1207
1208	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1209	addr = unix_create_addr(sunaddr, addr_len);
1210	if (!addr)
1211		return -ENOMEM;
1212
1213	/*
1214	 * Get the parent directory, calculate the hash for last
1215	 * component.
1216	 */
1217	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1218	if (IS_ERR(dentry)) {
1219		err = PTR_ERR(dentry);
1220		goto out;
1221	}
1222
1223	/*
1224	 * All right, let's create it.
1225	 */
1226	idmap = mnt_idmap(parent.mnt);
1227	err = security_path_mknod(&parent, dentry, mode, 0);
1228	if (!err)
1229		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1230	if (err)
1231		goto out_path;
1232	err = mutex_lock_interruptible(&u->bindlock);
1233	if (err)
1234		goto out_unlink;
1235	if (u->addr)
1236		goto out_unlock;
1237
1238	old_hash = sk->sk_hash;
1239	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1240	unix_table_double_lock(net, old_hash, new_hash);
1241	u->path.mnt = mntget(parent.mnt);
1242	u->path.dentry = dget(dentry);
1243	__unix_set_addr_hash(net, sk, addr, new_hash);
1244	unix_table_double_unlock(net, old_hash, new_hash);
1245	unix_insert_bsd_socket(sk);
1246	mutex_unlock(&u->bindlock);
1247	done_path_create(&parent, dentry);
1248	return 0;
1249
1250out_unlock:
1251	mutex_unlock(&u->bindlock);
1252	err = -EINVAL;
1253out_unlink:
1254	/* failed after successful mknod?  unlink what we'd created... */
1255	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1256out_path:
1257	done_path_create(&parent, dentry);
1258out:
1259	unix_release_addr(addr);
1260	return err == -EEXIST ? -EADDRINUSE : err;
1261}
1262
1263static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1264			      int addr_len)
1265{
1266	struct unix_sock *u = unix_sk(sk);
1267	unsigned int new_hash, old_hash;
1268	struct net *net = sock_net(sk);
1269	struct unix_address *addr;
1270	int err;
1271
1272	addr = unix_create_addr(sunaddr, addr_len);
1273	if (!addr)
1274		return -ENOMEM;
1275
1276	err = mutex_lock_interruptible(&u->bindlock);
1277	if (err)
1278		goto out;
1279
1280	if (u->addr) {
1281		err = -EINVAL;
1282		goto out_mutex;
1283	}
1284
1285	old_hash = sk->sk_hash;
1286	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1287	unix_table_double_lock(net, old_hash, new_hash);
1288
1289	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1290		goto out_spin;
1291
1292	__unix_set_addr_hash(net, sk, addr, new_hash);
1293	unix_table_double_unlock(net, old_hash, new_hash);
1294	mutex_unlock(&u->bindlock);
1295	return 0;
1296
1297out_spin:
1298	unix_table_double_unlock(net, old_hash, new_hash);
1299	err = -EADDRINUSE;
1300out_mutex:
1301	mutex_unlock(&u->bindlock);
1302out:
1303	unix_release_addr(addr);
1304	return err;
1305}
1306
1307static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1308{
1309	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1310	struct sock *sk = sock->sk;
1311	int err;
1312
1313	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1314	    sunaddr->sun_family == AF_UNIX)
1315		return unix_autobind(sk);
1316
1317	err = unix_validate_addr(sunaddr, addr_len);
1318	if (err)
1319		return err;
1320
1321	if (sunaddr->sun_path[0])
1322		err = unix_bind_bsd(sk, sunaddr, addr_len);
1323	else
1324		err = unix_bind_abstract(sk, sunaddr, addr_len);
1325
1326	return err;
1327}
1328
1329static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1330{
1331	if (unlikely(sk1 == sk2) || !sk2) {
1332		unix_state_lock(sk1);
1333		return;
1334	}
1335	if (sk1 > sk2)
1336		swap(sk1, sk2);
1337
1338	unix_state_lock(sk1);
1339	unix_state_lock_nested(sk2, U_LOCK_SECOND);
1340}
1341
1342static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1343{
1344	if (unlikely(sk1 == sk2) || !sk2) {
1345		unix_state_unlock(sk1);
1346		return;
1347	}
1348	unix_state_unlock(sk1);
1349	unix_state_unlock(sk2);
1350}
1351
1352static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1353			      int alen, int flags)
1354{
1355	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1356	struct sock *sk = sock->sk;
1357	struct sock *other;
1358	int err;
1359
1360	err = -EINVAL;
1361	if (alen < offsetofend(struct sockaddr, sa_family))
1362		goto out;
1363
1364	if (addr->sa_family != AF_UNSPEC) {
1365		err = unix_validate_addr(sunaddr, alen);
1366		if (err)
1367			goto out;
1368
1369		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1370		if (err)
1371			goto out;
1372
1373		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1374		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1375		    !READ_ONCE(unix_sk(sk)->addr)) {
1376			err = unix_autobind(sk);
1377			if (err)
1378				goto out;
1379		}
1380
1381restart:
1382		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1383		if (IS_ERR(other)) {
1384			err = PTR_ERR(other);
1385			goto out;
1386		}
1387
1388		unix_state_double_lock(sk, other);
1389
1390		/* Apparently VFS overslept socket death. Retry. */
1391		if (sock_flag(other, SOCK_DEAD)) {
1392			unix_state_double_unlock(sk, other);
1393			sock_put(other);
1394			goto restart;
1395		}
1396
1397		err = -EPERM;
1398		if (!unix_may_send(sk, other))
1399			goto out_unlock;
1400
1401		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1402		if (err)
1403			goto out_unlock;
1404
1405		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1406	} else {
1407		/*
1408		 *	1003.1g breaking connected state with AF_UNSPEC
1409		 */
1410		other = NULL;
1411		unix_state_double_lock(sk, other);
1412	}
1413
1414	/*
1415	 * If it was connected, reconnect.
1416	 */
1417	if (unix_peer(sk)) {
1418		struct sock *old_peer = unix_peer(sk);
1419
1420		unix_peer(sk) = other;
1421		if (!other)
1422			sk->sk_state = TCP_CLOSE;
1423		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1424
1425		unix_state_double_unlock(sk, other);
1426
1427		if (other != old_peer)
1428			unix_dgram_disconnected(sk, old_peer);
1429		sock_put(old_peer);
1430	} else {
1431		unix_peer(sk) = other;
1432		unix_state_double_unlock(sk, other);
1433	}
1434
1435	return 0;
1436
1437out_unlock:
1438	unix_state_double_unlock(sk, other);
1439	sock_put(other);
1440out:
1441	return err;
1442}
1443
1444static long unix_wait_for_peer(struct sock *other, long timeo)
1445	__releases(&unix_sk(other)->lock)
1446{
1447	struct unix_sock *u = unix_sk(other);
1448	int sched;
1449	DEFINE_WAIT(wait);
1450
1451	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1452
1453	sched = !sock_flag(other, SOCK_DEAD) &&
1454		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1455		unix_recvq_full_lockless(other);
1456
1457	unix_state_unlock(other);
1458
1459	if (sched)
1460		timeo = schedule_timeout(timeo);
1461
1462	finish_wait(&u->peer_wait, &wait);
1463	return timeo;
1464}
1465
1466static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1467			       int addr_len, int flags)
1468{
1469	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1470	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1471	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1472	struct net *net = sock_net(sk);
1473	struct sk_buff *skb = NULL;
1474	long timeo;
1475	int err;
1476	int st;
1477
1478	err = unix_validate_addr(sunaddr, addr_len);
1479	if (err)
1480		goto out;
1481
1482	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1483	if (err)
1484		goto out;
1485
1486	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1487	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1488	    !READ_ONCE(u->addr)) {
1489		err = unix_autobind(sk);
1490		if (err)
1491			goto out;
1492	}
1493
1494	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1495
 1496	/* First of all, allocate resources.
 1497	   If we did this after the state was locked,
 1498	   we would have to recheck everything again in any case.
1499	 */
1500
1501	/* create new sock for complete connection */
1502	newsk = unix_create1(net, NULL, 0, sock->type);
1503	if (IS_ERR(newsk)) {
1504		err = PTR_ERR(newsk);
1505		newsk = NULL;
1506		goto out;
1507	}
1508
1509	err = -ENOMEM;
1510
1511	/* Allocate skb for sending to listening sock */
1512	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1513	if (skb == NULL)
1514		goto out;
1515
1516restart:
1517	/*  Find listening sock. */
1518	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1519	if (IS_ERR(other)) {
1520		err = PTR_ERR(other);
1521		other = NULL;
1522		goto out;
1523	}
1524
1525	/* Latch state of peer */
1526	unix_state_lock(other);
1527
1528	/* Apparently VFS overslept socket death. Retry. */
1529	if (sock_flag(other, SOCK_DEAD)) {
1530		unix_state_unlock(other);
1531		sock_put(other);
1532		goto restart;
1533	}
1534
1535	err = -ECONNREFUSED;
1536	if (other->sk_state != TCP_LISTEN)
1537		goto out_unlock;
1538	if (other->sk_shutdown & RCV_SHUTDOWN)
1539		goto out_unlock;
1540
1541	if (unix_recvq_full(other)) {
1542		err = -EAGAIN;
1543		if (!timeo)
1544			goto out_unlock;
1545
1546		timeo = unix_wait_for_peer(other, timeo);
1547
1548		err = sock_intr_errno(timeo);
1549		if (signal_pending(current))
1550			goto out;
1551		sock_put(other);
1552		goto restart;
1553	}
1554
 1555	/* Latch our state.
 1556
 1557	   This is a tricky place. We need to grab our state lock but
 1558	   cannot drop the lock on the peer. That is dangerous because
 1559	   a deadlock is possible. The connect-to-self case and a
 1560	   simultaneous attempt to connect are eliminated by checking the
 1561	   socket state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
 1562	   check this before attempting to grab the lock.
 1563
 1564	   And we have to recheck the state after the socket is locked.
 1565	 */
1566	st = sk->sk_state;
1567
1568	switch (st) {
1569	case TCP_CLOSE:
1570		/* This is ok... continue with connect */
1571		break;
1572	case TCP_ESTABLISHED:
1573		/* Socket is already connected */
1574		err = -EISCONN;
1575		goto out_unlock;
1576	default:
1577		err = -EINVAL;
1578		goto out_unlock;
1579	}
1580
1581	unix_state_lock_nested(sk, U_LOCK_SECOND);
1582
1583	if (sk->sk_state != st) {
1584		unix_state_unlock(sk);
1585		unix_state_unlock(other);
1586		sock_put(other);
1587		goto restart;
1588	}
1589
1590	err = security_unix_stream_connect(sk, other, newsk);
1591	if (err) {
1592		unix_state_unlock(sk);
1593		goto out_unlock;
1594	}
1595
 1596	/* The way is open! Quickly set all the necessary fields... */
1597
1598	sock_hold(sk);
1599	unix_peer(newsk)	= sk;
1600	newsk->sk_state		= TCP_ESTABLISHED;
1601	newsk->sk_type		= sk->sk_type;
1602	init_peercred(newsk);
1603	newu = unix_sk(newsk);
1604	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1605	otheru = unix_sk(other);
1606
1607	/* copy address information from listening to new sock
1608	 *
1609	 * The contents of *(otheru->addr) and otheru->path
1610	 * are seen fully set up here, since we have found
1611	 * otheru in hash under its lock.  Insertion into the
1612	 * hash chain we'd found it in had been done in an
1613	 * earlier critical area protected by the chain's lock,
1614	 * the same one where we'd set *(otheru->addr) contents,
1615	 * as well as otheru->path and otheru->addr itself.
1616	 *
1617	 * Using smp_store_release() here to set newu->addr
1618	 * is enough to make those stores, as well as stores
1619	 * to newu->path visible to anyone who gets newu->addr
 1620	 * by smp_load_acquire().  IOW, the same guarantees
1621	 * as for unix_sock instances bound in unix_bind() or
1622	 * in unix_autobind().
1623	 */
1624	if (otheru->path.dentry) {
1625		path_get(&otheru->path);
1626		newu->path = otheru->path;
1627	}
1628	refcount_inc(&otheru->addr->refcnt);
1629	smp_store_release(&newu->addr, otheru->addr);
1630
1631	/* Set credentials */
1632	copy_peercred(sk, other);
1633
1634	sock->state	= SS_CONNECTED;
1635	sk->sk_state	= TCP_ESTABLISHED;
1636	sock_hold(newsk);
1637
1638	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1639	unix_peer(sk)	= newsk;
1640
1641	unix_state_unlock(sk);
1642
1643	/* take ten and send info to listening sock */
1644	spin_lock(&other->sk_receive_queue.lock);
1645	__skb_queue_tail(&other->sk_receive_queue, skb);
1646	spin_unlock(&other->sk_receive_queue.lock);
1647	unix_state_unlock(other);
1648	other->sk_data_ready(other);
1649	sock_put(other);
1650	return 0;
1651
1652out_unlock:
1653	if (other)
1654		unix_state_unlock(other);
1655
1656out:
1657	kfree_skb(skb);
1658	if (newsk)
1659		unix_release_sock(newsk, 0);
1660	if (other)
1661		sock_put(other);
1662	return err;
1663}
1664
1665static int unix_socketpair(struct socket *socka, struct socket *sockb)
1666{
1667	struct sock *ska = socka->sk, *skb = sockb->sk;
1668
1669	/* Join our sockets back to back */
1670	sock_hold(ska);
1671	sock_hold(skb);
1672	unix_peer(ska) = skb;
1673	unix_peer(skb) = ska;
1674	init_peercred(ska);
1675	init_peercred(skb);
1676
1677	ska->sk_state = TCP_ESTABLISHED;
1678	skb->sk_state = TCP_ESTABLISHED;
1679	socka->state  = SS_CONNECTED;
1680	sockb->state  = SS_CONNECTED;
1681	return 0;
1682}
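     /* Userspace reaches this through socketpair(2); a minimal sketch
      * (both ends come back already connected to each other):
      *
      *	int sv[2];
      *	char buf[4];
      *
      *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
      *		write(sv[0], "ping", 4);
      *		read(sv[1], buf, sizeof(buf));
      *	}
      */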
1683
1684static void unix_sock_inherit_flags(const struct socket *old,
1685				    struct socket *new)
1686{
1687	if (test_bit(SOCK_PASSCRED, &old->flags))
1688		set_bit(SOCK_PASSCRED, &new->flags);
1689	if (test_bit(SOCK_PASSPIDFD, &old->flags))
1690		set_bit(SOCK_PASSPIDFD, &new->flags);
1691	if (test_bit(SOCK_PASSSEC, &old->flags))
1692		set_bit(SOCK_PASSSEC, &new->flags);
1693}
1694
1695static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1696		       bool kern)
1697{
1698	struct sock *sk = sock->sk;
1699	struct sock *tsk;
1700	struct sk_buff *skb;
1701	int err;
1702
1703	err = -EOPNOTSUPP;
1704	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1705		goto out;
1706
1707	err = -EINVAL;
1708	if (sk->sk_state != TCP_LISTEN)
1709		goto out;
1710
1711	/* If socket state is TCP_LISTEN it cannot change (for now...),
1712	 * so that no locks are necessary.
1713	 */
1714
1715	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1716				&err);
1717	if (!skb) {
1718		/* This means receive shutdown. */
1719		if (err == 0)
1720			err = -EINVAL;
1721		goto out;
1722	}
1723
1724	tsk = skb->sk;
1725	skb_free_datagram(sk, skb);
1726	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1727
1728	/* attach accepted sock to socket */
1729	unix_state_lock(tsk);
1730	newsock->state = SS_CONNECTED;
1731	unix_sock_inherit_flags(sock, newsock);
1732	sock_graft(tsk, newsock);
1733	unix_state_unlock(tsk);
1734	return 0;
1735
1736out:
1737	return err;
1738}
1739
1740
1741static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1742{
1743	struct sock *sk = sock->sk;
1744	struct unix_address *addr;
1745	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1746	int err = 0;
1747
1748	if (peer) {
1749		sk = unix_peer_get(sk);
1750
1751		err = -ENOTCONN;
1752		if (!sk)
1753			goto out;
1754		err = 0;
1755	} else {
1756		sock_hold(sk);
1757	}
1758
1759	addr = smp_load_acquire(&unix_sk(sk)->addr);
1760	if (!addr) {
1761		sunaddr->sun_family = AF_UNIX;
1762		sunaddr->sun_path[0] = 0;
1763		err = offsetof(struct sockaddr_un, sun_path);
1764	} else {
1765		err = addr->len;
1766		memcpy(sunaddr, addr->name, addr->len);
1767
1768		if (peer)
1769			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1770					       CGROUP_UNIX_GETPEERNAME);
1771		else
1772			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1773					       CGROUP_UNIX_GETSOCKNAME);
1774	}
1775	sock_put(sk);
1776out:
1777	return err;
1778}
1779
1780/* The "user->unix_inflight" variable is protected by the garbage
1781 * collection lock, and we just read it locklessly here. If you go
1782 * over the limit, there might be a tiny race in actually noticing
1783 * it across threads. Tough.
1784 */
1785static inline bool too_many_unix_fds(struct task_struct *p)
1786{
1787	struct user_struct *user = current_user();
1788
1789	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1790		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1791	return false;
1792}
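     /* The limit is enforced against fds passed with SCM_RIGHTS.  A
      * hedged userspace sketch of attaching one fd (fd_to_pass is a
      * placeholder) to a one-byte message:
      *
      *	char data = 'x', cbuf[CMSG_SPACE(sizeof(int))];
      *	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
      *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
      *			     .msg_control = cbuf,
      *			     .msg_controllen = sizeof(cbuf) };
      *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
      *
      *	c->cmsg_level = SOL_SOCKET;
      *	c->cmsg_type = SCM_RIGHTS;
      *	c->cmsg_len = CMSG_LEN(sizeof(int));
      *	memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
      *	sendmsg(sock, &mh, 0);
      *
      * Past the rlimit, the sendmsg() fails with ETOOMANYREFS (see
      * unix_attach_fds() below).
      */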
1793
1794static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1795{
1796	int i;
1797
1798	if (too_many_unix_fds(current))
1799		return -ETOOMANYREFS;
1800
1801	/* Need to duplicate file references for the sake of garbage
1802	 * collection.  Otherwise a socket in the fps might become a
1803	 * candidate for GC while the skb is not yet queued.
1804	 */
1805	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1806	if (!UNIXCB(skb).fp)
1807		return -ENOMEM;
1808
1809	for (i = scm->fp->count - 1; i >= 0; i--)
1810		unix_inflight(scm->fp->user, scm->fp->fp[i]);
1811
1812	return 0;
1813}
1814
1815static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1816{
1817	int i;
1818
1819	scm->fp = UNIXCB(skb).fp;
1820	UNIXCB(skb).fp = NULL;
1821
1822	for (i = scm->fp->count - 1; i >= 0; i--)
1823		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
1824}
1825
1826static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1827{
1828	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1829
1830	/*
1831	 * Garbage collection of unix sockets starts by selecting a set of
1832	 * candidate sockets which have reference only from being in flight
1833	 * (total_refs == inflight_refs).  This condition is checked once during
1834	 * the candidate collection phase, and candidates are marked as such, so
1835	 * that non-candidates can later be ignored.  While inflight_refs is
1836	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1837	 * is an instantaneous decision.
1838	 *
1839	 * Once a candidate, however, the socket must not be reinstalled into a
1840	 * file descriptor while the garbage collection is in progress.
1841	 *
1842	 * If the above conditions are met, then the directed graph of
1843	 * candidates (*) does not change while unix_gc_lock is held.
1844	 *
 1845	 * Any operation that changes the file count through file descriptors
 1846	 * (dup, close, sendmsg) does not change the graph since candidates are
 1847	 * not installed in fds.
 1848	 *
 1849	 * Dequeuing a candidate via recvmsg would install it into an fd, but
1850	 * that takes unix_gc_lock to decrement the inflight count, so it's
1851	 * serialized with garbage collection.
1852	 *
1853	 * MSG_PEEK is special in that it does not change the inflight count,
1854	 * yet does install the socket into an fd.  The following lock/unlock
1855	 * pair is to ensure serialization with garbage collection.  It must be
1856	 * done between incrementing the file count and installing the file into
1857	 * an fd.
1858	 *
1859	 * If garbage collection starts after the barrier provided by the
1860	 * lock/unlock, then it will see the elevated refcount and not mark this
1861	 * as a candidate.  If a garbage collection is already in progress
1862	 * before the file count was incremented, then the lock/unlock pair will
1863	 * ensure that garbage collection is finished before progressing to
1864	 * installing the fd.
1865	 *
1866	 * (*) A -> B where B is on the queue of A or B is on the queue of C
1867	 * which is on the queue of listening socket A.
1868	 */
1869	spin_lock(&unix_gc_lock);
1870	spin_unlock(&unix_gc_lock);
1871}
1872
1873static void unix_destruct_scm(struct sk_buff *skb)
1874{
1875	struct scm_cookie scm;
1876
1877	memset(&scm, 0, sizeof(scm));
1878	scm.pid  = UNIXCB(skb).pid;
1879	if (UNIXCB(skb).fp)
1880		unix_detach_fds(&scm, skb);
1881
1882	/* Alas, it calls VFS */
1883	/* So fscking what? fput() had been SMP-safe since the last Summer */
1884	scm_destroy(&scm);
1885	sock_wfree(skb);
1886}
1887
1888static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1889{
1890	int err = 0;
1891
1892	UNIXCB(skb).pid  = get_pid(scm->pid);
1893	UNIXCB(skb).uid = scm->creds.uid;
1894	UNIXCB(skb).gid = scm->creds.gid;
1895	UNIXCB(skb).fp = NULL;
1896	unix_get_secdata(scm, skb);
1897	if (scm->fp && send_fds)
1898		err = unix_attach_fds(scm, skb);
1899
1900	skb->destructor = unix_destruct_scm;
1901	return err;
1902}
1903
1904static bool unix_passcred_enabled(const struct socket *sock,
1905				  const struct sock *other)
1906{
1907	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1908	       test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1909	       !other->sk_socket ||
1910	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1911	       test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1912}
1913
1914/*
 1915 * Some apps rely on write() giving SCM_CREDENTIALS.
 1916 * We include credentials if the source or destination socket
 1917 * asserted SOCK_PASSCRED.
1918 */
1919static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1920			    const struct sock *other)
1921{
1922	if (UNIXCB(skb).pid)
1923		return;
1924	if (unix_passcred_enabled(sock, other)) {
1925		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1926		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1927	}
1928}
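     /* Receivers opt in with SO_PASSCRED and then find SCM_CREDENTIALS
      * (a struct ucred: pid, uid, gid) in the ancillary data of every
      * received message.  A hedged sketch of the receiving side:
      *
      *	int on = 1;
      *
      *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
      */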
1929
1930static bool unix_skb_scm_eq(struct sk_buff *skb,
1931			    struct scm_cookie *scm)
1932{
1933	return UNIXCB(skb).pid == scm->pid &&
1934	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1935	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1936	       unix_secdata_eq(scm, skb);
1937}
1938
1939static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1940{
1941	struct scm_fp_list *fp = UNIXCB(skb).fp;
1942	struct unix_sock *u = unix_sk(sk);
1943
1944	if (unlikely(fp && fp->count))
1945		atomic_add(fp->count, &u->scm_stat.nr_fds);
1946}
1947
1948static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1949{
1950	struct scm_fp_list *fp = UNIXCB(skb).fp;
1951	struct unix_sock *u = unix_sk(sk);
1952
1953	if (unlikely(fp && fp->count))
1954		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1955}
1956
1957/*
1958 *	Send AF_UNIX data.
1959 */
1960
1961static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1962			      size_t len)
1963{
1964	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1965	struct sock *sk = sock->sk, *other = NULL;
1966	struct unix_sock *u = unix_sk(sk);
1967	struct scm_cookie scm;
1968	struct sk_buff *skb;
1969	int data_len = 0;
1970	int sk_locked;
1971	long timeo;
1972	int err;
1973
1974	err = scm_send(sock, msg, &scm, false);
1975	if (err < 0)
1976		return err;
1977
1978	wait_for_unix_gc(scm.fp);
1979
1980	err = -EOPNOTSUPP;
1981	if (msg->msg_flags&MSG_OOB)
1982		goto out;
1983
1984	if (msg->msg_namelen) {
1985		err = unix_validate_addr(sunaddr, msg->msg_namelen);
1986		if (err)
1987			goto out;
1988
1989		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1990							    msg->msg_name,
1991							    &msg->msg_namelen,
1992							    NULL);
1993		if (err)
1994			goto out;
1995	} else {
1996		sunaddr = NULL;
1997		err = -ENOTCONN;
1998		other = unix_peer_get(sk);
1999		if (!other)
2000			goto out;
2001	}
2002
2003	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
2004	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
2005	    !READ_ONCE(u->addr)) {
2006		err = unix_autobind(sk);
2007		if (err)
2008			goto out;
2009	}
2010
2011	err = -EMSGSIZE;
2012	if (len > sk->sk_sndbuf - 32)
2013		goto out;
2014
2015	if (len > SKB_MAX_ALLOC) {
2016		data_len = min_t(size_t,
2017				 len - SKB_MAX_ALLOC,
2018				 MAX_SKB_FRAGS * PAGE_SIZE);
2019		data_len = PAGE_ALIGN(data_len);
2020
2021		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2022	}
2023
2024	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2025				   msg->msg_flags & MSG_DONTWAIT, &err,
2026				   PAGE_ALLOC_COSTLY_ORDER);
2027	if (skb == NULL)
2028		goto out;
2029
2030	err = unix_scm_to_skb(&scm, skb, true);
2031	if (err < 0)
2032		goto out_free;
2033
2034	skb_put(skb, len - data_len);
2035	skb->data_len = data_len;
2036	skb->len = len;
2037	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2038	if (err)
2039		goto out_free;
2040
2041	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2042
2043restart:
2044	if (!other) {
2045		err = -ECONNRESET;
2046		if (sunaddr == NULL)
2047			goto out_free;
2048
2049		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
2050					sk->sk_type);
2051		if (IS_ERR(other)) {
2052			err = PTR_ERR(other);
2053			other = NULL;
2054			goto out_free;
2055		}
2056	}
2057
2058	if (sk_filter(other, skb) < 0) {
2059		/* Toss the packet but do not return any error to the sender */
2060		err = len;
2061		goto out_free;
2062	}
2063
2064	sk_locked = 0;
2065	unix_state_lock(other);
2066restart_locked:
2067	err = -EPERM;
2068	if (!unix_may_send(sk, other))
2069		goto out_unlock;
2070
2071	if (unlikely(sock_flag(other, SOCK_DEAD))) {
 2072		/*
 2073		 *	Check with 1003.1g - what should a
 2074		 *	datagram error return here?
 2075		 */
2076		unix_state_unlock(other);
2077		sock_put(other);
2078
2079		if (!sk_locked)
2080			unix_state_lock(sk);
2081
2082		err = 0;
2083		if (sk->sk_type == SOCK_SEQPACKET) {
 2084			/* We get here only when racing with unix_release_sock()
 2085			 * while it clears @other. Never change the state to
 2086			 * TCP_CLOSE here, unlike the SOCK_DGRAM case.
 2087			 */
2088			unix_state_unlock(sk);
2089			err = -EPIPE;
2090		} else if (unix_peer(sk) == other) {
2091			unix_peer(sk) = NULL;
2092			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2093
2094			sk->sk_state = TCP_CLOSE;
2095			unix_state_unlock(sk);
2096
2097			unix_dgram_disconnected(sk, other);
2098			sock_put(other);
2099			err = -ECONNREFUSED;
2100		} else {
2101			unix_state_unlock(sk);
2102		}
2103
2104		other = NULL;
2105		if (err)
2106			goto out_free;
2107		goto restart;
2108	}
2109
2110	err = -EPIPE;
2111	if (other->sk_shutdown & RCV_SHUTDOWN)
2112		goto out_unlock;
2113
2114	if (sk->sk_type != SOCK_SEQPACKET) {
2115		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2116		if (err)
2117			goto out_unlock;
2118	}
2119
2120	/* other == sk && unix_peer(other) != sk if
2121	 * - unix_peer(sk) == NULL, destination address bound to sk
 2122	 * - unix_peer(sk) == sk at the time of the lookup, but disconnected before the lock
2123	 */
2124	if (other != sk &&
2125	    unlikely(unix_peer(other) != sk &&
2126	    unix_recvq_full_lockless(other))) {
2127		if (timeo) {
2128			timeo = unix_wait_for_peer(other, timeo);
2129
2130			err = sock_intr_errno(timeo);
2131			if (signal_pending(current))
2132				goto out_free;
2133
2134			goto restart;
2135		}
2136
2137		if (!sk_locked) {
2138			unix_state_unlock(other);
2139			unix_state_double_lock(sk, other);
2140		}
2141
2142		if (unix_peer(sk) != other ||
2143		    unix_dgram_peer_wake_me(sk, other)) {
2144			err = -EAGAIN;
2145			sk_locked = 1;
2146			goto out_unlock;
2147		}
2148
2149		if (!sk_locked) {
2150			sk_locked = 1;
2151			goto restart_locked;
2152		}
2153	}
2154
2155	if (unlikely(sk_locked))
2156		unix_state_unlock(sk);
2157
2158	if (sock_flag(other, SOCK_RCVTSTAMP))
2159		__net_timestamp(skb);
2160	maybe_add_creds(skb, sock, other);
2161	scm_stat_add(other, skb);
2162	skb_queue_tail(&other->sk_receive_queue, skb);
2163	unix_state_unlock(other);
2164	other->sk_data_ready(other);
2165	sock_put(other);
2166	scm_destroy(&scm);
2167	return len;
2168
2169out_unlock:
2170	if (sk_locked)
2171		unix_state_unlock(sk);
2172	unix_state_unlock(other);
2173out_free:
2174	kfree_skb(skb);
2175out:
2176	if (other)
2177		sock_put(other);
2178	scm_destroy(&scm);
2179	return err;
2180}
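/* Illustrative userspace sketch (not part of this file; the path and
 * helper name are made up): the semantics unix_dgram_sendmsg() above
 * gives sendto(2).  A blocking sender sleeps in unix_wait_for_peer()
 * while the receiver's queue is full; with MSG_DONTWAIT it gets
 * EAGAIN instead, and ECONNREFUSED once the peer socket is gone.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int dgram_send_example(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	strncpy(addr.sun_path, "/tmp/dgram.sock", sizeof(addr.sun_path) - 1);

	/* Non-blocking: fails with EAGAIN while the peer's queue is full. */
	if (sendto(fd, "ping", 4, MSG_DONTWAIT,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}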
2181
2182/* We use paged skbs for stream sockets, and limit occupancy to 32768
2183 * bytes, with a minimum of a full page.
2184 */
2185#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
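/* Worked example (assuming common page sizes): get_order(32768) is the
 * smallest page order covering 32768 bytes.  With 4 KiB pages that is
 * order 3, so UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768; with 64 KiB
 * pages the order is 0 and the macro yields one full 65536-byte page,
 * honouring the "minimum of a full page" rule above.
 */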
2186
2187#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2188static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2189		     struct scm_cookie *scm, bool fds_sent)
2190{
2191	struct unix_sock *ousk = unix_sk(other);
2192	struct sk_buff *skb;
2193	int err = 0;
2194
2195	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2196
2197	if (!skb)
2198		return err;
2199
2200	err = unix_scm_to_skb(scm, skb, !fds_sent);
2201	if (err < 0) {
2202		kfree_skb(skb);
2203		return err;
2204	}
2205	skb_put(skb, 1);
2206	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2207
2208	if (err) {
2209		kfree_skb(skb);
2210		return err;
2211	}
2212
2213	unix_state_lock(other);
2214
2215	if (sock_flag(other, SOCK_DEAD) ||
2216	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2217		unix_state_unlock(other);
2218		kfree_skb(skb);
2219		return -EPIPE;
2220	}
2221
2222	maybe_add_creds(skb, sock, other);
2223	skb_get(skb);
2224
2225	scm_stat_add(other, skb);
2226
2227	spin_lock(&other->sk_receive_queue.lock);
2228	if (ousk->oob_skb)
2229		consume_skb(ousk->oob_skb);
2230	WRITE_ONCE(ousk->oob_skb, skb);
2231	__skb_queue_tail(&other->sk_receive_queue, skb);
2232	spin_unlock(&other->sk_receive_queue.lock);
2233
2234	sk_send_sigurg(other);
2235	unix_state_unlock(other);
2236	other->sk_data_ready(other);
2237
2238	return err;
2239}
2240#endif
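/* Illustrative userspace sketch (not part of this file): the
 * out-of-band path queued by queue_oob() above, available when the
 * kernel is built with CONFIG_AF_UNIX_OOB.  Only the most recent OOB
 * byte sits at the mark - an earlier unread one loses its urgent
 * status and stays in the stream as ordinary data - and SIGURG is
 * raised for the receiving socket's owner.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static int oob_example(void)
{
	int sv[2];
	char c;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return -1;

	send(sv[0], "a", 1, 0);		/* ordinary in-band byte */
	send(sv[0], "b", 1, MSG_OOB);	/* urgent byte, raises SIGURG */

	if (recv(sv[1], &c, 1, MSG_OOB) == 1)
		printf("OOB byte: %c\n", c);	/* prints 'b' */

	recv(sv[1], &c, 1, 0);		/* then the in-band 'a' */
	close(sv[0]);
	close(sv[1]);
	return 0;
}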
2241
2242static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2243			       size_t len)
2244{
2245	struct sock *sk = sock->sk;
2246	struct sock *other = NULL;
2247	int err, size;
2248	struct sk_buff *skb;
2249	int sent = 0;
2250	struct scm_cookie scm;
2251	bool fds_sent = false;
2252	int data_len;
2253
2254	err = scm_send(sock, msg, &scm, false);
2255	if (err < 0)
2256		return err;
2257
2258	wait_for_unix_gc(scm.fp);
2259
2260	err = -EOPNOTSUPP;
2261	if (msg->msg_flags & MSG_OOB) {
2262#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2263		if (len)
2264			len--;
2265		else
2266#endif
2267			goto out_err;
2268	}
2269
2270	if (msg->msg_namelen) {
2271		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2272		goto out_err;
2273	} else {
2274		err = -ENOTCONN;
2275		other = unix_peer(sk);
2276		if (!other)
2277			goto out_err;
2278	}
2279
2280	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2281		goto pipe_err;
2282
2283	while (sent < len) {
2284		size = len - sent;
2285
2286		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2287			skb = sock_alloc_send_pskb(sk, 0, 0,
2288						   msg->msg_flags & MSG_DONTWAIT,
2289						   &err, 0);
2290		} else {
2291			/* Keep two messages in the pipe so it schedules better */
2292			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2293
2294			/* allow fallback to order-0 allocations */
2295			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2296
2297			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2298
2299			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2300
2301			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2302						   msg->msg_flags & MSG_DONTWAIT, &err,
2303						   get_order(UNIX_SKB_FRAGS_SZ));
2304		}
2305		if (!skb)
2306			goto out_err;
2307
2308		/* Only send the fds in the first buffer */
2309		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2310		if (err < 0) {
2311			kfree_skb(skb);
2312			goto out_err;
2313		}
2314		fds_sent = true;
2315
2316		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2317			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2318						   sk->sk_allocation);
2319			if (err < 0) {
2320				kfree_skb(skb);
2321				goto out_err;
2322			}
2323			size = err;
2324			refcount_add(size, &sk->sk_wmem_alloc);
2325		} else {
2326			skb_put(skb, size - data_len);
2327			skb->data_len = data_len;
2328			skb->len = size;
2329			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2330			if (err) {
2331				kfree_skb(skb);
2332				goto out_err;
2333			}
2334		}
2335
2336		unix_state_lock(other);
2337
2338		if (sock_flag(other, SOCK_DEAD) ||
2339		    (other->sk_shutdown & RCV_SHUTDOWN))
2340			goto pipe_err_free;
2341
2342		maybe_add_creds(skb, sock, other);
2343		scm_stat_add(other, skb);
2344		skb_queue_tail(&other->sk_receive_queue, skb);
2345		unix_state_unlock(other);
2346		other->sk_data_ready(other);
2347		sent += size;
2348	}
2349
2350#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2351	if (msg->msg_flags & MSG_OOB) {
2352		err = queue_oob(sock, msg, other, &scm, fds_sent);
2353		if (err)
2354			goto out_err;
2355		sent++;
2356	}
2357#endif
2358
2359	scm_destroy(&scm);
2360
2361	return sent;
2362
2363pipe_err_free:
2364	unix_state_unlock(other);
2365	kfree_skb(skb);
2366pipe_err:
2367	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2368		send_sig(SIGPIPE, current, 0);
2369	err = -EPIPE;
2370out_err:
2371	scm_destroy(&scm);
2372	return sent ? : err;
2373}
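/* Illustrative userspace sketch (not part of this file; helper name is
 * made up): passing a file descriptor with SCM_RIGHTS.  As the
 * !fds_sent logic above shows, unix_stream_sendmsg() attaches the fds
 * to the first skb of a message only, however many skbs the payload is
 * split into.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd_example(int sock, int fd_to_pass)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}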
2374
2375static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2376				  size_t len)
2377{
2378	int err;
2379	struct sock *sk = sock->sk;
2380
2381	err = sock_error(sk);
2382	if (err)
2383		return err;
2384
2385	if (sk->sk_state != TCP_ESTABLISHED)
2386		return -ENOTCONN;
2387
2388	if (msg->msg_namelen)
2389		msg->msg_namelen = 0;
2390
2391	return unix_dgram_sendmsg(sock, msg, len);
2392}
2393
2394static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2395				  size_t size, int flags)
2396{
2397	struct sock *sk = sock->sk;
2398
2399	if (sk->sk_state != TCP_ESTABLISHED)
2400		return -ENOTCONN;
2401
2402	return unix_dgram_recvmsg(sock, msg, size, flags);
2403}
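/* Illustrative userspace sketch (not part of this file):
 * SOCK_SEQPACKET is connection-oriented like SOCK_STREAM but keeps
 * record boundaries like SOCK_DGRAM, which is why the two wrappers
 * above simply delegate to the datagram paths once connected.
 */
#include <assert.h>
#include <sys/socket.h>
#include <unistd.h>

static int seqpacket_example(void)
{
	int sv[2];
	char buf[16];

	if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv) < 0)
		return -1;

	send(sv[0], "one", 3, 0);
	send(sv[0], "two", 3, 0);

	/* Each recv() returns exactly one record, never a concatenation. */
	assert(recv(sv[1], buf, sizeof(buf), 0) == 3);
	assert(recv(sv[1], buf, sizeof(buf), 0) == 3);

	close(sv[0]);
	close(sv[1]);
	return 0;
}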
2404
2405static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2406{
2407	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2408
2409	if (addr) {
2410		msg->msg_namelen = addr->len;
2411		memcpy(msg->msg_name, addr->name, addr->len);
2412	}
2413}
2414
2415int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2416			 int flags)
2417{
2418	struct scm_cookie scm;
2419	struct socket *sock = sk->sk_socket;
2420	struct unix_sock *u = unix_sk(sk);
2421	struct sk_buff *skb, *last;
2422	long timeo;
2423	int skip;
2424	int err;
2425
2426	err = -EOPNOTSUPP;
2427	if (flags&MSG_OOB)
2428		goto out;
2429
2430	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2431
2432	do {
2433		mutex_lock(&u->iolock);
2434
2435		skip = sk_peek_offset(sk, flags);
2436		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2437					      &skip, &err, &last);
2438		if (skb) {
2439			if (!(flags & MSG_PEEK))
2440				scm_stat_del(sk, skb);
2441			break;
2442		}
2443
2444		mutex_unlock(&u->iolock);
2445
2446		if (err != -EAGAIN)
2447			break;
2448	} while (timeo &&
2449		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2450					      &err, &timeo, last));
2451
2452	if (!skb) { /* implies iolock unlocked */
2453		unix_state_lock(sk);
2454		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2455		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2456		    (sk->sk_shutdown & RCV_SHUTDOWN))
2457			err = 0;
2458		unix_state_unlock(sk);
2459		goto out;
2460	}
2461
2462	if (wq_has_sleeper(&u->peer_wait))
2463		wake_up_interruptible_sync_poll(&u->peer_wait,
2464						EPOLLOUT | EPOLLWRNORM |
2465						EPOLLWRBAND);
2466
2467	if (msg->msg_name) {
2468		unix_copy_addr(msg, skb->sk);
2469
2470		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2471						      msg->msg_name,
2472						      &msg->msg_namelen);
2473	}
2474
2475	if (size > skb->len - skip)
2476		size = skb->len - skip;
2477	else if (size < skb->len - skip)
2478		msg->msg_flags |= MSG_TRUNC;
2479
2480	err = skb_copy_datagram_msg(skb, skip, msg, size);
2481	if (err)
2482		goto out_free;
2483
2484	if (sock_flag(sk, SOCK_RCVTSTAMP))
2485		__sock_recv_timestamp(msg, sk, skb);
2486
2487	memset(&scm, 0, sizeof(scm));
2488
2489	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2490	unix_set_secdata(&scm, skb);
2491
2492	if (!(flags & MSG_PEEK)) {
2493		if (UNIXCB(skb).fp)
2494			unix_detach_fds(&scm, skb);
2495
2496		sk_peek_offset_bwd(sk, skb->len);
2497	} else {
2498		/* It is questionable: on PEEK we could:
2499		   - not return fds - good, but too simple 8)
2500		   - return fds, and not return them on a later read (the old
2501		     strategy, apparently wrong)
2502		   - clone fds (chosen for now, as it is the most universal
2503		     solution)
2504
2505		   POSIX 1003.1g does not actually define this clearly
2506		   at all. Then again, POSIX 1003.1g doesn't define a lot
2507		   of things clearly!
2508
2509		*/
2510
2511		sk_peek_offset_fwd(sk, size);
2512
2513		if (UNIXCB(skb).fp)
2514			unix_peek_fds(&scm, skb);
2515	}
2516	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2517
2518	scm_recv_unix(sock, msg, &scm, flags);
2519
2520out_free:
2521	skb_free_datagram(sk, skb);
2522	mutex_unlock(&u->iolock);
2523out:
2524	return err;
2525}
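/* Illustrative userspace sketch (not part of this file; helper name is
 * made up): because the code above returns the full skb length when
 * MSG_TRUNC is passed, the size of the next datagram can be probed
 * without consuming it.
 */
#include <sys/socket.h>

static long next_dgram_size(int fd)
{
	char dummy;

	/* Peek without dequeueing; the return value is the real length. */
	return recv(fd, &dummy, sizeof(dummy), MSG_PEEK | MSG_TRUNC);
}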
2526
2527static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2528			      int flags)
2529{
2530	struct sock *sk = sock->sk;
2531
2532#ifdef CONFIG_BPF_SYSCALL
2533	const struct proto *prot = READ_ONCE(sk->sk_prot);
2534
2535	if (prot != &unix_dgram_proto)
2536		return prot->recvmsg(sk, msg, size, flags, NULL);
2537#endif
2538	return __unix_dgram_recvmsg(sk, msg, size, flags);
2539}
2540
2541static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2542{
2543	struct unix_sock *u = unix_sk(sk);
2544	struct sk_buff *skb;
2545	int err;
2546
2547	mutex_lock(&u->iolock);
2548	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2549	mutex_unlock(&u->iolock);
2550	if (!skb)
2551		return err;
2552
2553	return recv_actor(sk, skb);
2554}
2555
2556/*
2557 *	Sleep until more data has arrived, but check for races.
2558 */
2559static long unix_stream_data_wait(struct sock *sk, long timeo,
2560				  struct sk_buff *last, unsigned int last_len,
2561				  bool freezable)
2562{
2563	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2564	struct sk_buff *tail;
2565	DEFINE_WAIT(wait);
2566
2567	unix_state_lock(sk);
2568
2569	for (;;) {
2570		prepare_to_wait(sk_sleep(sk), &wait, state);
2571
2572		tail = skb_peek_tail(&sk->sk_receive_queue);
2573		if (tail != last ||
2574		    (tail && tail->len != last_len) ||
2575		    sk->sk_err ||
2576		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2577		    signal_pending(current) ||
2578		    !timeo)
2579			break;
2580
2581		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2582		unix_state_unlock(sk);
2583		timeo = schedule_timeout(timeo);
2584		unix_state_lock(sk);
2585
2586		if (sock_flag(sk, SOCK_DEAD))
2587			break;
2588
2589		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2590	}
2591
2592	finish_wait(sk_sleep(sk), &wait);
2593	unix_state_unlock(sk);
2594	return timeo;
2595}
2596
2597static unsigned int unix_skb_len(const struct sk_buff *skb)
2598{
2599	return skb->len - UNIXCB(skb).consumed;
2600}
2601
2602struct unix_stream_read_state {
2603	int (*recv_actor)(struct sk_buff *, int, int,
2604			  struct unix_stream_read_state *);
2605	struct socket *socket;
2606	struct msghdr *msg;
2607	struct pipe_inode_info *pipe;
2608	size_t size;
2609	int flags;
2610	unsigned int splice_flags;
2611};
2612
2613#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2614static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2615{
2616	struct socket *sock = state->socket;
2617	struct sock *sk = sock->sk;
2618	struct unix_sock *u = unix_sk(sk);
2619	int chunk = 1;
2620	struct sk_buff *oob_skb;
2621
2622	mutex_lock(&u->iolock);
2623	unix_state_lock(sk);
2624	spin_lock(&sk->sk_receive_queue.lock);
2625
2626	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2627		spin_unlock(&sk->sk_receive_queue.lock);
2628		unix_state_unlock(sk);
2629		mutex_unlock(&u->iolock);
2630		return -EINVAL;
2631	}
2632
2633	oob_skb = u->oob_skb;
2634
2635	if (!(state->flags & MSG_PEEK))
2636		WRITE_ONCE(u->oob_skb, NULL);
2637	else
2638		skb_get(oob_skb);
2639
2640	spin_unlock(&sk->sk_receive_queue.lock);
2641	unix_state_unlock(sk);
2642
2643	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2644
2645	if (!(state->flags & MSG_PEEK))
2646		UNIXCB(oob_skb).consumed += 1;
2647
2648	consume_skb(oob_skb);
2649
2650	mutex_unlock(&u->iolock);
2651
2652	if (chunk < 0)
2653		return -EFAULT;
2654
2655	state->msg->msg_flags |= MSG_OOB;
2656	return 1;
2657}
2658
2659static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2660				  int flags, int copied)
2661{
2662	struct unix_sock *u = unix_sk(sk);
2663
2664	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2665		skb_unlink(skb, &sk->sk_receive_queue);
2666		consume_skb(skb);
2667		skb = NULL;
2668	} else {
2669		struct sk_buff *unlinked_skb = NULL;
2670
2671		spin_lock(&sk->sk_receive_queue.lock);
2672
2673		if (skb == u->oob_skb) {
2674			if (copied) {
2675				skb = NULL;
2676			} else if (sock_flag(sk, SOCK_URGINLINE)) {
2677				if (!(flags & MSG_PEEK)) {
2678					WRITE_ONCE(u->oob_skb, NULL);
2679					consume_skb(skb);
2680				}
2681			} else if (flags & MSG_PEEK) {
2682				skb = NULL;
2683			} else {
2684				__skb_unlink(skb, &sk->sk_receive_queue);
2685				WRITE_ONCE(u->oob_skb, NULL);
2686				unlinked_skb = skb;
2687				skb = skb_peek(&sk->sk_receive_queue);
2688			}
2689		}
2690
2691		spin_unlock(&sk->sk_receive_queue.lock);
2692
2693		if (unlinked_skb) {
2694			WARN_ON_ONCE(skb_unref(unlinked_skb));
2695			kfree_skb(unlinked_skb);
2696		}
2697	}
2698	return skb;
2699}
2700#endif
2701
2702static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2703{
2704	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2705		return -ENOTCONN;
2706
2707	return unix_read_skb(sk, recv_actor);
2708}
2709
2710static int unix_stream_read_generic(struct unix_stream_read_state *state,
2711				    bool freezable)
2712{
2713	struct scm_cookie scm;
2714	struct socket *sock = state->socket;
2715	struct sock *sk = sock->sk;
2716	struct unix_sock *u = unix_sk(sk);
2717	int copied = 0;
2718	int flags = state->flags;
2719	int noblock = flags & MSG_DONTWAIT;
2720	bool check_creds = false;
2721	int target;
2722	int err = 0;
2723	long timeo;
2724	int skip;
2725	size_t size = state->size;
2726	unsigned int last_len;
2727
2728	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2729		err = -EINVAL;
2730		goto out;
2731	}
2732
2733	if (unlikely(flags & MSG_OOB)) {
2734		err = -EOPNOTSUPP;
2735#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2736		err = unix_stream_recv_urg(state);
2737#endif
2738		goto out;
2739	}
2740
2741	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2742	timeo = sock_rcvtimeo(sk, noblock);
2743
2744	memset(&scm, 0, sizeof(scm));
2745
2746	/* Lock the socket to prevent the queue from being reordered
2747	 * while we sleep in memcpy_to_msg()
2748	 */
2749	mutex_lock(&u->iolock);
2750
2751	skip = max(sk_peek_offset(sk, flags), 0);
2752
2753	do {
2754		int chunk;
2755		bool drop_skb;
2756		struct sk_buff *skb, *last;
2757
2758redo:
2759		unix_state_lock(sk);
2760		if (sock_flag(sk, SOCK_DEAD)) {
2761			err = -ECONNRESET;
2762			goto unlock;
2763		}
2764		last = skb = skb_peek(&sk->sk_receive_queue);
2765		last_len = last ? last->len : 0;
2766
2767again:
2768#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2769		if (skb) {
2770			skb = manage_oob(skb, sk, flags, copied);
2771			if (!skb && copied) {
2772				unix_state_unlock(sk);
2773				break;
2774			}
2775		}
2776#endif
2777		if (skb == NULL) {
2778			if (copied >= target)
2779				goto unlock;
2780
2781			/*
2782			 *	POSIX 1003.1g mandates this order.
2783			 */
2784
2785			err = sock_error(sk);
2786			if (err)
2787				goto unlock;
2788			if (sk->sk_shutdown & RCV_SHUTDOWN)
2789				goto unlock;
2790
2791			unix_state_unlock(sk);
2792			if (!timeo) {
2793				err = -EAGAIN;
2794				break;
2795			}
2796
2797			mutex_unlock(&u->iolock);
2798
2799			timeo = unix_stream_data_wait(sk, timeo, last,
2800						      last_len, freezable);
2801
2802			if (signal_pending(current)) {
2803				err = sock_intr_errno(timeo);
2804				scm_destroy(&scm);
2805				goto out;
2806			}
2807
2808			mutex_lock(&u->iolock);
2809			goto redo;
2810unlock:
2811			unix_state_unlock(sk);
2812			break;
2813		}
2814
2815		while (skip >= unix_skb_len(skb)) {
2816			skip -= unix_skb_len(skb);
2817			last = skb;
2818			last_len = skb->len;
2819			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2820			if (!skb)
2821				goto again;
2822		}
2823
2824		unix_state_unlock(sk);
2825
2826		if (check_creds) {
2827			/* Never glue messages from different writers */
2828			if (!unix_skb_scm_eq(skb, &scm))
2829				break;
2830		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2831			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2832			/* Copy credentials */
2833			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2834			unix_set_secdata(&scm, skb);
2835			check_creds = true;
2836		}
2837
2838		/* Copy address just once */
2839		if (state->msg && state->msg->msg_name) {
2840			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2841					 state->msg->msg_name);
2842			unix_copy_addr(state->msg, skb->sk);
2843
2844			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2845							      state->msg->msg_name,
2846							      &state->msg->msg_namelen);
2847
2848			sunaddr = NULL;
2849		}
2850
2851		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2852		skb_get(skb);
2853		chunk = state->recv_actor(skb, skip, chunk, state);
2854		drop_skb = !unix_skb_len(skb);
2855		/* skb is only safe to use if !drop_skb */
2856		consume_skb(skb);
2857		if (chunk < 0) {
2858			if (copied == 0)
2859				copied = -EFAULT;
2860			break;
2861		}
2862		copied += chunk;
2863		size -= chunk;
2864
2865		if (drop_skb) {
2866		/* the skb was touched by a concurrent reader;
2867		 * we should not expect anything from this skb
2868		 * anymore and must treat it as invalid - we can
2869		 * be sure it was dropped from the socket queue
2870		 *
2871		 * let's report a short read
2872		 */
2873			err = 0;
2874			break;
2875		}
2876
2877		/* Mark read part of skb as used */
2878		if (!(flags & MSG_PEEK)) {
2879			UNIXCB(skb).consumed += chunk;
2880
2881			sk_peek_offset_bwd(sk, chunk);
2882
2883			if (UNIXCB(skb).fp) {
2884				scm_stat_del(sk, skb);
2885				unix_detach_fds(&scm, skb);
2886			}
2887
2888			if (unix_skb_len(skb))
2889				break;
2890
2891			skb_unlink(skb, &sk->sk_receive_queue);
2892			consume_skb(skb);
2893
2894			if (scm.fp)
2895				break;
2896		} else {
2897			/* It is questionable, see note in unix_dgram_recvmsg.
2898			 */
2899			if (UNIXCB(skb).fp)
2900				unix_peek_fds(&scm, skb);
2901
2902			sk_peek_offset_fwd(sk, chunk);
2903
2904			if (UNIXCB(skb).fp)
2905				break;
2906
2907			skip = 0;
2908			last = skb;
2909			last_len = skb->len;
2910			unix_state_lock(sk);
2911			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2912			if (skb)
2913				goto again;
2914			unix_state_unlock(sk);
2915			break;
2916		}
2917	} while (size);
2918
2919	mutex_unlock(&u->iolock);
2920	if (state->msg)
2921		scm_recv_unix(sock, state->msg, &scm, flags);
2922	else
2923		scm_destroy(&scm);
2924out:
2925	return copied ? : err;
2926}
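/* Illustrative userspace sketch (not part of this file; helper name is
 * made up): the sk_peek_offset_fwd()/_bwd() calls above implement
 * SO_PEEK_OFF, which lets successive MSG_PEEK reads walk forward
 * through unread stream data instead of re-reading from the head of
 * the queue.
 */
#include <sys/socket.h>

static int enable_peek_cursor(int fd)
{
	int off = 0;	/* start the peek cursor at the queue head */

	return setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
}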
2927
2928static int unix_stream_read_actor(struct sk_buff *skb,
2929				  int skip, int chunk,
2930				  struct unix_stream_read_state *state)
2931{
2932	int ret;
2933
2934	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2935				    state->msg, chunk);
2936	return ret ?: chunk;
2937}
2938
2939int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2940			  size_t size, int flags)
2941{
2942	struct unix_stream_read_state state = {
2943		.recv_actor = unix_stream_read_actor,
2944		.socket = sk->sk_socket,
2945		.msg = msg,
2946		.size = size,
2947		.flags = flags
2948	};
2949
2950	return unix_stream_read_generic(&state, true);
2951}
2952
2953static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2954			       size_t size, int flags)
2955{
2956	struct unix_stream_read_state state = {
2957		.recv_actor = unix_stream_read_actor,
2958		.socket = sock,
2959		.msg = msg,
2960		.size = size,
2961		.flags = flags
2962	};
2963
2964#ifdef CONFIG_BPF_SYSCALL
2965	struct sock *sk = sock->sk;
2966	const struct proto *prot = READ_ONCE(sk->sk_prot);
2967
2968	if (prot != &unix_stream_proto)
2969		return prot->recvmsg(sk, msg, size, flags, NULL);
2970#endif
2971	return unix_stream_read_generic(&state, true);
2972}
2973
2974static int unix_stream_splice_actor(struct sk_buff *skb,
2975				    int skip, int chunk,
2976				    struct unix_stream_read_state *state)
2977{
2978	return skb_splice_bits(skb, state->socket->sk,
2979			       UNIXCB(skb).consumed + skip,
2980			       state->pipe, chunk, state->splice_flags);
2981}
2982
2983static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2984				       struct pipe_inode_info *pipe,
2985				       size_t size, unsigned int flags)
2986{
2987	struct unix_stream_read_state state = {
2988		.recv_actor = unix_stream_splice_actor,
2989		.socket = sock,
2990		.pipe = pipe,
2991		.size = size,
2992		.splice_flags = flags,
2993	};
2994
2995	if (unlikely(*ppos))
2996		return -ESPIPE;
2997
2998	if (sock->file->f_flags & O_NONBLOCK ||
2999	    flags & SPLICE_F_NONBLOCK)
3000		state.flags = MSG_DONTWAIT;
3001
3002	return unix_stream_read_generic(&state, false);
3003}
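/* Illustrative userspace sketch (not part of this file; helper name is
 * made up): splicing stream data into a pipe, which lands in
 * unix_stream_splice_read() above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t drain_to_pipe(int sock_fd, int pipe_wr_fd, size_t len)
{
	/* Offsets must be NULL for sockets; a set position is rejected. */
	return splice(sock_fd, NULL, pipe_wr_fd, NULL, len,
		      SPLICE_F_NONBLOCK);
}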
3004
3005static int unix_shutdown(struct socket *sock, int mode)
3006{
3007	struct sock *sk = sock->sk;
3008	struct sock *other;
3009
3010	if (mode < SHUT_RD || mode > SHUT_RDWR)
3011		return -EINVAL;
3012	/* This maps:
3013	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
3014	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
3015	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3016	 */
3017	++mode;
3018
3019	unix_state_lock(sk);
3020	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3021	other = unix_peer(sk);
3022	if (other)
3023		sock_hold(other);
3024	unix_state_unlock(sk);
3025	sk->sk_state_change(sk);
3026
3027	if (other &&
3028		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3029
3030		int peer_mode = 0;
3031		const struct proto *prot = READ_ONCE(other->sk_prot);
3032
3033		if (prot->unhash)
3034			prot->unhash(other);
3035		if (mode&RCV_SHUTDOWN)
3036			peer_mode |= SEND_SHUTDOWN;
3037		if (mode&SEND_SHUTDOWN)
3038			peer_mode |= RCV_SHUTDOWN;
3039		unix_state_lock(other);
3040		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3041		unix_state_unlock(other);
3042		other->sk_state_change(other);
3043		if (peer_mode == SHUTDOWN_MASK)
3044			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3045		else if (peer_mode & RCV_SHUTDOWN)
3046			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3047	}
3048	if (other)
3049		sock_put(other);
3050
3051	return 0;
3052}
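/* Illustrative userspace sketch (not part of this file): a half-close
 * as handled by unix_shutdown() above.  SHUT_WR on one end becomes
 * RCV_SHUTDOWN on the peer, so the peer's read() returns 0 (EOF) once
 * the queue drains, while data can still flow the other way.
 */
#include <sys/socket.h>

static int half_close(int fd)
{
	/* No more writes from us; we can still read the peer's data. */
	return shutdown(fd, SHUT_WR);
}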
3053
3054long unix_inq_len(struct sock *sk)
3055{
3056	struct sk_buff *skb;
3057	long amount = 0;
3058
3059	if (sk->sk_state == TCP_LISTEN)
3060		return -EINVAL;
3061
3062	spin_lock(&sk->sk_receive_queue.lock);
3063	if (sk->sk_type == SOCK_STREAM ||
3064	    sk->sk_type == SOCK_SEQPACKET) {
3065		skb_queue_walk(&sk->sk_receive_queue, skb)
3066			amount += unix_skb_len(skb);
3067	} else {
3068		skb = skb_peek(&sk->sk_receive_queue);
3069		if (skb)
3070			amount = skb->len;
3071	}
3072	spin_unlock(&sk->sk_receive_queue.lock);
3073
3074	return amount;
3075}
3076EXPORT_SYMBOL_GPL(unix_inq_len);
3077
3078long unix_outq_len(struct sock *sk)
3079{
3080	return sk_wmem_alloc_get(sk);
3081}
3082EXPORT_SYMBOL_GPL(unix_outq_len);
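/* Illustrative userspace sketch (not part of this file; helper name is
 * made up): unix_inq_len() and unix_outq_len() back the SIOCINQ and
 * SIOCOUTQ ioctls.  For SOCK_STREAM, SIOCINQ counts all unread bytes;
 * for SOCK_DGRAM it reports only the datagram at the head of the
 * queue, as the code above shows.
 */
#include <linux/sockios.h>
#include <sys/ioctl.h>

static int queued_bytes(int fd, int *in, int *out)
{
	if (ioctl(fd, SIOCINQ, in) < 0)
		return -1;
	return ioctl(fd, SIOCOUTQ, out);
}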
3083
3084static int unix_open_file(struct sock *sk)
3085{
3086	struct path path;
3087	struct file *f;
3088	int fd;
3089
3090	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3091		return -EPERM;
3092
3093	if (!smp_load_acquire(&unix_sk(sk)->addr))
3094		return -ENOENT;
3095
3096	path = unix_sk(sk)->path;
3097	if (!path.dentry)
3098		return -ENOENT;
3099
3100	path_get(&path);
3101
3102	fd = get_unused_fd_flags(O_CLOEXEC);
3103	if (fd < 0)
3104		goto out;
3105
3106	f = dentry_open(&path, O_PATH, current_cred());
3107	if (IS_ERR(f)) {
3108		put_unused_fd(fd);
3109		fd = PTR_ERR(f);
3110		goto out;
3111	}
3112
3113	fd_install(fd, f);
3114out:
3115	path_put(&path);
3116
3117	return fd;
3118}
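/* Illustrative userspace sketch (not part of this file; helper name is
 * made up): SIOCUNIXFILE, served by unix_open_file() above, yields an
 * O_PATH file descriptor for the filesystem object the socket is bound
 * to.  It requires CAP_NET_ADMIN and is used by tools such as ss(8).
 */
#include <linux/sockios.h>
#include <sys/ioctl.h>

static int open_bound_path(int unix_fd)
{
	/* Returns a new O_PATH fd, or -1 with errno set on failure. */
	return ioctl(unix_fd, SIOCUNIXFILE);
}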
3119
3120static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3121{
3122	struct sock *sk = sock->sk;
3123	long amount = 0;
3124	int err;
3125
3126	switch (cmd) {
3127	case SIOCOUTQ:
3128		amount = unix_outq_len(sk);
3129		err = put_user(amount, (int __user *)arg);
3130		break;
3131	case SIOCINQ:
3132		amount = unix_inq_len(sk);
3133		if (amount < 0)
3134			err = amount;
3135		else
3136			err = put_user(amount, (int __user *)arg);
3137		break;
3138	case SIOCUNIXFILE:
3139		err = unix_open_file(sk);
3140		break;
3141#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3142	case SIOCATMARK:
3143		{
3144			struct sk_buff *skb;
3145			int answ = 0;
3146
3147			skb = skb_peek(&sk->sk_receive_queue);
3148			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3149				answ = 1;
3150			err = put_user(answ, (int __user *)arg);
3151		}
3152		break;
3153#endif
3154	default:
3155		err = -ENOIOCTLCMD;
3156		break;
3157	}
3158	return err;
3159}
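/* Illustrative userspace sketch (not part of this file; helper name is
 * made up): SIOCATMARK reports whether the next read would start at
 * the out-of-band mark, i.e. whether the skb at the head of the
 * receive queue is the current oob_skb, as tested above.
 */
#include <sys/ioctl.h>
#include <sys/socket.h>

static int at_oob_mark(int fd)
{
	int at_mark = 0;

	if (ioctl(fd, SIOCATMARK, &at_mark) < 0)
		return -1;
	return at_mark;	/* 1: the next read starts at the OOB byte */
}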
3160
3161#ifdef CONFIG_COMPAT
3162static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3163{
3164	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3165}
3166#endif
3167
3168static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3169{
3170	struct sock *sk = sock->sk;
3171	__poll_t mask;
3172	u8 shutdown;
3173
3174	sock_poll_wait(file, sock, wait);
3175	mask = 0;
3176	shutdown = READ_ONCE(sk->sk_shutdown);
3177
3178	/* exceptional events? */
3179	if (READ_ONCE(sk->sk_err))
3180		mask |= EPOLLERR;
3181	if (shutdown == SHUTDOWN_MASK)
3182		mask |= EPOLLHUP;
3183	if (shutdown & RCV_SHUTDOWN)
3184		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3185
3186	/* readable? */
3187	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3188		mask |= EPOLLIN | EPOLLRDNORM;
3189	if (sk_is_readable(sk))
3190		mask |= EPOLLIN | EPOLLRDNORM;
3191#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3192	if (READ_ONCE(unix_sk(sk)->oob_skb))
3193		mask |= EPOLLPRI;
3194#endif
3195
3196	/* Connection-based need to check for termination and startup */
3197	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3198	    sk->sk_state == TCP_CLOSE)
3199		mask |= EPOLLHUP;
3200
3201	/*
3202	 * We set writable also when the other side has shut down the
3203	 * connection. This prevents stuck sockets.
3204	 */
3205	if (unix_writable(sk))
3206		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3207
3208	return mask;
3209}
3210
3211static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3212				    poll_table *wait)
3213{
3214	struct sock *sk = sock->sk, *other;
3215	unsigned int writable;
3216	__poll_t mask;
3217	u8 shutdown;
3218
3219	sock_poll_wait(file, sock, wait);
3220	mask = 0;
3221	shutdown = READ_ONCE(sk->sk_shutdown);
3222
3223	/* exceptional events? */
3224	if (READ_ONCE(sk->sk_err) ||
3225	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3226		mask |= EPOLLERR |
3227			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3228
3229	if (shutdown & RCV_SHUTDOWN)
3230		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3231	if (shutdown == SHUTDOWN_MASK)
3232		mask |= EPOLLHUP;
3233
3234	/* readable? */
3235	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3236		mask |= EPOLLIN | EPOLLRDNORM;
3237	if (sk_is_readable(sk))
3238		mask |= EPOLLIN | EPOLLRDNORM;
3239
3240	/* Connection-based need to check for termination and startup */
3241	if (sk->sk_type == SOCK_SEQPACKET) {
3242		if (sk->sk_state == TCP_CLOSE)
3243			mask |= EPOLLHUP;
3244		/* connection hasn't started yet? */
3245		if (sk->sk_state == TCP_SYN_SENT)
3246			return mask;
3247	}
3248
3249	/* No write status requested, avoid expensive OUT tests. */
3250	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3251		return mask;
3252
3253	writable = unix_writable(sk);
3254	if (writable) {
3255		unix_state_lock(sk);
3256
3257		other = unix_peer(sk);
3258		if (other && unix_peer(other) != sk &&
3259		    unix_recvq_full_lockless(other) &&
3260		    unix_dgram_peer_wake_me(sk, other))
3261			writable = 0;
3262
3263		unix_state_unlock(sk);
3264	}
3265
3266	if (writable)
3267		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3268	else
3269		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3270
3271	return mask;
3272}
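/* Illustrative userspace sketch (not part of this file; helper name is
 * made up): waiting for a datagram socket to become writable again.
 * unix_dgram_poll() above reports EPOLLOUT only once the peer's
 * receive queue has room, arming unix_dgram_peer_wake_me() so the
 * sender is woken when the queue drains.
 */
#include <sys/epoll.h>

static int wait_writable(int epfd, int fd)
{
	struct epoll_event ev = { .events = EPOLLOUT, .data.fd = fd };

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
		return -1;
	return epoll_wait(epfd, &ev, 1, -1);	/* blocks until writable */
}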
3273
3274#ifdef CONFIG_PROC_FS
3275
3276#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3277
3278#define get_bucket(x) ((x) >> BUCKET_SPACE)
3279#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3280#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
3281
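/* Worked example (assuming 64-bit longs and UNIX_HASH_BITS == 8): the
 * seq_file position packs a hash bucket and a 1-based in-bucket offset
 * into one loff_t.  BUCKET_SPACE is then 64 - 9 - 1 = 54 bits, so
 * set_bucket_offset(2, 5) yields (2UL << 54) | 5, from which
 * get_bucket() and get_offset() recover 2 and 5 again.
 */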
3282static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3283{
3284	unsigned long offset = get_offset(*pos);
3285	unsigned long bucket = get_bucket(*pos);
3286	unsigned long count = 0;
3287	struct sock *sk;
3288
3289	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3290	     sk; sk = sk_next(sk)) {
3291		if (++count == offset)
3292			break;
3293	}
3294
3295	return sk;
3296}
3297
3298static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3299{
3300	unsigned long bucket = get_bucket(*pos);
3301	struct net *net = seq_file_net(seq);
3302	struct sock *sk;
3303
3304	while (bucket < UNIX_HASH_SIZE) {
3305		spin_lock(&net->unx.table.locks[bucket]);
3306
3307		sk = unix_from_bucket(seq, pos);
3308		if (sk)
3309			return sk;
3310
3311		spin_unlock(&net->unx.table.locks[bucket]);
3312
3313		*pos = set_bucket_offset(++bucket, 1);
3314	}
3315
3316	return NULL;
3317}
3318
3319static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3320				  loff_t *pos)
3321{
3322	unsigned long bucket = get_bucket(*pos);
3323
3324	sk = sk_next(sk);
3325	if (sk)
3326		return sk;
3327
3328
3329	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3330
3331	*pos = set_bucket_offset(++bucket, 1);
3332
3333	return unix_get_first(seq, pos);
3334}
3335
3336static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3337{
3338	if (!*pos)
3339		return SEQ_START_TOKEN;
3340
3341	return unix_get_first(seq, pos);
3342}
3343
3344static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3345{
3346	++*pos;
3347
3348	if (v == SEQ_START_TOKEN)
3349		return unix_get_first(seq, pos);
3350
3351	return unix_get_next(seq, v, pos);
3352}
3353
3354static void unix_seq_stop(struct seq_file *seq, void *v)
3355{
3356	struct sock *sk = v;
3357
3358	if (sk)
3359		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3360}
3361
3362static int unix_seq_show(struct seq_file *seq, void *v)
3363{
3364
3365	if (v == SEQ_START_TOKEN)
3366		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3367			 "Inode Path\n");
3368	else {
3369		struct sock *s = v;
3370		struct unix_sock *u = unix_sk(s);
3371		unix_state_lock(s);
3372
3373		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3374			s,
3375			refcount_read(&s->sk_refcnt),
3376			0,
3377			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3378			s->sk_type,
3379			s->sk_socket ?
3380			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3381			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3382			sock_i_ino(s));
3383
3384		if (u->addr) {	// under a hash table lock here
3385			int i, len;
3386			seq_putc(seq, ' ');
3387
3388			i = 0;
3389			len = u->addr->len -
3390				offsetof(struct sockaddr_un, sun_path);
3391			if (u->addr->name->sun_path[0]) {
3392				len--;
3393			} else {
3394				seq_putc(seq, '@');
3395				i++;
3396			}
3397			for ( ; i < len; i++)
3398				seq_putc(seq, u->addr->name->sun_path[i] ?:
3399					 '@');
3400		}
3401		unix_state_unlock(s);
3402		seq_putc(seq, '\n');
3403	}
3404
3405	return 0;
3406}
3407
3408static const struct seq_operations unix_seq_ops = {
3409	.start  = unix_seq_start,
3410	.next   = unix_seq_next,
3411	.stop   = unix_seq_stop,
3412	.show   = unix_seq_show,
3413};
3414
3415#ifdef CONFIG_BPF_SYSCALL
3416struct bpf_unix_iter_state {
3417	struct seq_net_private p;
3418	unsigned int cur_sk;
3419	unsigned int end_sk;
3420	unsigned int max_sk;
3421	struct sock **batch;
3422	bool st_bucket_done;
3423};
3424
3425struct bpf_iter__unix {
3426	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3427	__bpf_md_ptr(struct unix_sock *, unix_sk);
3428	uid_t uid __aligned(8);
3429};
3430
3431static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3432			      struct unix_sock *unix_sk, uid_t uid)
3433{
3434	struct bpf_iter__unix ctx;
3435
3436	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3437	ctx.meta = meta;
3438	ctx.unix_sk = unix_sk;
3439	ctx.uid = uid;
3440	return bpf_iter_run_prog(prog, &ctx);
3441}
3442
3443static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3445{
3446	struct bpf_unix_iter_state *iter = seq->private;
3447	unsigned int expected = 1;
3448	struct sock *sk;
3449
3450	sock_hold(start_sk);
3451	iter->batch[iter->end_sk++] = start_sk;
3452
3453	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3454		if (iter->end_sk < iter->max_sk) {
3455			sock_hold(sk);
3456			iter->batch[iter->end_sk++] = sk;
3457		}
3458
3459		expected++;
3460	}
3461
3462	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3463
3464	return expected;
3465}
3466
3467static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3468{
3469	while (iter->cur_sk < iter->end_sk)
3470		sock_put(iter->batch[iter->cur_sk++]);
3471}
3472
3473static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3474				       unsigned int new_batch_sz)
3475{
3476	struct sock **new_batch;
3477
3478	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3479			     GFP_USER | __GFP_NOWARN);
3480	if (!new_batch)
3481		return -ENOMEM;
3482
3483	bpf_iter_unix_put_batch(iter);
3484	kvfree(iter->batch);
3485	iter->batch = new_batch;
3486	iter->max_sk = new_batch_sz;
3487
3488	return 0;
3489}
3490
3491static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3492					loff_t *pos)
3493{
3494	struct bpf_unix_iter_state *iter = seq->private;
3495	unsigned int expected;
3496	bool resized = false;
3497	struct sock *sk;
3498
3499	if (iter->st_bucket_done)
3500		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3501
3502again:
3503	/* Get a new batch */
3504	iter->cur_sk = 0;
3505	iter->end_sk = 0;
3506
3507	sk = unix_get_first(seq, pos);
3508	if (!sk)
3509		return NULL; /* Done */
3510
3511	expected = bpf_iter_unix_hold_batch(seq, sk);
3512
3513	if (iter->end_sk == expected) {
3514		iter->st_bucket_done = true;
3515		return sk;
3516	}
3517
3518	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3519		resized = true;
3520		goto again;
3521	}
3522
3523	return sk;
3524}
3525
3526static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3527{
3528	if (!*pos)
3529		return SEQ_START_TOKEN;
3530
3531	/* bpf iter does not support lseek, so it always
3532	 * continues from where it was stop()-ped.
3533	 */
3534	return bpf_iter_unix_batch(seq, pos);
3535}
3536
3537static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3538{
3539	struct bpf_unix_iter_state *iter = seq->private;
3540	struct sock *sk;
3541
3542	/* Whenever seq_next() is called, the sk at iter->cur_sk is
3543	 * done with seq_show(), so advance to the next sk in
3544	 * the batch.
3545	 */
3546	if (iter->cur_sk < iter->end_sk)
3547		sock_put(iter->batch[iter->cur_sk++]);
3548
3549	++*pos;
3550
3551	if (iter->cur_sk < iter->end_sk)
3552		sk = iter->batch[iter->cur_sk];
3553	else
3554		sk = bpf_iter_unix_batch(seq, pos);
3555
3556	return sk;
3557}
3558
3559static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3560{
3561	struct bpf_iter_meta meta;
3562	struct bpf_prog *prog;
3563	struct sock *sk = v;
3564	uid_t uid;
3565	bool slow;
3566	int ret;
3567
3568	if (v == SEQ_START_TOKEN)
3569		return 0;
3570
3571	slow = lock_sock_fast(sk);
3572
3573	if (unlikely(sk_unhashed(sk))) {
3574		ret = SEQ_SKIP;
3575		goto unlock;
3576	}
3577
3578	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3579	meta.seq = seq;
3580	prog = bpf_iter_get_info(&meta, false);
3581	ret = unix_prog_seq_show(prog, &meta, v, uid);
3582unlock:
3583	unlock_sock_fast(sk, slow);
3584	return ret;
3585}
3586
3587static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3588{
3589	struct bpf_unix_iter_state *iter = seq->private;
3590	struct bpf_iter_meta meta;
3591	struct bpf_prog *prog;
3592
3593	if (!v) {
3594		meta.seq = seq;
3595		prog = bpf_iter_get_info(&meta, true);
3596		if (prog)
3597			(void)unix_prog_seq_show(prog, &meta, v, 0);
3598	}
3599
3600	if (iter->cur_sk < iter->end_sk)
3601		bpf_iter_unix_put_batch(iter);
3602}
3603
3604static const struct seq_operations bpf_iter_unix_seq_ops = {
3605	.start	= bpf_iter_unix_seq_start,
3606	.next	= bpf_iter_unix_seq_next,
3607	.stop	= bpf_iter_unix_seq_stop,
3608	.show	= bpf_iter_unix_seq_show,
3609};
3610#endif
3611#endif
3612
3613static const struct net_proto_family unix_family_ops = {
3614	.family = PF_UNIX,
3615	.create = unix_create,
3616	.owner	= THIS_MODULE,
3617};
3618
3619
3620static int __net_init unix_net_init(struct net *net)
3621{
3622	int i;
3623
3624	net->unx.sysctl_max_dgram_qlen = 10;
3625	if (unix_sysctl_register(net))
3626		goto out;
3627
3628#ifdef CONFIG_PROC_FS
3629	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3630			     sizeof(struct seq_net_private)))
3631		goto err_sysctl;
3632#endif
3633
3634	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3635					      sizeof(spinlock_t), GFP_KERNEL);
3636	if (!net->unx.table.locks)
3637		goto err_proc;
3638
3639	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3640						sizeof(struct hlist_head),
3641						GFP_KERNEL);
3642	if (!net->unx.table.buckets)
3643		goto free_locks;
3644
3645	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3646		spin_lock_init(&net->unx.table.locks[i]);
3647		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3648	}
3649
3650	return 0;
3651
3652free_locks:
3653	kvfree(net->unx.table.locks);
3654err_proc:
3655#ifdef CONFIG_PROC_FS
3656	remove_proc_entry("unix", net->proc_net);
3657err_sysctl:
3658#endif
3659	unix_sysctl_unregister(net);
3660out:
3661	return -ENOMEM;
3662}
3663
3664static void __net_exit unix_net_exit(struct net *net)
3665{
3666	kvfree(net->unx.table.buckets);
3667	kvfree(net->unx.table.locks);
3668	unix_sysctl_unregister(net);
3669	remove_proc_entry("unix", net->proc_net);
3670}
3671
3672static struct pernet_operations unix_net_ops = {
3673	.init = unix_net_init,
3674	.exit = unix_net_exit,
3675};
3676
3677#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3678DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3679		     struct unix_sock *unix_sk, uid_t uid)
3680
3681#define INIT_BATCH_SZ 16
3682
3683static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3684{
3685	struct bpf_unix_iter_state *iter = priv_data;
3686	int err;
3687
3688	err = bpf_iter_init_seq_net(priv_data, aux);
3689	if (err)
3690		return err;
3691
3692	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3693	if (err) {
3694		bpf_iter_fini_seq_net(priv_data);
3695		return err;
3696	}
3697
3698	return 0;
3699}
3700
3701static void bpf_iter_fini_unix(void *priv_data)
3702{
3703	struct bpf_unix_iter_state *iter = priv_data;
3704
3705	bpf_iter_fini_seq_net(priv_data);
3706	kvfree(iter->batch);
3707}
3708
3709static const struct bpf_iter_seq_info unix_seq_info = {
3710	.seq_ops		= &bpf_iter_unix_seq_ops,
3711	.init_seq_private	= bpf_iter_init_unix,
3712	.fini_seq_private	= bpf_iter_fini_unix,
3713	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3714};
3715
3716static const struct bpf_func_proto *
3717bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3718			     const struct bpf_prog *prog)
3719{
3720	switch (func_id) {
3721	case BPF_FUNC_setsockopt:
3722		return &bpf_sk_setsockopt_proto;
3723	case BPF_FUNC_getsockopt:
3724		return &bpf_sk_getsockopt_proto;
3725	default:
3726		return NULL;
3727	}
3728}
3729
3730static struct bpf_iter_reg unix_reg_info = {
3731	.target			= "unix",
3732	.ctx_arg_info_size	= 1,
3733	.ctx_arg_info		= {
3734		{ offsetof(struct bpf_iter__unix, unix_sk),
3735		  PTR_TO_BTF_ID_OR_NULL },
3736	},
3737	.get_func_proto         = bpf_iter_unix_get_func_proto,
3738	.seq_info		= &unix_seq_info,
3739};
3740
3741static void __init bpf_iter_register(void)
3742{
3743	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3744	if (bpf_iter_reg_target(&unix_reg_info))
3745		pr_warn("Warning: could not register bpf iterator unix\n");
3746}
3747#endif
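/* Illustrative BPF-side sketch (not part of this file): a minimal
 * program for the "unix" iterator target registered above, built
 * against libbpf with a generated vmlinux.h.  Once loaded it is
 * typically pinned (e.g. "bpftool iter pin prog.o /sys/fs/bpf/unix")
 * and read back with cat.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/unix")
int dump_unix(struct bpf_iter__unix *ctx)
{
	struct unix_sock *unix_sk = ctx->unix_sk;

	if (!unix_sk)		/* NULL on the final stop() pass */
		return 0;

	BPF_SEQ_PRINTF(ctx->meta->seq, "uid=%u\n", ctx->uid);
	return 0;
}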
3748
3749static int __init af_unix_init(void)
3750{
3751	int i, rc = -1;
3752
3753	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3754
3755	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3756		spin_lock_init(&bsd_socket_locks[i]);
3757		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3758	}
3759
3760	rc = proto_register(&unix_dgram_proto, 1);
3761	if (rc != 0) {
3762		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3763		goto out;
3764	}
3765
3766	rc = proto_register(&unix_stream_proto, 1);
3767	if (rc != 0) {
3768		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3769		proto_unregister(&unix_dgram_proto);
3770		goto out;
3771	}
3772
3773	sock_register(&unix_family_ops);
3774	register_pernet_subsys(&unix_net_ops);
3775	unix_bpf_build_proto();
3776
3777#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3778	bpf_iter_register();
3779#endif
3780
3781out:
3782	return rc;
3783}
3784
3785/* Later than subsys_initcall() because we depend on stuff initialised there */
3786fs_initcall(af_unix_init);