/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Linus Torvalds		:	Assorted bug cures.
 *	Niibe Yutaka		:	async I/O support.
 *	Carsten Paeth		:	PF_UNIX check, address fixes.
 *	Alan Cox		:	Limit size of allocated blocks.
 *	Alan Cox		:	Fixed the stupid socketpair bug.
 *	Alan Cox		:	BSD compatibility fine tuning.
 *	Alan Cox		:	Fixed a bug in connect when interrupted.
 *	Alan Cox		:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *	Marty Leisner		:	Fixes to fd passing
 *	Nick Nevin		:	recvmsg bugfix.
 *	Alan Cox		:	Started proper garbage collector
 *	Heiko Eißfeldt		:	Missing verify_area check
 *	Alan Cox		:	Started POSIXisms
 *	Andreas Schwab		:	Replace inode by dentry for proper
 *					reference counting
 *	Kirk Petersen		:	Made this a module
 *	Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid a huge number
 *					of socks being hashed (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *	Artur Skawina		:	Hash function optimizations
 *	Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	Malcolm Beattie		:	Set peercred for socketpair
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Arnaldo C. Melo		:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with BSD names.
 */
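/*
 * Illustration only (not part of this file's logic): a minimal
 * userspace sketch of the two binding flavours described above;
 * the name "\0example" and path "/tmp/sock" are hypothetical.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	// Filesystem name: NUL-terminated path, a socket inode in the VFS.
 *	strcpy(a.sun_path, "/tmp/sock");
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	// Abstract name: leading NUL, not NUL-terminated; the address
 *	// length passed to bind() decides how many name bytes count.
 *	memcpy(a.sun_path, "\0example", 8);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 */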

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;


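/*
 * unix_socket_table is split in two halves: buckets [0, UNIX_HASH_SIZE)
 * hold bound sockets (abstract names hashed by name, filesystem names
 * hashed by inode number with addr->hash pinned to UNIX_HASH_SIZE, so
 * UNIX_ABSTRACT() below stays false for them), while buckets
 * [UNIX_HASH_SIZE, 2 * UNIX_HASH_SIZE) hold unbound sockets, hashed
 * here by socket address.
 */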
static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */
/*
 * SMP locking strategy:
 *    the hash table is protected with the spinlock unix_table_lock;
 *    each socket state is protected by a separate spin lock.
 */

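/*
 * Fold a 32-bit checksum of the address down to a table index:
 * csum_fold() reduces it to 16 bits, the upper byte is mixed into
 * the lower one, and the result is masked to UNIX_HASH_SIZE buckets.
 */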
static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)csum_fold(n);

	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check unix socket name:
 *		- should not be zero length.
 *		- if it does not start with zero, it should be NUL terminated
 *		  (FS object)
 *		- if it starts with zero, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off-by-one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (e.g., /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram, which needs to be propagated to sleeping would-be writers,
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it, and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_t of the client
 * socket is enqueued on the peer_wait queue of the server socket,
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and is broken when
 * the association to the server socket is dissolved or after a wake
 * up was relayed.
 */
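/*
 * A concrete example of the scheme above (names are illustrative):
 * client C sends to server S (say, a /dev/log listener) and finds S's
 * receive queue "too large". C's peer_wake entry is then hooked onto
 * S's peer_wait queue and C sleeps (or poll() reports not writable).
 * When S's dgram recv path wakes peer_wait, unix_dgram_peer_wake_relay()
 * below unhooks the entry and wakes C's own wait queue, so C can retry.
 */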

static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key);

	return 0;
}

static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);

		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   POLLOUT |
				   POLLWRNORM |
				   POLLWRBAND);
}

/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 * Returns 1 if the caller should wait (the peer's receive queue is
 * full and a wake-up relay has been registered), 0 otherwise.
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	if (unix_recvq_full(other))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}

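/* A socket is writable while it is not listening and less than a
 * quarter of its send buffer is consumed (the "<< 2" below).
 */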
static int unix_writable(const struct sock *sk)
{
	return sk->sk_state != TCP_LISTEN &&
	       (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, a sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this when
		 * the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		UNIXCB(skb).consumed = skb->len;
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

570 * Fixme: BSD difference: In BSD all sockets connected to us get
571 * ECONNRESET and we die on the spot. In Linux we behave
572 * like files and pipes do and wait for the last
573 * dereference.
574 *
575 * Can't we simply set sock->err?
576 *
577 * What the above comment does talk about? --ANK(980817)
578 */

	if (unix_tot_inflight)
		unix_gc();	/* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
				    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->iolock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->iolock);

	return 0;
}


static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	unix_stream_sendpage,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static struct proto unix_proto = {
	.name =		"UNIX",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
			  &af_unix_sk_receive_queue_lock_key);

	sk->sk_allocation = GFP_KERNEL_ACCOUNT;
	sk->sk_write_space = unix_write_space;
	sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct = unix_sock_destructor;
	u = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
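		/* fall through */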
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = d_backing_inode(path.dentry);
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;
	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;
	struct path path = { NULL, NULL };

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (sun_path[0]) {
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			goto out;
		}
	}

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_put;

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		addr->hash = UNIX_HASH_SIZE;
		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->bindlock);
out_put:
	if (err)
		path_put(&path);
out:
	return err;
}

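/* Lock the state of two sockets in a fixed (pointer) order so that
 * concurrent double-locks of the same pair cannot deadlock.
 */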
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all, allocate resources.
	 * If we do it after the state is locked,
	 * we will have to recheck everything again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL, 0);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.
	 *
	 * This is a tricky place. We need to grab our state lock and cannot
	 * drop the lock on the peer. It is dangerous because a deadlock is
	 * possible. The connect-to-self case and a simultaneous
	 * attempt to connect are eliminated by checking the socket
	 * state. other is TCP_LISTEN; if sk is TCP_LISTEN, we
	 * check this before attempting to grab the lock.
	 *
	 * Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk) = sk;
	newsk->sk_state = TCP_ESTABLISHED;
	newsk->sk_type = sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk) = newsk;

	unix_state_unlock(sk);

	/* take it and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state = SS_CONNECTED;
		sockb->state = SS_CONNECTED;
	}
	return 0;
}

static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}


static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

/*
 * The "user->unix_inflight" variable is protected by the garbage
 * collection lock, and we just read it locklessly here. If you go
 * over the limit, there might be a tiny race in actually noticing
 * it across threads. Tough.
 */
static inline bool too_many_unix_fds(struct task_struct *p)
{
	struct user_struct *user = current_user();

	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
	return false;
}

#define MAX_RECURSION_LEVEL 4
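
/* unix_attach_fds() below tracks how deeply AF_UNIX sockets are
 * nested inside SCM_RIGHTS messages (recursion_level) and refuses
 * anything nested deeper than MAX_RECURSION_LEVEL.
 */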

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;

	if (too_many_unix_fds(current))
		return -ETOOMANYREFS;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk)
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_inflight(scm->fp->user, scm->fp->fp[i]);
	return max_level;
}

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	unix_get_secdata(scm, skb);
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

static bool unix_passcred_enabled(const struct socket *sock,
				  const struct sock *other)
{
	return test_bit(SOCK_PASSCRED, &sock->flags) ||
	       !other->sk_socket ||
	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).pid)
		return;
	if (unix_passcred_enabled(sock, other)) {
		UNIXCB(skb).pid = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
	}
}

static int maybe_init_creds(struct scm_cookie *scm,
			    struct socket *socket,
			    const struct sock *other)
{
	int err;
	struct msghdr msg = { .msg_controllen = 0 };

	err = scm_send(socket, &msg, scm, false);
	if (err)
		return err;

	if (unix_passcred_enabled(socket, other)) {
		scm->pid = get_pid(task_tgid(current));
		current_uid_gid(&scm->creds.uid, &scm->creds.gid);
	}
	return err;
}

static bool unix_skb_scm_eq(struct sk_buff *skb,
			    struct scm_cookie *scm)
{
	const struct unix_skb_parms *u = &UNIXCB(skb);

	return u->pid == scm->pid &&
	       uid_eq(u->uid, scm->creds.uid) &&
	       gid_eq(u->gid, scm->creds.gid) &&
	       unix_secdata_eq(scm, skb);
}

/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			      size_t len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned int hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie scm;
	int max_level;
	int data_len = 0;
	int sk_locked;

	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	if (len > SKB_MAX_ALLOC) {
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		data_len = PAGE_ALIGN(data_len);

		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
	}

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err,
				   PAGE_ALLOC_COSTLY_ORDER);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(&scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	sk_locked = 0;
	unix_state_lock(other);
restart_locked:
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (unlikely(sock_flag(other, SOCK_DEAD))) {
		/*
		 * Check with 1003.1g - what should a
		 * datagram error return?
		 */
		unix_state_unlock(other);
		sock_put(other);

		if (!sk_locked)
			unix_state_lock(sk);

		err = 0;
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_dgram_peer_wake_disconnect_wakeup(sk, other);

			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	/* other == sk && unix_peer(other) != sk if
	 * - unix_peer(sk) == NULL, destination address bound to sk
	 * - unix_peer(sk) == sk by time of get but disconnected before lock
	 */
	if (other != sk &&
	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
		if (timeo) {
			timeo = unix_wait_for_peer(other, timeo);

			err = sock_intr_errno(timeo);
			if (signal_pending(current))
				goto out_free;

			goto restart;
		}

		if (!sk_locked) {
			unix_state_unlock(other);
			unix_state_double_lock(sk, other);
		}

		if (unix_peer(sk) != other ||
		    unix_dgram_peer_wake_me(sk, other)) {
			err = -EAGAIN;
			sk_locked = 1;
			goto out_unlock;
		}

		if (!sk_locked) {
			sk_locked = 1;
			goto restart_locked;
		}
	}

	if (unlikely(sk_locked))
		unix_state_unlock(sk);

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	scm_destroy(&scm);
	return len;

out_unlock:
	if (sk_locked)
		unix_state_unlock(sk);
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(&scm);
	return err;
}

/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
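
/* For example, with 4 KiB pages get_order(32768) == 3, so
 * UNIX_SKB_FRAGS_SZ is 32 KiB; with 64 KiB pages it rounds up to one
 * full page.
 */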

static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie scm;
	bool fds_sent = false;
	int max_level;
	int data_len;

	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

		/* allow fallback to order-0 allocations */
		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
					   msg->msg_flags & MSG_DONTWAIT, &err,
					   get_order(UNIX_SKB_FRAGS_SZ));
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(&scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		skb_put(skb, size - data_len);
		skb->data_len = data_len;
		skb->len = size;
		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other);
		sent += size;
	}

	scm_destroy(&scm);

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(&scm);
	return sent ? : err;
}

static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
				    int offset, size_t size, int flags)
{
	int err;
	bool send_sigpipe = false;
	bool init_scm = true;
	struct scm_cookie scm;
	struct sock *other, *sk = socket->sk;
	struct sk_buff *skb, *newskb = NULL, *tail = NULL;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	other = unix_peer(sk);
	if (!other || sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (false) {
alloc_skb:
		unix_state_unlock(other);
		mutex_unlock(&unix_sk(other)->iolock);
		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
					      &err, 0);
		if (!newskb)
			goto err;
	}

	/* we must acquire iolock as we modify already present
	 * skbs in the sk_receive_queue and mess with skb->len
	 */
	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
	if (err) {
		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
		goto err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		send_sigpipe = true;
		goto err_unlock;
	}

	unix_state_lock(other);

	if (sock_flag(other, SOCK_DEAD) ||
	    other->sk_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		send_sigpipe = true;
		goto err_state_unlock;
	}

	if (init_scm) {
		err = maybe_init_creds(&scm, socket, other);
		if (err)
			goto err_state_unlock;
		init_scm = false;
	}

	skb = skb_peek_tail(&other->sk_receive_queue);
	if (tail && tail == skb) {
		skb = newskb;
	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
		if (newskb) {
			skb = newskb;
		} else {
			tail = skb;
			goto alloc_skb;
		}
	} else if (newskb) {
		/* This is the fast path; we don't strictly need to call
		 * kfree_skb here, and even with newskb == NULL this
		 * does no harm.
		 */
		consume_skb(newskb);
		newskb = NULL;
	}

	if (skb_append_pagefrags(skb, page, offset, size)) {
		tail = skb;
		goto alloc_skb;
	}

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	atomic_add(size, &sk->sk_wmem_alloc);

	if (newskb) {
		err = unix_scm_to_skb(&scm, skb, false);
		if (err)
			goto err_state_unlock;
		spin_lock(&other->sk_receive_queue.lock);
		__skb_queue_tail(&other->sk_receive_queue, newskb);
		spin_unlock(&other->sk_receive_queue.lock);
	}

	unix_state_unlock(other);
	mutex_unlock(&unix_sk(other)->iolock);

	other->sk_data_ready(other);
	scm_destroy(&scm);
	return size;

err_state_unlock:
	unix_state_unlock(other);
err_unlock:
	mutex_unlock(&unix_sk(other)->iolock);
err:
	kfree_skb(newskb);
	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	if (!init_scm)
		scm_destroy(&scm);
	return err;
}

static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
				  size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
				  size_t size, int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(sock, msg, size, flags);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}

static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			      size_t size, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sk_buff *skb, *last;
	long timeo;
	int err;
	int peeked, skip;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		mutex_lock(&u->iolock);

		skip = sk_peek_offset(sk, flags);
		skb = __skb_try_recv_datagram(sk, flags, NULL, &peeked, &skip,
					      &err, &last);
		if (skb)
			break;

		mutex_unlock(&u->iolock);

		if (err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &err, &timeo, last));

	if (!skb) { /* implies iolock unlocked */
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out;
	}

	if (wq_has_sleeper(&u->peer_wait))
		wake_up_interruptible_sync_poll(&u->peer_wait,
						POLLOUT | POLLWRNORM |
						POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, skip, msg, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	memset(&scm, 0, sizeof(scm));

	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(&scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(&scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
2174 /* It is questionable: on PEEK we could:
2175 - do not return fds - good, but too simple 8)
2176 - return fds, and do not return them on read (old strategy,
2177 apparently wrong)
2178 - clone fds (I chose it for now, it is the most universal
2179 solution)
2180
2181 POSIX 1003.1g does not actually define this clearly
2182 at all. POSIX 1003.1g doesn't define a lot of things
2183 clearly however!
2184
2185 */
2186
2187 sk_peek_offset_fwd(sk, size);
2188
2189 if (UNIXCB(skb).fp)
2190 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2191 }
2192 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2193
2194 scm_recv(sock, msg, &scm, flags);
2195
2196out_free:
2197 skb_free_datagram(sk, skb);
2198 mutex_unlock(&u->iolock);
2199out:
2200 return err;
2201}
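/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * MSG_PEEK strategy chosen above: peeking a queued SCM_RIGHTS message
 * installs freshly cloned descriptors, and the later non-peek read
 * installs the same files again under new fd numbers. The helper
 * recv_one_fd() below is hypothetical.
 *
 *	static int recv_one_fd(int sock, int flags)
 *	{
 *		union {
 *			struct cmsghdr align;
 *			char buf[CMSG_SPACE(sizeof(int))];
 *		} u;
 *		char data;
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *		};
 *		struct cmsghdr *cmsg;
 *
 *		if (recvmsg(sock, &msg, flags) < 0)
 *			return -1;
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
 *			return -1;
 *		return *(int *)CMSG_DATA(cmsg);
 *	}
 *
 *	int fd1 = recv_one_fd(sock, MSG_PEEK);	// cloned fd, message stays queued
 *	int fd2 = recv_one_fd(sock, 0);		// same file, new fd; message consumed
 */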
2202
2203/*
2204 * Sleep until more data has arrived. But check for races..
2205 */
2206static long unix_stream_data_wait(struct sock *sk, long timeo,
2207 struct sk_buff *last, unsigned int last_len,
2208 bool freezable)
2209{
2210 struct sk_buff *tail;
2211 DEFINE_WAIT(wait);
2212
2213 unix_state_lock(sk);
2214
2215 for (;;) {
2216 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2217
2218 tail = skb_peek_tail(&sk->sk_receive_queue);
2219 if (tail != last ||
2220 (tail && tail->len != last_len) ||
2221 sk->sk_err ||
2222 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2223 signal_pending(current) ||
2224 !timeo)
2225 break;
2226
2227 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2228 unix_state_unlock(sk);
2229 if (freezable)
2230 timeo = freezable_schedule_timeout(timeo);
2231 else
2232 timeo = schedule_timeout(timeo);
2233 unix_state_lock(sk);
2234
2235 if (sock_flag(sk, SOCK_DEAD))
2236 break;
2237
2238 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2239 }
2240
2241 finish_wait(sk_sleep(sk), &wait);
2242 unix_state_unlock(sk);
2243 return timeo;
2244}
2245
2246static unsigned int unix_skb_len(const struct sk_buff *skb)
2247{
2248 return skb->len - UNIXCB(skb).consumed;
2249}
2250
2251struct unix_stream_read_state {
2252 int (*recv_actor)(struct sk_buff *, int, int,
2253 struct unix_stream_read_state *);
2254 struct socket *socket;
2255 struct msghdr *msg;
2256 struct pipe_inode_info *pipe;
2257 size_t size;
2258 int flags;
2259 unsigned int splice_flags;
2260};
2261
2262static int unix_stream_read_generic(struct unix_stream_read_state *state,
2263 bool freezable)
2264{
2265 struct scm_cookie scm;
2266 struct socket *sock = state->socket;
2267 struct sock *sk = sock->sk;
2268 struct unix_sock *u = unix_sk(sk);
2269 int copied = 0;
2270 int flags = state->flags;
2271 int noblock = flags & MSG_DONTWAIT;
2272 bool check_creds = false;
2273 int target;
2274 int err = 0;
2275 long timeo;
2276 int skip;
2277 size_t size = state->size;
2278 unsigned int last_len;
2279
2280 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2281 err = -EINVAL;
2282 goto out;
2283 }
2284
2285 if (unlikely(flags & MSG_OOB)) {
2286 err = -EOPNOTSUPP;
2287 goto out;
2288 }
2289
2290 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2291 timeo = sock_rcvtimeo(sk, noblock);
2292
2293 memset(&scm, 0, sizeof(scm));
2294
2295	/* Lock the socket to prevent queue disordering
2296	 * while we sleep copying data out to the msg.
2297	 */
2298 mutex_lock(&u->iolock);
2299
2300 if (flags & MSG_PEEK)
2301 skip = sk_peek_offset(sk, flags);
2302 else
2303 skip = 0;
2304
2305 do {
2306 int chunk;
2307 bool drop_skb;
2308 struct sk_buff *skb, *last;
2309
2310redo:
2311 unix_state_lock(sk);
2312 if (sock_flag(sk, SOCK_DEAD)) {
2313 err = -ECONNRESET;
2314 goto unlock;
2315 }
2316 last = skb = skb_peek(&sk->sk_receive_queue);
2317 last_len = last ? last->len : 0;
2318again:
2319 if (skb == NULL) {
2320 unix_sk(sk)->recursion_level = 0;
2321 if (copied >= target)
2322 goto unlock;
2323
2324 /*
2325 * POSIX 1003.1g mandates this order.
2326 */
2327
2328 err = sock_error(sk);
2329 if (err)
2330 goto unlock;
2331 if (sk->sk_shutdown & RCV_SHUTDOWN)
2332 goto unlock;
2333
2334 unix_state_unlock(sk);
2335 if (!timeo) {
2336 err = -EAGAIN;
2337 break;
2338 }
2339
2340 mutex_unlock(&u->iolock);
2341
2342 timeo = unix_stream_data_wait(sk, timeo, last,
2343 last_len, freezable);
2344
2345 if (signal_pending(current)) {
2346 err = sock_intr_errno(timeo);
2347 scm_destroy(&scm);
2348 goto out;
2349 }
2350
2351 mutex_lock(&u->iolock);
2352 goto redo;
2353unlock:
2354 unix_state_unlock(sk);
2355 break;
2356 }
2357
2358 while (skip >= unix_skb_len(skb)) {
2359 skip -= unix_skb_len(skb);
2360 last = skb;
2361 last_len = skb->len;
2362 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2363 if (!skb)
2364 goto again;
2365 }
2366
2367 unix_state_unlock(sk);
2368
2369 if (check_creds) {
2370 /* Never glue messages from different writers */
2371 if (!unix_skb_scm_eq(skb, &scm))
2372 break;
2373 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2374 /* Copy credentials */
2375 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2376 unix_set_secdata(&scm, skb);
2377 check_creds = true;
2378 }
2379
2380 /* Copy address just once */
2381 if (state->msg && state->msg->msg_name) {
2382 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2383 state->msg->msg_name);
2384 unix_copy_addr(state->msg, skb->sk);
2385 sunaddr = NULL;
2386 }
2387
2388 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2389 skb_get(skb);
2390 chunk = state->recv_actor(skb, skip, chunk, state);
2391 drop_skb = !unix_skb_len(skb);
2392 /* skb is only safe to use if !drop_skb */
2393 consume_skb(skb);
2394 if (chunk < 0) {
2395 if (copied == 0)
2396 copied = -EFAULT;
2397 break;
2398 }
2399 copied += chunk;
2400 size -= chunk;
2401
2402 if (drop_skb) {
2403 /* the skb was touched by a concurrent reader;
2404 * we should not expect anything from this skb
2405 * anymore and assume it invalid - we can be
2406 * sure it was dropped from the socket queue
2407 *
2408 * let's report a short read
2409 */
2410 err = 0;
2411 break;
2412 }
2413
2414 /* Mark read part of skb as used */
2415 if (!(flags & MSG_PEEK)) {
2416 UNIXCB(skb).consumed += chunk;
2417
2418 sk_peek_offset_bwd(sk, chunk);
2419
2420 if (UNIXCB(skb).fp)
2421 unix_detach_fds(&scm, skb);
2422
2423 if (unix_skb_len(skb))
2424 break;
2425
2426 skb_unlink(skb, &sk->sk_receive_queue);
2427 consume_skb(skb);
2428
2429 if (scm.fp)
2430 break;
2431 } else {
2432 /* It is questionable, see note in unix_dgram_recvmsg.
2433 */
2434 if (UNIXCB(skb).fp)
2435 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2436
2437 sk_peek_offset_fwd(sk, chunk);
2438
2439 if (UNIXCB(skb).fp)
2440 break;
2441
2442 skip = 0;
2443 last = skb;
2444 last_len = skb->len;
2445 unix_state_lock(sk);
2446 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2447 if (skb)
2448 goto again;
2449 unix_state_unlock(sk);
2450 break;
2451 }
2452 } while (size);
2453
2454 mutex_unlock(&u->iolock);
2455 if (state->msg)
2456 scm_recv(sock, state->msg, &scm, flags);
2457 else
2458 scm_destroy(&scm);
2459out:
2460 return copied ? : err;
2461}
2462
2463static int unix_stream_read_actor(struct sk_buff *skb,
2464 int skip, int chunk,
2465 struct unix_stream_read_state *state)
2466{
2467 int ret;
2468
2469 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2470 state->msg, chunk);
2471 return ret ?: chunk;
2472}
2473
2474static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2475 size_t size, int flags)
2476{
2477 struct unix_stream_read_state state = {
2478 .recv_actor = unix_stream_read_actor,
2479 .socket = sock,
2480 .msg = msg,
2481 .size = size,
2482 .flags = flags
2483 };
2484
2485 return unix_stream_read_generic(&state, true);
2486}
2487
2488static int unix_stream_splice_actor(struct sk_buff *skb,
2489 int skip, int chunk,
2490 struct unix_stream_read_state *state)
2491{
2492 return skb_splice_bits(skb, state->socket->sk,
2493 UNIXCB(skb).consumed + skip,
2494 state->pipe, chunk, state->splice_flags);
2495}
2496
2497static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2498 struct pipe_inode_info *pipe,
2499 size_t size, unsigned int flags)
2500{
2501 struct unix_stream_read_state state = {
2502 .recv_actor = unix_stream_splice_actor,
2503 .socket = sock,
2504 .pipe = pipe,
2505 .size = size,
2506 .splice_flags = flags,
2507 };
2508
2509 if (unlikely(*ppos))
2510 return -ESPIPE;
2511
2512 if (sock->file->f_flags & O_NONBLOCK ||
2513 flags & SPLICE_F_NONBLOCK)
2514 state.flags = MSG_DONTWAIT;
2515
2516 return unix_stream_read_generic(&state, false);
2517}
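/*
 * Minimal userspace sketch of what ->splice_read above serves
 * (illustrative; assumes "sock" is a connected AF_UNIX stream
 * descriptor): move queued socket bytes into a pipe without copying
 * them through userspace.
 *
 *	int pipefd[2];
 *	ssize_t n;
 *
 *	if (pipe2(pipefd, O_CLOEXEC) == 0) {
 *		n = splice(sock, NULL, pipefd[1], NULL, 4096,
 *			   SPLICE_F_NONBLOCK);
 *		// n > 0: bytes are now readable from pipefd[0]
 *		// n < 0 && errno == EAGAIN: nothing queued yet
 *	}
 */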
2518
2519static int unix_shutdown(struct socket *sock, int mode)
2520{
2521 struct sock *sk = sock->sk;
2522 struct sock *other;
2523
2524 if (mode < SHUT_RD || mode > SHUT_RDWR)
2525 return -EINVAL;
2526 /* This maps:
2527 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2528 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2529 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2530 */
2531 ++mode;
2532
2533 unix_state_lock(sk);
2534 sk->sk_shutdown |= mode;
2535 other = unix_peer(sk);
2536 if (other)
2537 sock_hold(other);
2538 unix_state_unlock(sk);
2539 sk->sk_state_change(sk);
2540
2541 if (other &&
2542 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2543
2544 int peer_mode = 0;
2545
2546 if (mode&RCV_SHUTDOWN)
2547 peer_mode |= SEND_SHUTDOWN;
2548 if (mode&SEND_SHUTDOWN)
2549 peer_mode |= RCV_SHUTDOWN;
2550 unix_state_lock(other);
2551 other->sk_shutdown |= peer_mode;
2552 unix_state_unlock(other);
2553 other->sk_state_change(other);
2554 if (peer_mode == SHUTDOWN_MASK)
2555 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2556 else if (peer_mode & RCV_SHUTDOWN)
2557 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2558 }
2559 if (other)
2560 sock_put(other);
2561
2562 return 0;
2563}
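/*
 * Usage sketch (userspace, illustrative) of the peer_mode mirroring
 * above: shutting down the write side of one end of a socketpair
 * delivers EOF to the peer while the other direction keeps working.
 *
 *	int sv[2];
 *	char c;
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	shutdown(sv[0], SHUT_WR);
 *	read(sv[1], &c, 1);	// returns 0: peer saw RCV_SHUTDOWN
 *	write(sv[1], "x", 1);	// opposite direction still open
 *	read(sv[0], &c, 1);	// returns 1
 */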
2564
2565long unix_inq_len(struct sock *sk)
2566{
2567 struct sk_buff *skb;
2568 long amount = 0;
2569
2570 if (sk->sk_state == TCP_LISTEN)
2571 return -EINVAL;
2572
2573 spin_lock(&sk->sk_receive_queue.lock);
2574 if (sk->sk_type == SOCK_STREAM ||
2575 sk->sk_type == SOCK_SEQPACKET) {
2576 skb_queue_walk(&sk->sk_receive_queue, skb)
2577 amount += unix_skb_len(skb);
2578 } else {
2579 skb = skb_peek(&sk->sk_receive_queue);
2580 if (skb)
2581 amount = skb->len;
2582 }
2583 spin_unlock(&sk->sk_receive_queue.lock);
2584
2585 return amount;
2586}
2587EXPORT_SYMBOL_GPL(unix_inq_len);
2588
2589long unix_outq_len(struct sock *sk)
2590{
2591 return sk_wmem_alloc_get(sk);
2592}
2593EXPORT_SYMBOL_GPL(unix_outq_len);
2594
2595static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2596{
2597 struct sock *sk = sock->sk;
2598 long amount = 0;
2599 int err;
2600
2601 switch (cmd) {
2602 case SIOCOUTQ:
2603 amount = unix_outq_len(sk);
2604 err = put_user(amount, (int __user *)arg);
2605 break;
2606 case SIOCINQ:
2607 amount = unix_inq_len(sk);
2608 if (amount < 0)
2609 err = amount;
2610 else
2611 err = put_user(amount, (int __user *)arg);
2612 break;
2613 default:
2614 err = -ENOIOCTLCMD;
2615 break;
2616 }
2617 return err;
2618}
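/*
 * Userspace sketch (illustrative): querying the queue sizes reported
 * above. SIOCINQ yields the unread bytes counted by unix_inq_len(),
 * SIOCOUTQ the still-unconsumed sent bytes from unix_outq_len().
 *
 *	int unread, unsent;
 *
 *	if (ioctl(sock, SIOCINQ, &unread) == 0 &&
 *	    ioctl(sock, SIOCOUTQ, &unsent) == 0)
 *		fprintf(stderr, "in=%d out=%d\n", unread, unsent);
 */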
2619
2620static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2621{
2622 struct sock *sk = sock->sk;
2623 unsigned int mask;
2624
2625 sock_poll_wait(file, sk_sleep(sk), wait);
2626 mask = 0;
2627
2628 /* exceptional events? */
2629 if (sk->sk_err)
2630 mask |= POLLERR;
2631 if (sk->sk_shutdown == SHUTDOWN_MASK)
2632 mask |= POLLHUP;
2633 if (sk->sk_shutdown & RCV_SHUTDOWN)
2634 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2635
2636 /* readable? */
2637 if (!skb_queue_empty(&sk->sk_receive_queue))
2638 mask |= POLLIN | POLLRDNORM;
2639
2640 /* Connection-based need to check for termination and startup */
2641 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2642 sk->sk_state == TCP_CLOSE)
2643 mask |= POLLHUP;
2644
2645 /*
2646 * we set writable also when the other side has shut down the
2647 * connection. This prevents stuck sockets.
2648 */
2649 if (unix_writable(sk))
2650 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2651
2652 return mask;
2653}
2654
2655static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2656 poll_table *wait)
2657{
2658 struct sock *sk = sock->sk, *other;
2659 unsigned int mask, writable;
2660
2661 sock_poll_wait(file, sk_sleep(sk), wait);
2662 mask = 0;
2663
2664 /* exceptional events? */
2665 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2666 mask |= POLLERR |
2667 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2668
2669 if (sk->sk_shutdown & RCV_SHUTDOWN)
2670 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2671 if (sk->sk_shutdown == SHUTDOWN_MASK)
2672 mask |= POLLHUP;
2673
2674 /* readable? */
2675 if (!skb_queue_empty(&sk->sk_receive_queue))
2676 mask |= POLLIN | POLLRDNORM;
2677
2678 /* Connection-based need to check for termination and startup */
2679 if (sk->sk_type == SOCK_SEQPACKET) {
2680 if (sk->sk_state == TCP_CLOSE)
2681 mask |= POLLHUP;
2682 /* connection hasn't started yet? */
2683 if (sk->sk_state == TCP_SYN_SENT)
2684 return mask;
2685 }
2686
2687 /* No write status requested, avoid expensive OUT tests. */
2688 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
2689 return mask;
2690
2691 writable = unix_writable(sk);
2692 if (writable) {
2693 unix_state_lock(sk);
2694
2695 other = unix_peer(sk);
2696 if (other && unix_peer(other) != sk &&
2697 unix_recvq_full(other) &&
2698 unix_dgram_peer_wake_me(sk, other))
2699 writable = 0;
2700
2701 unix_state_unlock(sk);
2702 }
2703
2704 if (writable)
2705 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2706 else
2707 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2708
2709 return mask;
2710}
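/*
 * Illustrative userspace consequence of the writability test above: a
 * connected datagram sender stops seeing POLLOUT once the peer's
 * receive queue is full, instead of blocking (or failing) in a
 * subsequent sendmsg(). "dgram_sock", "buf" and "len" are placeholders.
 *
 *	struct pollfd pfd = { .fd = dgram_sock, .events = POLLOUT };
 *
 *	if (poll(&pfd, 1, 0) == 1 && (pfd.revents & POLLOUT))
 *		send(dgram_sock, buf, len, MSG_DONTWAIT);
 *	// with the peer's queue full, poll() reports no POLLOUT until
 *	// the peer's recv path relays a wake-up via peer_wait
 */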
2711
2712#ifdef CONFIG_PROC_FS
2713
2714#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2715
2716#define get_bucket(x) ((x) >> BUCKET_SPACE)
2717#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2718#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
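/*
 * Example of the *pos encoding above: bucket index and in-bucket
 * offset share one loff_t, and the macros invert each other (as long
 * as the offset fits in BUCKET_SPACE bits):
 *
 *	loff_t pos = set_bucket_offset(3, 7);
 *
 *	get_bucket(pos);	// == 3
 *	get_offset(pos);	// == 7
 */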
2719
2720static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2721{
2722 unsigned long offset = get_offset(*pos);
2723 unsigned long bucket = get_bucket(*pos);
2724 struct sock *sk;
2725 unsigned long count = 0;
2726
2727 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2728 if (sock_net(sk) != seq_file_net(seq))
2729 continue;
2730 if (++count == offset)
2731 break;
2732 }
2733
2734 return sk;
2735}
2736
2737static struct sock *unix_next_socket(struct seq_file *seq,
2738 struct sock *sk,
2739 loff_t *pos)
2740{
2741 unsigned long bucket;
2742
2743 while (sk > (struct sock *)SEQ_START_TOKEN) {
2744 sk = sk_next(sk);
2745 if (!sk)
2746 goto next_bucket;
2747 if (sock_net(sk) == seq_file_net(seq))
2748 return sk;
2749 }
2750
2751 do {
2752 sk = unix_from_bucket(seq, pos);
2753 if (sk)
2754 return sk;
2755
2756next_bucket:
2757 bucket = get_bucket(*pos) + 1;
2758 *pos = set_bucket_offset(bucket, 1);
2759 } while (bucket < ARRAY_SIZE(unix_socket_table));
2760
2761 return NULL;
2762}
2763
2764static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2765 __acquires(unix_table_lock)
2766{
2767 spin_lock(&unix_table_lock);
2768
2769 if (!*pos)
2770 return SEQ_START_TOKEN;
2771
2772 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2773 return NULL;
2774
2775 return unix_next_socket(seq, NULL, pos);
2776}
2777
2778static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2779{
2780 ++*pos;
2781 return unix_next_socket(seq, v, pos);
2782}
2783
2784static void unix_seq_stop(struct seq_file *seq, void *v)
2785 __releases(unix_table_lock)
2786{
2787 spin_unlock(&unix_table_lock);
2788}
2789
2790static int unix_seq_show(struct seq_file *seq, void *v)
2791{
2792
2793 if (v == SEQ_START_TOKEN)
2794 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2795 "Inode Path\n");
2796 else {
2797 struct sock *s = v;
2798 struct unix_sock *u = unix_sk(s);
2799 unix_state_lock(s);
2800
2801 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2802 s,
2803 atomic_read(&s->sk_refcnt),
2804 0,
2805 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2806 s->sk_type,
2807 s->sk_socket ?
2808 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2809 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2810 sock_i_ino(s));
2811
2812 if (u->addr) {
2813 int i, len;
2814 seq_putc(seq, ' ');
2815
2816 i = 0;
2817 len = u->addr->len - sizeof(short);
2818 if (!UNIX_ABSTRACT(s))
2819 len--;
2820 else {
2821 seq_putc(seq, '@');
2822 i++;
2823 }
2824 for ( ; i < len; i++)
2825 seq_putc(seq, u->addr->name->sun_path[i] ?:
2826 '@');
2827 }
2828 unix_state_unlock(s);
2829 seq_putc(seq, '\n');
2830 }
2831
2832 return 0;
2833}
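/*
 * Illustrative /proc/net/unix line produced by the format above (all
 * numbers made up; %pK typically masks the kernel address). For a
 * listening stream socket bound to a pathname it could read:
 *
 *	0000000000000000: 00000002 00000000 00010000 0001 01 23456 /run/example.sock
 *
 * i.e. refcount 2, protocol 0, __SO_ACCEPTCON set, type SOCK_STREAM
 * (0001), state SS_UNCONNECTED (01) and the bound path.
 */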
2834
2835static const struct seq_operations unix_seq_ops = {
2836 .start = unix_seq_start,
2837 .next = unix_seq_next,
2838 .stop = unix_seq_stop,
2839 .show = unix_seq_show,
2840};
2841
2842static int unix_seq_open(struct inode *inode, struct file *file)
2843{
2844 return seq_open_net(inode, file, &unix_seq_ops,
2845 sizeof(struct seq_net_private));
2846}
2847
2848static const struct file_operations unix_seq_fops = {
2849 .owner = THIS_MODULE,
2850 .open = unix_seq_open,
2851 .read = seq_read,
2852 .llseek = seq_lseek,
2853 .release = seq_release_net,
2854};
2855
2856#endif
2857
2858static const struct net_proto_family unix_family_ops = {
2859 .family = PF_UNIX,
2860 .create = unix_create,
2861 .owner = THIS_MODULE,
2862};
2863
2864
2865static int __net_init unix_net_init(struct net *net)
2866{
2867 int error = -ENOMEM;
2868
2869 net->unx.sysctl_max_dgram_qlen = 10;
2870 if (unix_sysctl_register(net))
2871 goto out;
2872
2873#ifdef CONFIG_PROC_FS
2874 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2875 unix_sysctl_unregister(net);
2876 goto out;
2877 }
2878#endif
2879 error = 0;
2880out:
2881 return error;
2882}
2883
2884static void __net_exit unix_net_exit(struct net *net)
2885{
2886 unix_sysctl_unregister(net);
2887 remove_proc_entry("unix", net->proc_net);
2888}
2889
2890static struct pernet_operations unix_net_ops = {
2891 .init = unix_net_init,
2892 .exit = unix_net_exit,
2893};
2894
2895static int __init af_unix_init(void)
2896{
2897 int rc = -1;
2898
2899 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2900
2901 rc = proto_register(&unix_proto, 1);
2902 if (rc != 0) {
2903 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2904 goto out;
2905 }
2906
2907 sock_register(&unix_family_ops);
2908 register_pernet_subsys(&unix_net_ops);
2909out:
2910 return rc;
2911}
2912
2913static void __exit af_unix_exit(void)
2914{
2915 sock_unregister(PF_UNIX);
2916 proto_unregister(&unix_proto);
2917 unregister_pernet_subsys(&unix_net_ops);
2918}
2919
2920/* Earlier than device_initcall() so that other drivers invoking
2921 request_module() don't end up in a loop when modprobe tries
2922 to use a UNIX socket. But later than subsys_initcall() because
2923 we depend on stuff initialised there */
2924fs_initcall(af_unix_init);
2925module_exit(af_unix_exit);
2926
2927MODULE_LICENSE("GPL");
2928MODULE_ALIAS_NETPROTO(PF_UNIX);
1// SPDX-License-Identifier: GPL-2.0-or-later
77
78#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
79
80#include <linux/module.h>
81#include <linux/kernel.h>
82#include <linux/signal.h>
83#include <linux/sched/signal.h>
84#include <linux/errno.h>
85#include <linux/string.h>
86#include <linux/stat.h>
87#include <linux/dcache.h>
88#include <linux/namei.h>
89#include <linux/socket.h>
90#include <linux/un.h>
91#include <linux/fcntl.h>
92#include <linux/filter.h>
93#include <linux/termios.h>
94#include <linux/sockios.h>
95#include <linux/net.h>
96#include <linux/in.h>
97#include <linux/fs.h>
98#include <linux/slab.h>
99#include <linux/uaccess.h>
100#include <linux/skbuff.h>
101#include <linux/netdevice.h>
102#include <net/net_namespace.h>
103#include <net/sock.h>
104#include <net/tcp_states.h>
105#include <net/af_unix.h>
106#include <linux/proc_fs.h>
107#include <linux/seq_file.h>
108#include <net/scm.h>
109#include <linux/init.h>
110#include <linux/poll.h>
111#include <linux/rtnetlink.h>
112#include <linux/mount.h>
113#include <net/checksum.h>
114#include <linux/security.h>
115#include <linux/splice.h>
116#include <linux/freezer.h>
117#include <linux/file.h>
118#include <linux/btf_ids.h>
119#include <linux/bpf-cgroup.h>
120
121static atomic_long_t unix_nr_socks;
122static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
123static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
124
125/* SMP locking strategy:
126 * hash table is protected with spinlock.
127 * each socket state is protected by separate spinlock.
128 */
129
130static unsigned int unix_unbound_hash(struct sock *sk)
131{
132 unsigned long hash = (unsigned long)sk;
133
134 hash ^= hash >> 16;
135 hash ^= hash >> 8;
136 hash ^= sk->sk_type;
137
138 return hash & UNIX_HASH_MOD;
139}
140
141static unsigned int unix_bsd_hash(struct inode *i)
142{
143 return i->i_ino & UNIX_HASH_MOD;
144}
145
146static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
147 int addr_len, int type)
148{
149 __wsum csum = csum_partial(sunaddr, addr_len, 0);
150 unsigned int hash;
151
152 hash = (__force unsigned int)csum_fold(csum);
153 hash ^= hash >> 8;
154 hash ^= type;
155
156 return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
157}
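/*
 * Note on the ranges produced above: unix_unbound_hash() and
 * unix_bsd_hash() return values in [0, UNIX_HASH_MOD], while
 * unix_abstract_hash() returns [UNIX_HASH_MOD + 1, 2 * UNIX_HASH_MOD + 1],
 * so abstract names never share a bucket with unbound or pathname
 * sockets in the per-netns table.
 */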
158
159static void unix_table_double_lock(struct net *net,
160 unsigned int hash1, unsigned int hash2)
161{
162 if (hash1 == hash2) {
163 spin_lock(&net->unx.table.locks[hash1]);
164 return;
165 }
166
167 if (hash1 > hash2)
168 swap(hash1, hash2);
169
170 spin_lock(&net->unx.table.locks[hash1]);
171 spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
172}
173
174static void unix_table_double_unlock(struct net *net,
175 unsigned int hash1, unsigned int hash2)
176{
177 if (hash1 == hash2) {
178 spin_unlock(&net->unx.table.locks[hash1]);
179 return;
180 }
181
182 spin_unlock(&net->unx.table.locks[hash1]);
183 spin_unlock(&net->unx.table.locks[hash2]);
184}
185
186#ifdef CONFIG_SECURITY_NETWORK
187static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
188{
189 UNIXCB(skb).secid = scm->secid;
190}
191
192static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
193{
194 scm->secid = UNIXCB(skb).secid;
195}
196
197static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
198{
199 return (scm->secid == UNIXCB(skb).secid);
200}
201#else
202static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
203{ }
204
205static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
206{ }
207
208static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
209{
210 return true;
211}
212#endif /* CONFIG_SECURITY_NETWORK */
213
214static inline int unix_our_peer(struct sock *sk, struct sock *osk)
215{
216 return unix_peer(osk) == sk;
217}
218
219static inline int unix_may_send(struct sock *sk, struct sock *osk)
220{
221 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
222}
223
224static inline int unix_recvq_full(const struct sock *sk)
225{
226 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
227}
228
229static inline int unix_recvq_full_lockless(const struct sock *sk)
230{
231 return skb_queue_len_lockless(&sk->sk_receive_queue) >
232 READ_ONCE(sk->sk_max_ack_backlog);
233}
234
235struct sock *unix_peer_get(struct sock *s)
236{
237 struct sock *peer;
238
239 unix_state_lock(s);
240 peer = unix_peer(s);
241 if (peer)
242 sock_hold(peer);
243 unix_state_unlock(s);
244 return peer;
245}
246EXPORT_SYMBOL_GPL(unix_peer_get);
247
248static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
249 int addr_len)
250{
251 struct unix_address *addr;
252
253 addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
254 if (!addr)
255 return NULL;
256
257 refcount_set(&addr->refcnt, 1);
258 addr->len = addr_len;
259 memcpy(addr->name, sunaddr, addr_len);
260
261 return addr;
262}
263
264static inline void unix_release_addr(struct unix_address *addr)
265{
266 if (refcount_dec_and_test(&addr->refcnt))
267 kfree(addr);
268}
269
270/*
271 * Check unix socket name:
272 *	- it must not be zero length.
273 *	- if it doesn't start with a zero byte, it must be NUL terminated (FS object)
274 *	- if it starts with a zero byte, it is an abstract name.
275 */
276
277static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
278{
279 if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
280 addr_len > sizeof(*sunaddr))
281 return -EINVAL;
282
283 if (sunaddr->sun_family != AF_UNIX)
284 return -EINVAL;
285
286 return 0;
287}
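/*
 * Userspace sketch (illustrative; the abstract name and the pathname
 * below are hypothetical) of the two accepted forms:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	// abstract: leading zero byte, addr_len delimits the name
 *	memcpy(a.sun_path, "\0example", 8);
 *	bind(sock, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 *
 *	// filesystem: NUL-terminated path, bind() creates the inode
 *	strcpy(a.sun_path, "/tmp/example.sock");
 *	bind(sock2, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 18);
 */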
288
289static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
290{
291 struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
292 short offset = offsetof(struct sockaddr_storage, __data);
293
294 BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
295
296 /* This may look like an off by one error but it is a bit more
297 * subtle. 108 is the longest valid AF_UNIX path for a binding.
298 * sun_path[108] doesn't as such exist. However in kernel space
299 * we are guaranteed that it is a valid memory location in our
300 * kernel address buffer because syscall functions always pass
301 * a pointer of struct sockaddr_storage which has a bigger buffer
302 * than 108. Also, we must terminate sun_path for strlen() in
303 * getname_kernel().
304 */
305 addr->__data[addr_len - offset] = 0;
306
307 /* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
308 * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
309 * know the actual buffer.
310 */
311 return strlen(addr->__data) + offset + 1;
312}
313
314static void __unix_remove_socket(struct sock *sk)
315{
316 sk_del_node_init(sk);
317}
318
319static void __unix_insert_socket(struct net *net, struct sock *sk)
320{
321 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
322 sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
323}
324
325static void __unix_set_addr_hash(struct net *net, struct sock *sk,
326 struct unix_address *addr, unsigned int hash)
327{
328 __unix_remove_socket(sk);
329 smp_store_release(&unix_sk(sk)->addr, addr);
330
331 sk->sk_hash = hash;
332 __unix_insert_socket(net, sk);
333}
334
335static void unix_remove_socket(struct net *net, struct sock *sk)
336{
337 spin_lock(&net->unx.table.locks[sk->sk_hash]);
338 __unix_remove_socket(sk);
339 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
340}
341
342static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
343{
344 spin_lock(&net->unx.table.locks[sk->sk_hash]);
345 __unix_insert_socket(net, sk);
346 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
347}
348
349static void unix_insert_bsd_socket(struct sock *sk)
350{
351 spin_lock(&bsd_socket_locks[sk->sk_hash]);
352 sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
353 spin_unlock(&bsd_socket_locks[sk->sk_hash]);
354}
355
356static void unix_remove_bsd_socket(struct sock *sk)
357{
358 if (!hlist_unhashed(&sk->sk_bind_node)) {
359 spin_lock(&bsd_socket_locks[sk->sk_hash]);
360 __sk_del_bind_node(sk);
361 spin_unlock(&bsd_socket_locks[sk->sk_hash]);
362
363 sk_node_init(&sk->sk_bind_node);
364 }
365}
366
367static struct sock *__unix_find_socket_byname(struct net *net,
368 struct sockaddr_un *sunname,
369 int len, unsigned int hash)
370{
371 struct sock *s;
372
373 sk_for_each(s, &net->unx.table.buckets[hash]) {
374 struct unix_sock *u = unix_sk(s);
375
376 if (u->addr->len == len &&
377 !memcmp(u->addr->name, sunname, len))
378 return s;
379 }
380 return NULL;
381}
382
383static inline struct sock *unix_find_socket_byname(struct net *net,
384 struct sockaddr_un *sunname,
385 int len, unsigned int hash)
386{
387 struct sock *s;
388
389 spin_lock(&net->unx.table.locks[hash]);
390 s = __unix_find_socket_byname(net, sunname, len, hash);
391 if (s)
392 sock_hold(s);
393 spin_unlock(&net->unx.table.locks[hash]);
394 return s;
395}
396
397static struct sock *unix_find_socket_byinode(struct inode *i)
398{
399 unsigned int hash = unix_bsd_hash(i);
400 struct sock *s;
401
402 spin_lock(&bsd_socket_locks[hash]);
403 sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
404 struct dentry *dentry = unix_sk(s)->path.dentry;
405
406 if (dentry && d_backing_inode(dentry) == i) {
407 sock_hold(s);
408 spin_unlock(&bsd_socket_locks[hash]);
409 return s;
410 }
411 }
412 spin_unlock(&bsd_socket_locks[hash]);
413 return NULL;
414}
415
416/* Support code for asymmetrically connected dgram sockets
417 *
418 * If a datagram socket is connected to a socket not itself connected
419 * to the first socket (eg, /dev/log), clients may only enqueue more
420 * messages if the present receive queue of the server socket is not
421 * "too large". This means there's a second writeability condition
422 * poll and sendmsg need to test. The dgram recv code will do a wake
423 * up on the peer_wait wait queue of a socket upon reception of a
424 * datagram which needs to be propagated to sleeping would-be writers
425 * since these might not have sent anything so far. This can't be
426 * accomplished via poll_wait because the lifetime of the server
427 * socket might be less than that of its clients if these break their
428 * association with it or if the server socket is closed while clients
429 * are still connected to it, and there's no way to inform "a polling
430 * implementation" that it should let go of a certain wait queue.
431 *
432 * In order to propagate a wake up, a wait_queue_entry_t of the client
433 * socket is enqueued on the peer_wait queue of the server socket
434 * whose wake function does a wake_up on the ordinary client socket
435 * wait queue. This connection is established whenever a write (or
436 * poll for write) hits the flow control condition and is broken when
437 * the association to the server socket is dissolved or after a wake
438 * up was relayed.
439 */
440
441static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
442 void *key)
443{
444 struct unix_sock *u;
445 wait_queue_head_t *u_sleep;
446
447 u = container_of(q, struct unix_sock, peer_wake);
448
449 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
450 q);
451 u->peer_wake.private = NULL;
452
453 /* relaying can only happen while the wq still exists */
454 u_sleep = sk_sleep(&u->sk);
455 if (u_sleep)
456 wake_up_interruptible_poll(u_sleep, key_to_poll(key));
457
458 return 0;
459}
460
461static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
462{
463 struct unix_sock *u, *u_other;
464 int rc;
465
466 u = unix_sk(sk);
467 u_other = unix_sk(other);
468 rc = 0;
469 spin_lock(&u_other->peer_wait.lock);
470
471 if (!u->peer_wake.private) {
472 u->peer_wake.private = other;
473 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
474
475 rc = 1;
476 }
477
478 spin_unlock(&u_other->peer_wait.lock);
479 return rc;
480}
481
482static void unix_dgram_peer_wake_disconnect(struct sock *sk,
483 struct sock *other)
484{
485 struct unix_sock *u, *u_other;
486
487 u = unix_sk(sk);
488 u_other = unix_sk(other);
489 spin_lock(&u_other->peer_wait.lock);
490
491 if (u->peer_wake.private == other) {
492 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
493 u->peer_wake.private = NULL;
494 }
495
496 spin_unlock(&u_other->peer_wait.lock);
497}
498
499static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
500 struct sock *other)
501{
502 unix_dgram_peer_wake_disconnect(sk, other);
503 wake_up_interruptible_poll(sk_sleep(sk),
504 EPOLLOUT |
505 EPOLLWRNORM |
506 EPOLLWRBAND);
507}
508
509/* preconditions:
510 * - unix_peer(sk) == other
511 * - association is stable
512 */
513static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
514{
515 int connected;
516
517 connected = unix_dgram_peer_wake_connect(sk, other);
518
519 /* If other is SOCK_DEAD, we want to make sure we signal
520 * POLLOUT, such that a subsequent write() can get a
521 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
522	 * to other and it's full, we will hang waiting for POLLOUT.
523 */
524 if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
525 return 1;
526
527 if (connected)
528 unix_dgram_peer_wake_disconnect(sk, other);
529
530 return 0;
531}
532
533static int unix_writable(const struct sock *sk)
534{
535 return sk->sk_state != TCP_LISTEN &&
536 (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
537}
538
539static void unix_write_space(struct sock *sk)
540{
541 struct socket_wq *wq;
542
543 rcu_read_lock();
544 if (unix_writable(sk)) {
545 wq = rcu_dereference(sk->sk_wq);
546 if (skwq_has_sleeper(wq))
547 wake_up_interruptible_sync_poll(&wq->wait,
548 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
549 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
550 }
551 rcu_read_unlock();
552}
553
554/* When a dgram socket disconnects (or changes its peer), we clear its receive
555 * queue of packets that arrived from the previous peer. First, this allows
556 * flow control based only on wmem_alloc; second, an sk connected to a peer
557 * may receive messages only from that peer. */
558static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
559{
560 if (!skb_queue_empty(&sk->sk_receive_queue)) {
561 skb_queue_purge(&sk->sk_receive_queue);
562 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
563
564 /* If one link of bidirectional dgram pipe is disconnected,
565 * we signal error. Messages are lost. Do not make this,
566 * when peer was not connected to us.
567 */
568 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
569 WRITE_ONCE(other->sk_err, ECONNRESET);
570 sk_error_report(other);
571 }
572 }
573 other->sk_state = TCP_CLOSE;
574}
575
576static void unix_sock_destructor(struct sock *sk)
577{
578 struct unix_sock *u = unix_sk(sk);
579
580 skb_queue_purge(&sk->sk_receive_queue);
581
582 DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
583 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
584 DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
585 if (!sock_flag(sk, SOCK_DEAD)) {
586 pr_info("Attempt to release alive unix socket: %p\n", sk);
587 return;
588 }
589
590 if (u->addr)
591 unix_release_addr(u->addr);
592
593 atomic_long_dec(&unix_nr_socks);
594 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
595#ifdef UNIX_REFCNT_DEBUG
596 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
597 atomic_long_read(&unix_nr_socks));
598#endif
599}
600
601static void unix_release_sock(struct sock *sk, int embrion)
602{
603 struct unix_sock *u = unix_sk(sk);
604 struct sock *skpair;
605 struct sk_buff *skb;
606 struct path path;
607 int state;
608
609 unix_remove_socket(sock_net(sk), sk);
610 unix_remove_bsd_socket(sk);
611
612 /* Clear state */
613 unix_state_lock(sk);
614 sock_orphan(sk);
615 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
616 path = u->path;
617 u->path.dentry = NULL;
618 u->path.mnt = NULL;
619 state = sk->sk_state;
620 sk->sk_state = TCP_CLOSE;
621
622 skpair = unix_peer(sk);
623 unix_peer(sk) = NULL;
624
625 unix_state_unlock(sk);
626
627#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
628 if (u->oob_skb) {
629 kfree_skb(u->oob_skb);
630 u->oob_skb = NULL;
631 }
632#endif
633
634 wake_up_interruptible_all(&u->peer_wait);
635
636 if (skpair != NULL) {
637 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
638 unix_state_lock(skpair);
639 /* No more writes */
640 WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
641 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
642 WRITE_ONCE(skpair->sk_err, ECONNRESET);
643 unix_state_unlock(skpair);
644 skpair->sk_state_change(skpair);
645 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
646 }
647
648 unix_dgram_peer_wake_disconnect(sk, skpair);
649 sock_put(skpair); /* It may now die */
650 }
651
652 /* Try to flush out this socket. Throw out buffers at least */
653
654 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
655 if (state == TCP_LISTEN)
656 unix_release_sock(skb->sk, 1);
657 /* passed fds are erased in the kfree_skb hook */
658 UNIXCB(skb).consumed = skb->len;
659 kfree_skb(skb);
660 }
661
662 if (path.dentry)
663 path_put(&path);
664
665 sock_put(sk);
666
667 /* ---- Socket is dead now and most probably destroyed ---- */
668
669 /*
670 * Fixme: BSD difference: In BSD all sockets connected to us get
671 * ECONNRESET and we die on the spot. In Linux we behave
672 * like files and pipes do and wait for the last
673 * dereference.
674 *
675 * Can't we simply set sock->err?
676 *
677 * What the above comment does talk about? --ANK(980817)
678 */
679
680 if (READ_ONCE(unix_tot_inflight))
681 unix_gc(); /* Garbage collect fds */
682}
683
684static void init_peercred(struct sock *sk)
685{
686 const struct cred *old_cred;
687 struct pid *old_pid;
688
689 spin_lock(&sk->sk_peer_lock);
690 old_pid = sk->sk_peer_pid;
691 old_cred = sk->sk_peer_cred;
692 sk->sk_peer_pid = get_pid(task_tgid(current));
693 sk->sk_peer_cred = get_current_cred();
694 spin_unlock(&sk->sk_peer_lock);
695
696 put_pid(old_pid);
697 put_cred(old_cred);
698}
699
700static void copy_peercred(struct sock *sk, struct sock *peersk)
701{
702 const struct cred *old_cred;
703 struct pid *old_pid;
704
705 if (sk < peersk) {
706 spin_lock(&sk->sk_peer_lock);
707 spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
708 } else {
709 spin_lock(&peersk->sk_peer_lock);
710 spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
711 }
712 old_pid = sk->sk_peer_pid;
713 old_cred = sk->sk_peer_cred;
714 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
715 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
716
717 spin_unlock(&sk->sk_peer_lock);
718 spin_unlock(&peersk->sk_peer_lock);
719
720 put_pid(old_pid);
721 put_cred(old_cred);
722}
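/*
 * Userspace view (illustrative) of the credentials recorded above:
 * SO_PEERCRED on a connected socket returns the pid/uid/gid captured
 * by init_peercred()/copy_peercred().
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		fprintf(stderr, "peer pid=%d uid=%d\n", peer.pid, peer.uid);
 */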
723
724static int unix_listen(struct socket *sock, int backlog)
725{
726 int err;
727 struct sock *sk = sock->sk;
728 struct unix_sock *u = unix_sk(sk);
729
730 err = -EOPNOTSUPP;
731 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
732 goto out; /* Only stream/seqpacket sockets accept */
733 err = -EINVAL;
734 if (!READ_ONCE(u->addr))
735 goto out; /* No listens on an unbound socket */
736 unix_state_lock(sk);
737 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
738 goto out_unlock;
739 if (backlog > sk->sk_max_ack_backlog)
740 wake_up_interruptible_all(&u->peer_wait);
741 sk->sk_max_ack_backlog = backlog;
742 sk->sk_state = TCP_LISTEN;
743 /* set credentials so connect can copy them */
744 init_peercred(sk);
745 err = 0;
746
747out_unlock:
748 unix_state_unlock(sk);
749out:
750 return err;
751}
752
753static int unix_release(struct socket *);
754static int unix_bind(struct socket *, struct sockaddr *, int);
755static int unix_stream_connect(struct socket *, struct sockaddr *,
756 int addr_len, int flags);
757static int unix_socketpair(struct socket *, struct socket *);
758static int unix_accept(struct socket *, struct socket *, int, bool);
759static int unix_getname(struct socket *, struct sockaddr *, int);
760static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
761static __poll_t unix_dgram_poll(struct file *, struct socket *,
762 poll_table *);
763static int unix_ioctl(struct socket *, unsigned int, unsigned long);
764#ifdef CONFIG_COMPAT
765static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
766#endif
767static int unix_shutdown(struct socket *, int);
768static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
769static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
770static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
771 struct pipe_inode_info *, size_t size,
772 unsigned int flags);
773static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
774static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
775static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
776static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
777static int unix_dgram_connect(struct socket *, struct sockaddr *,
778 int, int);
779static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
780static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
781 int);
782
783#ifdef CONFIG_PROC_FS
784static int unix_count_nr_fds(struct sock *sk)
785{
786 struct sk_buff *skb;
787 struct unix_sock *u;
788 int nr_fds = 0;
789
790 spin_lock(&sk->sk_receive_queue.lock);
791 skb = skb_peek(&sk->sk_receive_queue);
792 while (skb) {
793 u = unix_sk(skb->sk);
794 nr_fds += atomic_read(&u->scm_stat.nr_fds);
795 skb = skb_peek_next(skb, &sk->sk_receive_queue);
796 }
797 spin_unlock(&sk->sk_receive_queue.lock);
798
799 return nr_fds;
800}
801
802static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
803{
804 struct sock *sk = sock->sk;
805 unsigned char s_state;
806 struct unix_sock *u;
807 int nr_fds = 0;
808
809 if (sk) {
810 s_state = READ_ONCE(sk->sk_state);
811 u = unix_sk(sk);
812
813 /* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
814 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
815 * SOCK_DGRAM is ordinary. So, no lock is needed.
816 */
817 if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
818 nr_fds = atomic_read(&u->scm_stat.nr_fds);
819 else if (s_state == TCP_LISTEN)
820 nr_fds = unix_count_nr_fds(sk);
821
822 seq_printf(m, "scm_fds: %u\n", nr_fds);
823 }
824}
825#else
826#define unix_show_fdinfo NULL
827#endif
828
829static const struct proto_ops unix_stream_ops = {
830 .family = PF_UNIX,
831 .owner = THIS_MODULE,
832 .release = unix_release,
833 .bind = unix_bind,
834 .connect = unix_stream_connect,
835 .socketpair = unix_socketpair,
836 .accept = unix_accept,
837 .getname = unix_getname,
838 .poll = unix_poll,
839 .ioctl = unix_ioctl,
840#ifdef CONFIG_COMPAT
841 .compat_ioctl = unix_compat_ioctl,
842#endif
843 .listen = unix_listen,
844 .shutdown = unix_shutdown,
845 .sendmsg = unix_stream_sendmsg,
846 .recvmsg = unix_stream_recvmsg,
847 .read_skb = unix_stream_read_skb,
848 .mmap = sock_no_mmap,
849 .splice_read = unix_stream_splice_read,
850 .set_peek_off = sk_set_peek_off,
851 .show_fdinfo = unix_show_fdinfo,
852};
853
854static const struct proto_ops unix_dgram_ops = {
855 .family = PF_UNIX,
856 .owner = THIS_MODULE,
857 .release = unix_release,
858 .bind = unix_bind,
859 .connect = unix_dgram_connect,
860 .socketpair = unix_socketpair,
861 .accept = sock_no_accept,
862 .getname = unix_getname,
863 .poll = unix_dgram_poll,
864 .ioctl = unix_ioctl,
865#ifdef CONFIG_COMPAT
866 .compat_ioctl = unix_compat_ioctl,
867#endif
868 .listen = sock_no_listen,
869 .shutdown = unix_shutdown,
870 .sendmsg = unix_dgram_sendmsg,
871 .read_skb = unix_read_skb,
872 .recvmsg = unix_dgram_recvmsg,
873 .mmap = sock_no_mmap,
874 .set_peek_off = sk_set_peek_off,
875 .show_fdinfo = unix_show_fdinfo,
876};
877
878static const struct proto_ops unix_seqpacket_ops = {
879 .family = PF_UNIX,
880 .owner = THIS_MODULE,
881 .release = unix_release,
882 .bind = unix_bind,
883 .connect = unix_stream_connect,
884 .socketpair = unix_socketpair,
885 .accept = unix_accept,
886 .getname = unix_getname,
887 .poll = unix_dgram_poll,
888 .ioctl = unix_ioctl,
889#ifdef CONFIG_COMPAT
890 .compat_ioctl = unix_compat_ioctl,
891#endif
892 .listen = unix_listen,
893 .shutdown = unix_shutdown,
894 .sendmsg = unix_seqpacket_sendmsg,
895 .recvmsg = unix_seqpacket_recvmsg,
896 .mmap = sock_no_mmap,
897 .set_peek_off = sk_set_peek_off,
898 .show_fdinfo = unix_show_fdinfo,
899};
900
901static void unix_close(struct sock *sk, long timeout)
902{
903 /* Nothing to do here, unix socket does not need a ->close().
904 * This is merely for sockmap.
905 */
906}
907
908static void unix_unhash(struct sock *sk)
909{
910 /* Nothing to do here, unix socket does not need a ->unhash().
911 * This is merely for sockmap.
912 */
913}
914
915static bool unix_bpf_bypass_getsockopt(int level, int optname)
916{
917 if (level == SOL_SOCKET) {
918 switch (optname) {
919 case SO_PEERPIDFD:
920 return true;
921 default:
922 return false;
923 }
924 }
925
926 return false;
927}
928
929struct proto unix_dgram_proto = {
930 .name = "UNIX",
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct unix_sock),
933 .close = unix_close,
934 .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
935#ifdef CONFIG_BPF_SYSCALL
936 .psock_update_sk_prot = unix_dgram_bpf_update_proto,
937#endif
938};
939
940struct proto unix_stream_proto = {
941 .name = "UNIX-STREAM",
942 .owner = THIS_MODULE,
943 .obj_size = sizeof(struct unix_sock),
944 .close = unix_close,
945 .unhash = unix_unhash,
946 .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
947#ifdef CONFIG_BPF_SYSCALL
948 .psock_update_sk_prot = unix_stream_bpf_update_proto,
949#endif
950};
951
952static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
953{
954 struct unix_sock *u;
955 struct sock *sk;
956 int err;
957
958 atomic_long_inc(&unix_nr_socks);
959 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
960 err = -ENFILE;
961 goto err;
962 }
963
964 if (type == SOCK_STREAM)
965 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
966	else /* dgram and seqpacket */
967 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
968
969 if (!sk) {
970 err = -ENOMEM;
971 goto err;
972 }
973
974 sock_init_data(sock, sk);
975
976 sk->sk_hash = unix_unbound_hash(sk);
977 sk->sk_allocation = GFP_KERNEL_ACCOUNT;
978 sk->sk_write_space = unix_write_space;
979 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
980 sk->sk_destruct = unix_sock_destructor;
981 u = unix_sk(sk);
982 u->inflight = 0;
983 u->path.dentry = NULL;
984 u->path.mnt = NULL;
985 spin_lock_init(&u->lock);
986 INIT_LIST_HEAD(&u->link);
987 mutex_init(&u->iolock); /* single task reading lock */
988 mutex_init(&u->bindlock); /* single task binding lock */
989 init_waitqueue_head(&u->peer_wait);
990 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
991 memset(&u->scm_stat, 0, sizeof(struct scm_stat));
992 unix_insert_unbound_socket(net, sk);
993
994 sock_prot_inuse_add(net, sk->sk_prot, 1);
995
996 return sk;
997
998err:
999 atomic_long_dec(&unix_nr_socks);
1000 return ERR_PTR(err);
1001}
1002
1003static int unix_create(struct net *net, struct socket *sock, int protocol,
1004 int kern)
1005{
1006 struct sock *sk;
1007
1008 if (protocol && protocol != PF_UNIX)
1009 return -EPROTONOSUPPORT;
1010
1011 sock->state = SS_UNCONNECTED;
1012
1013 switch (sock->type) {
1014 case SOCK_STREAM:
1015 sock->ops = &unix_stream_ops;
1016 break;
1017 /*
1018 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
1019 * nothing uses it.
1020 */
1021 case SOCK_RAW:
1022 sock->type = SOCK_DGRAM;
1023 fallthrough;
1024 case SOCK_DGRAM:
1025 sock->ops = &unix_dgram_ops;
1026 break;
1027 case SOCK_SEQPACKET:
1028 sock->ops = &unix_seqpacket_ops;
1029 break;
1030 default:
1031 return -ESOCKTNOSUPPORT;
1032 }
1033
1034 sk = unix_create1(net, sock, kern, sock->type);
1035 if (IS_ERR(sk))
1036 return PTR_ERR(sk);
1037
1038 return 0;
1039}
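/*
 * Illustrative consequence of the SOCK_RAW fallthrough above: asking
 * for a raw AF_UNIX socket quietly yields a datagram socket.
 *
 *	int fd = socket(AF_UNIX, SOCK_RAW, 0);
 *	int type;
 *	socklen_t len = sizeof(type);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len);
 *	// type == SOCK_DGRAM, not SOCK_RAW
 */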
1040
1041static int unix_release(struct socket *sock)
1042{
1043 struct sock *sk = sock->sk;
1044
1045 if (!sk)
1046 return 0;
1047
1048 sk->sk_prot->close(sk, 0);
1049 unix_release_sock(sk, 0);
1050 sock->sk = NULL;
1051
1052 return 0;
1053}
1054
1055static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1056 int type)
1057{
1058 struct inode *inode;
1059 struct path path;
1060 struct sock *sk;
1061 int err;
1062
1063 unix_mkname_bsd(sunaddr, addr_len);
1064 err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1065 if (err)
1066 goto fail;
1067
1068 err = path_permission(&path, MAY_WRITE);
1069 if (err)
1070 goto path_put;
1071
1072 err = -ECONNREFUSED;
1073 inode = d_backing_inode(path.dentry);
1074 if (!S_ISSOCK(inode->i_mode))
1075 goto path_put;
1076
1077 sk = unix_find_socket_byinode(inode);
1078 if (!sk)
1079 goto path_put;
1080
1081 err = -EPROTOTYPE;
1082 if (sk->sk_type == type)
1083 touch_atime(&path);
1084 else
1085 goto sock_put;
1086
1087 path_put(&path);
1088
1089 return sk;
1090
1091sock_put:
1092 sock_put(sk);
1093path_put:
1094 path_put(&path);
1095fail:
1096 return ERR_PTR(err);
1097}
1098
1099static struct sock *unix_find_abstract(struct net *net,
1100 struct sockaddr_un *sunaddr,
1101 int addr_len, int type)
1102{
1103 unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1104 struct dentry *dentry;
1105 struct sock *sk;
1106
1107 sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1108 if (!sk)
1109 return ERR_PTR(-ECONNREFUSED);
1110
1111 dentry = unix_sk(sk)->path.dentry;
1112 if (dentry)
1113 touch_atime(&unix_sk(sk)->path);
1114
1115 return sk;
1116}
1117
1118static struct sock *unix_find_other(struct net *net,
1119 struct sockaddr_un *sunaddr,
1120 int addr_len, int type)
1121{
1122 struct sock *sk;
1123
1124 if (sunaddr->sun_path[0])
1125 sk = unix_find_bsd(sunaddr, addr_len, type);
1126 else
1127 sk = unix_find_abstract(net, sunaddr, addr_len, type);
1128
1129 return sk;
1130}
1131
1132static int unix_autobind(struct sock *sk)
1133{
1134 struct unix_sock *u = unix_sk(sk);
1135 unsigned int new_hash, old_hash;
1136 struct net *net = sock_net(sk);
1137 struct unix_address *addr;
1138 u32 lastnum, ordernum;
1139 int err;
1140
1141 err = mutex_lock_interruptible(&u->bindlock);
1142 if (err)
1143 return err;
1144
1145 if (u->addr)
1146 goto out;
1147
1148 err = -ENOMEM;
1149 addr = kzalloc(sizeof(*addr) +
1150 offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1151 if (!addr)
1152 goto out;
1153
1154 addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1155 addr->name->sun_family = AF_UNIX;
1156 refcount_set(&addr->refcnt, 1);
1157
1158 old_hash = sk->sk_hash;
1159 ordernum = get_random_u32();
1160 lastnum = ordernum & 0xFFFFF;
1161retry:
1162 ordernum = (ordernum + 1) & 0xFFFFF;
1163 sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1164
1165 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1166 unix_table_double_lock(net, old_hash, new_hash);
1167
1168 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1169 unix_table_double_unlock(net, old_hash, new_hash);
1170
1171		/* __unix_find_socket_byname() may take a long time if many names
1172 * are already in use.
1173 */
1174 cond_resched();
1175
1176 if (ordernum == lastnum) {
1177			/* Give up if all names seem to be in use. */
1178 err = -ENOSPC;
1179 unix_release_addr(addr);
1180 goto out;
1181 }
1182
1183 goto retry;
1184 }
1185
1186 __unix_set_addr_hash(net, sk, addr, new_hash);
1187 unix_table_double_unlock(net, old_hash, new_hash);
1188 err = 0;
1189
1190out: mutex_unlock(&u->bindlock);
1191 return err;
1192}
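/*
 * Userspace sketch (illustrative): binding with only the address
 * family triggers the autobind path above, which picks a unique
 * five-hex-digit abstract name.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	socklen_t len = sizeof(a);
 *
 *	bind(sock, (struct sockaddr *)&a, sizeof(sa_family_t));
 *	getsockname(sock, (struct sockaddr *)&a, &len);
 *	// sun_path now holds '\0' plus five hex digits;
 *	// len == offsetof(struct sockaddr_un, sun_path) + 6
 */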
1193
1194static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1195 int addr_len)
1196{
1197 umode_t mode = S_IFSOCK |
1198 (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1199 struct unix_sock *u = unix_sk(sk);
1200 unsigned int new_hash, old_hash;
1201 struct net *net = sock_net(sk);
1202 struct mnt_idmap *idmap;
1203 struct unix_address *addr;
1204 struct dentry *dentry;
1205 struct path parent;
1206 int err;
1207
1208 addr_len = unix_mkname_bsd(sunaddr, addr_len);
1209 addr = unix_create_addr(sunaddr, addr_len);
1210 if (!addr)
1211 return -ENOMEM;
1212
1213 /*
1214	 * Get the parent directory, calculate the hash for the last
1215 * component.
1216 */
1217 dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1218 if (IS_ERR(dentry)) {
1219 err = PTR_ERR(dentry);
1220 goto out;
1221 }
1222
1223 /*
1224 * All right, let's create it.
1225 */
1226 idmap = mnt_idmap(parent.mnt);
1227 err = security_path_mknod(&parent, dentry, mode, 0);
1228 if (!err)
1229 err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1230 if (err)
1231 goto out_path;
1232 err = mutex_lock_interruptible(&u->bindlock);
1233 if (err)
1234 goto out_unlink;
1235 if (u->addr)
1236 goto out_unlock;
1237
1238 old_hash = sk->sk_hash;
1239 new_hash = unix_bsd_hash(d_backing_inode(dentry));
1240 unix_table_double_lock(net, old_hash, new_hash);
1241 u->path.mnt = mntget(parent.mnt);
1242 u->path.dentry = dget(dentry);
1243 __unix_set_addr_hash(net, sk, addr, new_hash);
1244 unix_table_double_unlock(net, old_hash, new_hash);
1245 unix_insert_bsd_socket(sk);
1246 mutex_unlock(&u->bindlock);
1247 done_path_create(&parent, dentry);
1248 return 0;
1249
1250out_unlock:
1251 mutex_unlock(&u->bindlock);
1252 err = -EINVAL;
1253out_unlink:
1254 /* failed after successful mknod? unlink what we'd created... */
1255 vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1256out_path:
1257 done_path_create(&parent, dentry);
1258out:
1259 unix_release_addr(addr);
1260 return err == -EEXIST ? -EADDRINUSE : err;
1261}
1262
1263static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1264 int addr_len)
1265{
1266 struct unix_sock *u = unix_sk(sk);
1267 unsigned int new_hash, old_hash;
1268 struct net *net = sock_net(sk);
1269 struct unix_address *addr;
1270 int err;
1271
1272 addr = unix_create_addr(sunaddr, addr_len);
1273 if (!addr)
1274 return -ENOMEM;
1275
1276 err = mutex_lock_interruptible(&u->bindlock);
1277 if (err)
1278 goto out;
1279
1280 if (u->addr) {
1281 err = -EINVAL;
1282 goto out_mutex;
1283 }
1284
1285 old_hash = sk->sk_hash;
1286 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1287 unix_table_double_lock(net, old_hash, new_hash);
1288
1289 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1290 goto out_spin;
1291
1292 __unix_set_addr_hash(net, sk, addr, new_hash);
1293 unix_table_double_unlock(net, old_hash, new_hash);
1294 mutex_unlock(&u->bindlock);
1295 return 0;
1296
1297out_spin:
1298 unix_table_double_unlock(net, old_hash, new_hash);
1299 err = -EADDRINUSE;
1300out_mutex:
1301 mutex_unlock(&u->bindlock);
1302out:
1303 unix_release_addr(addr);
1304 return err;
1305}
1306
1307static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1308{
1309 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1310 struct sock *sk = sock->sk;
1311 int err;
1312
1313 if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1314 sunaddr->sun_family == AF_UNIX)
1315 return unix_autobind(sk);
1316
1317 err = unix_validate_addr(sunaddr, addr_len);
1318 if (err)
1319 return err;
1320
1321 if (sunaddr->sun_path[0])
1322 err = unix_bind_bsd(sk, sunaddr, addr_len);
1323 else
1324 err = unix_bind_abstract(sk, sunaddr, addr_len);
1325
1326 return err;
1327}
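
/* Illustrative userspace sketch (not from this file) of the two bind
 * flavours dispatched above: a non-NUL first byte in sun_path selects
 * unix_bind_bsd() and creates an S_IFSOCK inode, while a leading NUL
 * selects unix_bind_abstract(). fs_fd and abs_fd are fresh sockets.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	strcpy(a.sun_path, "/tmp/demo.sock");	// filesystem namespace
 *	bind(fs_fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	a.sun_path[0] = '\0';			// abstract: no inode
 *	memcpy(a.sun_path + 1, "demo", 4);	// name is not terminated
 *	bind(abs_fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */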
1328
1329static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1330{
1331 if (unlikely(sk1 == sk2) || !sk2) {
1332 unix_state_lock(sk1);
1333 return;
1334 }
1335 if (sk1 > sk2)
1336 swap(sk1, sk2);
1337
1338 unix_state_lock(sk1);
1339 unix_state_lock_nested(sk2, U_LOCK_SECOND);
1340}
1341
1342static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1343{
1344 if (unlikely(sk1 == sk2) || !sk2) {
1345 unix_state_unlock(sk1);
1346 return;
1347 }
1348 unix_state_unlock(sk1);
1349 unix_state_unlock(sk2);
1350}
1351
1352static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1353 int alen, int flags)
1354{
1355 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1356 struct sock *sk = sock->sk;
1357 struct sock *other;
1358 int err;
1359
1360 err = -EINVAL;
1361 if (alen < offsetofend(struct sockaddr, sa_family))
1362 goto out;
1363
1364 if (addr->sa_family != AF_UNSPEC) {
1365 err = unix_validate_addr(sunaddr, alen);
1366 if (err)
1367 goto out;
1368
1369 err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1370 if (err)
1371 goto out;
1372
1373 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1374 test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1375 !READ_ONCE(unix_sk(sk)->addr)) {
1376 err = unix_autobind(sk);
1377 if (err)
1378 goto out;
1379 }
1380
1381restart:
1382 other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1383 if (IS_ERR(other)) {
1384 err = PTR_ERR(other);
1385 goto out;
1386 }
1387
1388 unix_state_double_lock(sk, other);
1389
1390 /* Apparently VFS overslept socket death. Retry. */
1391 if (sock_flag(other, SOCK_DEAD)) {
1392 unix_state_double_unlock(sk, other);
1393 sock_put(other);
1394 goto restart;
1395 }
1396
1397 err = -EPERM;
1398 if (!unix_may_send(sk, other))
1399 goto out_unlock;
1400
1401 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1402 if (err)
1403 goto out_unlock;
1404
1405 sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1406 } else {
1407 /*
1408 * 1003.1g: breaking the connected state with AF_UNSPEC
1409 */
1410 other = NULL;
1411 unix_state_double_lock(sk, other);
1412 }
1413
1414 /*
1415 * If it was connected, reconnect.
1416 */
1417 if (unix_peer(sk)) {
1418 struct sock *old_peer = unix_peer(sk);
1419
1420 unix_peer(sk) = other;
1421 if (!other)
1422 sk->sk_state = TCP_CLOSE;
1423 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1424
1425 unix_state_double_unlock(sk, other);
1426
1427 if (other != old_peer)
1428 unix_dgram_disconnected(sk, old_peer);
1429 sock_put(old_peer);
1430 } else {
1431 unix_peer(sk) = other;
1432 unix_state_double_unlock(sk, other);
1433 }
1434
1435 return 0;
1436
1437out_unlock:
1438 unix_state_double_unlock(sk, other);
1439 sock_put(other);
1440out:
1441 return err;
1442}
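
/* Illustrative userspace sketch (not from this file) of the AF_UNSPEC
 * branch above, i.e. the 1003.1g way of dissolving a datagram
 * association:
 *
 *	struct sockaddr unspec = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, (struct sockaddr *)&peer, peer_len); // set the peer
 *	connect(fd, &unspec, sizeof(unspec)); // drop it, sk -> TCP_CLOSE
 */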
1443
1444static long unix_wait_for_peer(struct sock *other, long timeo)
1445 __releases(&unix_sk(other)->lock)
1446{
1447 struct unix_sock *u = unix_sk(other);
1448 int sched;
1449 DEFINE_WAIT(wait);
1450
1451 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1452
1453 sched = !sock_flag(other, SOCK_DEAD) &&
1454 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1455 unix_recvq_full_lockless(other);
1456
1457 unix_state_unlock(other);
1458
1459 if (sched)
1460 timeo = schedule_timeout(timeo);
1461
1462 finish_wait(&u->peer_wait, &wait);
1463 return timeo;
1464}
1465
1466static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1467 int addr_len, int flags)
1468{
1469 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1470 struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1471 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1472 struct net *net = sock_net(sk);
1473 struct sk_buff *skb = NULL;
1474 long timeo;
1475 int err;
1476 int st;
1477
1478 err = unix_validate_addr(sunaddr, addr_len);
1479 if (err)
1480 goto out;
1481
1482 err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1483 if (err)
1484 goto out;
1485
1486 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1487 test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1488 !READ_ONCE(u->addr)) {
1489 err = unix_autobind(sk);
1490 if (err)
1491 goto out;
1492 }
1493
1494 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1495
1496 /* First of all, allocate resources.
1497 If we do it after the state is locked,
1498 we will have to recheck everything again anyway.
1499 */
1500
1501 /* create new sock for complete connection */
1502 newsk = unix_create1(net, NULL, 0, sock->type);
1503 if (IS_ERR(newsk)) {
1504 err = PTR_ERR(newsk);
1505 newsk = NULL;
1506 goto out;
1507 }
1508
1509 err = -ENOMEM;
1510
1511 /* Allocate skb for sending to listening sock */
1512 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1513 if (skb == NULL)
1514 goto out;
1515
1516restart:
1517 /* Find listening sock. */
1518 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1519 if (IS_ERR(other)) {
1520 err = PTR_ERR(other);
1521 other = NULL;
1522 goto out;
1523 }
1524
1525 /* Latch state of peer */
1526 unix_state_lock(other);
1527
1528 /* Apparently VFS overslept socket death. Retry. */
1529 if (sock_flag(other, SOCK_DEAD)) {
1530 unix_state_unlock(other);
1531 sock_put(other);
1532 goto restart;
1533 }
1534
1535 err = -ECONNREFUSED;
1536 if (other->sk_state != TCP_LISTEN)
1537 goto out_unlock;
1538 if (other->sk_shutdown & RCV_SHUTDOWN)
1539 goto out_unlock;
1540
1541 if (unix_recvq_full(other)) {
1542 err = -EAGAIN;
1543 if (!timeo)
1544 goto out_unlock;
1545
1546 timeo = unix_wait_for_peer(other, timeo);
1547
1548 err = sock_intr_errno(timeo);
1549 if (signal_pending(current))
1550 goto out;
1551 sock_put(other);
1552 goto restart;
1553 }
1554
1555 /* Latch our state.
1556
1557 This is a tricky place. We need to grab our state lock and cannot
1558 drop the lock on the peer. It is dangerous because a deadlock is
1559 possible. The connect-to-self case and simultaneous
1560 connect attempts are eliminated by checking the socket
1561 state: other is TCP_LISTEN, and if sk were TCP_LISTEN we
1562 would have checked this before attempting to grab the lock.
1563
1564 Well, and we have to recheck the state after the socket is locked.
1565 */
1566 st = sk->sk_state;
1567
1568 switch (st) {
1569 case TCP_CLOSE:
1570 /* This is ok... continue with connect */
1571 break;
1572 case TCP_ESTABLISHED:
1573 /* Socket is already connected */
1574 err = -EISCONN;
1575 goto out_unlock;
1576 default:
1577 err = -EINVAL;
1578 goto out_unlock;
1579 }
1580
1581 unix_state_lock_nested(sk, U_LOCK_SECOND);
1582
1583 if (sk->sk_state != st) {
1584 unix_state_unlock(sk);
1585 unix_state_unlock(other);
1586 sock_put(other);
1587 goto restart;
1588 }
1589
1590 err = security_unix_stream_connect(sk, other, newsk);
1591 if (err) {
1592 unix_state_unlock(sk);
1593 goto out_unlock;
1594 }
1595
1596 /* The way is open! Quickly set all the necessary fields... */
1597
1598 sock_hold(sk);
1599 unix_peer(newsk) = sk;
1600 newsk->sk_state = TCP_ESTABLISHED;
1601 newsk->sk_type = sk->sk_type;
1602 init_peercred(newsk);
1603 newu = unix_sk(newsk);
1604 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1605 otheru = unix_sk(other);
1606
1607 /* copy address information from listening to new sock
1608 *
1609 * The contents of *(otheru->addr) and otheru->path
1610 * are seen fully set up here, since we have found
1611 * otheru in hash under its lock. Insertion into the
1612 * hash chain we'd found it in had been done in an
1613 * earlier critical area protected by the chain's lock,
1614 * the same one where we'd set *(otheru->addr) contents,
1615 * as well as otheru->path and otheru->addr itself.
1616 *
1617 * Using smp_store_release() here to set newu->addr
1618 * is enough to make those stores, as well as stores
1619 * to newu->path visible to anyone who gets newu->addr
1620 * by smp_load_acquire(). IOW, the same guarantees
1621 * as for unix_sock instances bound in unix_bind() or
1622 * in unix_autobind().
1623 */
1624 if (otheru->path.dentry) {
1625 path_get(&otheru->path);
1626 newu->path = otheru->path;
1627 }
1628 refcount_inc(&otheru->addr->refcnt);
1629 smp_store_release(&newu->addr, otheru->addr);
1630
1631 /* Set credentials */
1632 copy_peercred(sk, other);
1633
1634 sock->state = SS_CONNECTED;
1635 sk->sk_state = TCP_ESTABLISHED;
1636 sock_hold(newsk);
1637
1638 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1639 unix_peer(sk) = newsk;
1640
1641 unix_state_unlock(sk);
1642
1643 /* queue the skb and send info to the listening sock */
1644 spin_lock(&other->sk_receive_queue.lock);
1645 __skb_queue_tail(&other->sk_receive_queue, skb);
1646 spin_unlock(&other->sk_receive_queue.lock);
1647 unix_state_unlock(other);
1648 other->sk_data_ready(other);
1649 sock_put(other);
1650 return 0;
1651
1652out_unlock:
1653 if (other)
1654 unix_state_unlock(other);
1655
1656out:
1657 kfree_skb(skb);
1658 if (newsk)
1659 unix_release_sock(newsk, 0);
1660 if (other)
1661 sock_put(other);
1662 return err;
1663}
1664
1665static int unix_socketpair(struct socket *socka, struct socket *sockb)
1666{
1667 struct sock *ska = socka->sk, *skb = sockb->sk;
1668
1669 /* Join our sockets back to back */
1670 sock_hold(ska);
1671 sock_hold(skb);
1672 unix_peer(ska) = skb;
1673 unix_peer(skb) = ska;
1674 init_peercred(ska);
1675 init_peercred(skb);
1676
1677 ska->sk_state = TCP_ESTABLISHED;
1678 skb->sk_state = TCP_ESTABLISHED;
1679 socka->state = SS_CONNECTED;
1680 sockb->state = SS_CONNECTED;
1681 return 0;
1682}
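
/* Illustrative userspace sketch (not from this file): socketpair() is
 * the one way to get a connected, nameless pair; both ends come back
 * ESTABLISHED with peer credentials set by the code above.
 *
 *	int sv[2];
 *	char buf[4];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	write(sv[0], "ping", 4);
 *	read(sv[1], buf, sizeof(buf));		// reads "ping"
 */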
1683
1684static void unix_sock_inherit_flags(const struct socket *old,
1685 struct socket *new)
1686{
1687 if (test_bit(SOCK_PASSCRED, &old->flags))
1688 set_bit(SOCK_PASSCRED, &new->flags);
1689 if (test_bit(SOCK_PASSPIDFD, &old->flags))
1690 set_bit(SOCK_PASSPIDFD, &new->flags);
1691 if (test_bit(SOCK_PASSSEC, &old->flags))
1692 set_bit(SOCK_PASSSEC, &new->flags);
1693}
1694
1695static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1696 bool kern)
1697{
1698 struct sock *sk = sock->sk;
1699 struct sock *tsk;
1700 struct sk_buff *skb;
1701 int err;
1702
1703 err = -EOPNOTSUPP;
1704 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1705 goto out;
1706
1707 err = -EINVAL;
1708 if (sk->sk_state != TCP_LISTEN)
1709 goto out;
1710
1711 /* If the socket state is TCP_LISTEN it cannot change (for now...),
1712 * so no locks are necessary.
1713 */
1714
1715 skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1716 &err);
1717 if (!skb) {
1718 /* This means receive shutdown. */
1719 if (err == 0)
1720 err = -EINVAL;
1721 goto out;
1722 }
1723
1724 tsk = skb->sk;
1725 skb_free_datagram(sk, skb);
1726 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1727
1728 /* attach accepted sock to socket */
1729 unix_state_lock(tsk);
1730 newsock->state = SS_CONNECTED;
1731 unix_sock_inherit_flags(sock, newsock);
1732 sock_graft(tsk, newsock);
1733 unix_state_unlock(tsk);
1734 return 0;
1735
1736out:
1737 return err;
1738}
1739
1740
1741static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1742{
1743 struct sock *sk = sock->sk;
1744 struct unix_address *addr;
1745 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1746 int err = 0;
1747
1748 if (peer) {
1749 sk = unix_peer_get(sk);
1750
1751 err = -ENOTCONN;
1752 if (!sk)
1753 goto out;
1754 err = 0;
1755 } else {
1756 sock_hold(sk);
1757 }
1758
1759 addr = smp_load_acquire(&unix_sk(sk)->addr);
1760 if (!addr) {
1761 sunaddr->sun_family = AF_UNIX;
1762 sunaddr->sun_path[0] = 0;
1763 err = offsetof(struct sockaddr_un, sun_path);
1764 } else {
1765 err = addr->len;
1766 memcpy(sunaddr, addr->name, addr->len);
1767
1768 if (peer)
1769 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1770 CGROUP_UNIX_GETPEERNAME);
1771 else
1772 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1773 CGROUP_UNIX_GETSOCKNAME);
1774 }
1775 sock_put(sk);
1776out:
1777 return err;
1778}
1779
1780/* The "user->unix_inflight" variable is protected by the garbage
1781 * collection lock, and we just read it locklessly here. If you go
1782 * over the limit, there might be a tiny race in actually noticing
1783 * it across threads. Tough.
1784 */
1785static inline bool too_many_unix_fds(struct task_struct *p)
1786{
1787 struct user_struct *user = current_user();
1788
1789 if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1790 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1791 return false;
1792}
1793
1794static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1795{
1796 int i;
1797
1798 if (too_many_unix_fds(current))
1799 return -ETOOMANYREFS;
1800
1801 /* Need to duplicate file references for the sake of garbage
1802 * collection. Otherwise a socket in the fps might become a
1803 * candidate for GC while the skb is not yet queued.
1804 */
1805 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1806 if (!UNIXCB(skb).fp)
1807 return -ENOMEM;
1808
1809 for (i = scm->fp->count - 1; i >= 0; i--)
1810 unix_inflight(scm->fp->user, scm->fp->fp[i]);
1811
1812 return 0;
1813}
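
/* Illustrative userspace sketch (not from this file) of what feeds
 * unix_attach_fds(): an SCM_RIGHTS control message carrying one
 * descriptor. "sock" and "fd_to_pass" are placeholders.
 *
 *	char data = 'x';
 *	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
 *	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock, &msg, 0);
 */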
1814
1815static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1816{
1817 int i;
1818
1819 scm->fp = UNIXCB(skb).fp;
1820 UNIXCB(skb).fp = NULL;
1821
1822 for (i = scm->fp->count - 1; i >= 0; i--)
1823 unix_notinflight(scm->fp->user, scm->fp->fp[i]);
1824}
1825
1826static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1827{
1828 scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1829
1830 /*
1831 * Garbage collection of unix sockets starts by selecting a set of
1832 * candidate sockets which are referenced only by being in flight
1833 * (total_refs == inflight_refs). This condition is checked once during
1834 * the candidate collection phase, and candidates are marked as such, so
1835 * that non-candidates can later be ignored. While inflight_refs is
1836 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1837 * is an instantaneous decision.
1838 *
1839 * Once a candidate, however, the socket must not be reinstalled into a
1840 * file descriptor while the garbage collection is in progress.
1841 *
1842 * If the above conditions are met, then the directed graph of
1843 * candidates (*) does not change while unix_gc_lock is held.
1844 *
1845 * Any operation that changes the file count through file descriptors
1846 * (dup, close, sendmsg) does not change the graph, since candidates
1847 * are not installed in fds.
1848 *
1849 * Dequeueing a candidate via recvmsg would install it into an fd, but
1850 * that takes unix_gc_lock to decrement the inflight count, so it's
1851 * serialized with garbage collection.
1852 *
1853 * MSG_PEEK is special in that it does not change the inflight count,
1854 * yet does install the socket into an fd. The following lock/unlock
1855 * pair is to ensure serialization with garbage collection. It must be
1856 * done between incrementing the file count and installing the file into
1857 * an fd.
1858 *
1859 * If garbage collection starts after the barrier provided by the
1860 * lock/unlock, then it will see the elevated refcount and not mark this
1861 * as a candidate. If a garbage collection is already in progress
1862 * before the file count was incremented, then the lock/unlock pair will
1863 * ensure that garbage collection is finished before progressing to
1864 * installing the fd.
1865 *
1866 * (*) A -> B where B is on the queue of A or B is on the queue of C
1867 * which is on the queue of listening socket A.
1868 */
1869 spin_lock(&unix_gc_lock);
1870 spin_unlock(&unix_gc_lock);
1871}
1872
1873static void unix_destruct_scm(struct sk_buff *skb)
1874{
1875 struct scm_cookie scm;
1876
1877 memset(&scm, 0, sizeof(scm));
1878 scm.pid = UNIXCB(skb).pid;
1879 if (UNIXCB(skb).fp)
1880 unix_detach_fds(&scm, skb);
1881
1882 /* Alas, it calls VFS */
1883 /* So fscking what? fput() has been SMP-safe since last summer */
1884 scm_destroy(&scm);
1885 sock_wfree(skb);
1886}
1887
1888static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1889{
1890 int err = 0;
1891
1892 UNIXCB(skb).pid = get_pid(scm->pid);
1893 UNIXCB(skb).uid = scm->creds.uid;
1894 UNIXCB(skb).gid = scm->creds.gid;
1895 UNIXCB(skb).fp = NULL;
1896 unix_get_secdata(scm, skb);
1897 if (scm->fp && send_fds)
1898 err = unix_attach_fds(scm, skb);
1899
1900 skb->destructor = unix_destruct_scm;
1901 return err;
1902}
1903
1904static bool unix_passcred_enabled(const struct socket *sock,
1905 const struct sock *other)
1906{
1907 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1908 test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1909 !other->sk_socket ||
1910 test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1911 test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1912}
1913
1914/*
1915 * Some apps rely on write() giving SCM_CREDENTIALS.
1916 * We include credentials if the source or destination
1917 * socket asserted SOCK_PASSCRED.
1918 */
1919static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1920 const struct sock *other)
1921{
1922 if (UNIXCB(skb).pid)
1923 return;
1924 if (unix_passcred_enabled(sock, other)) {
1925 UNIXCB(skb).pid = get_pid(task_tgid(current));
1926 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1927 }
1928}
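
/* Illustrative userspace sketch (not from this file): once either end
 * sets SO_PASSCRED, maybe_add_creds() fills in sender credentials even
 * when the sender attached none, so the receiver sees them:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	// recvmsg() now yields a SOL_SOCKET/SCM_CREDENTIALS cmsg
 *	// holding the sender's struct ucred (pid, uid, gid)
 */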
1929
1930static bool unix_skb_scm_eq(struct sk_buff *skb,
1931 struct scm_cookie *scm)
1932{
1933 return UNIXCB(skb).pid == scm->pid &&
1934 uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1935 gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1936 unix_secdata_eq(scm, skb);
1937}
1938
1939static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1940{
1941 struct scm_fp_list *fp = UNIXCB(skb).fp;
1942 struct unix_sock *u = unix_sk(sk);
1943
1944 if (unlikely(fp && fp->count))
1945 atomic_add(fp->count, &u->scm_stat.nr_fds);
1946}
1947
1948static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1949{
1950 struct scm_fp_list *fp = UNIXCB(skb).fp;
1951 struct unix_sock *u = unix_sk(sk);
1952
1953 if (unlikely(fp && fp->count))
1954 atomic_sub(fp->count, &u->scm_stat.nr_fds);
1955}
1956
1957/*
1958 * Send AF_UNIX data.
1959 */
1960
1961static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1962 size_t len)
1963{
1964 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1965 struct sock *sk = sock->sk, *other = NULL;
1966 struct unix_sock *u = unix_sk(sk);
1967 struct scm_cookie scm;
1968 struct sk_buff *skb;
1969 int data_len = 0;
1970 int sk_locked;
1971 long timeo;
1972 int err;
1973
1974 err = scm_send(sock, msg, &scm, false);
1975 if (err < 0)
1976 return err;
1977
1978 wait_for_unix_gc(scm.fp);
1979
1980 err = -EOPNOTSUPP;
1981 if (msg->msg_flags&MSG_OOB)
1982 goto out;
1983
1984 if (msg->msg_namelen) {
1985 err = unix_validate_addr(sunaddr, msg->msg_namelen);
1986 if (err)
1987 goto out;
1988
1989 err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1990 msg->msg_name,
1991 &msg->msg_namelen,
1992 NULL);
1993 if (err)
1994 goto out;
1995 } else {
1996 sunaddr = NULL;
1997 err = -ENOTCONN;
1998 other = unix_peer_get(sk);
1999 if (!other)
2000 goto out;
2001 }
2002
2003 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
2004 test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
2005 !READ_ONCE(u->addr)) {
2006 err = unix_autobind(sk);
2007 if (err)
2008 goto out;
2009 }
2010
2011 err = -EMSGSIZE;
2012 if (len > sk->sk_sndbuf - 32)
2013 goto out;
2014
2015 if (len > SKB_MAX_ALLOC) {
2016 data_len = min_t(size_t,
2017 len - SKB_MAX_ALLOC,
2018 MAX_SKB_FRAGS * PAGE_SIZE);
2019 data_len = PAGE_ALIGN(data_len);
2020
2021 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2022 }
2023
2024 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2025 msg->msg_flags & MSG_DONTWAIT, &err,
2026 PAGE_ALLOC_COSTLY_ORDER);
2027 if (skb == NULL)
2028 goto out;
2029
2030 err = unix_scm_to_skb(&scm, skb, true);
2031 if (err < 0)
2032 goto out_free;
2033
2034 skb_put(skb, len - data_len);
2035 skb->data_len = data_len;
2036 skb->len = len;
2037 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2038 if (err)
2039 goto out_free;
2040
2041 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2042
2043restart:
2044 if (!other) {
2045 err = -ECONNRESET;
2046 if (sunaddr == NULL)
2047 goto out_free;
2048
2049 other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
2050 sk->sk_type);
2051 if (IS_ERR(other)) {
2052 err = PTR_ERR(other);
2053 other = NULL;
2054 goto out_free;
2055 }
2056 }
2057
2058 if (sk_filter(other, skb) < 0) {
2059 /* Toss the packet but do not return any error to the sender */
2060 err = len;
2061 goto out_free;
2062 }
2063
2064 sk_locked = 0;
2065 unix_state_lock(other);
2066restart_locked:
2067 err = -EPERM;
2068 if (!unix_may_send(sk, other))
2069 goto out_unlock;
2070
2071 if (unlikely(sock_flag(other, SOCK_DEAD))) {
2072 /*
2073 * Check with 1003.1g - what should a
2074 * datagram error return here?
2075 */
2076 unix_state_unlock(other);
2077 sock_put(other);
2078
2079 if (!sk_locked)
2080 unix_state_lock(sk);
2081
2082 err = 0;
2083 if (sk->sk_type == SOCK_SEQPACKET) {
2084 /* We are here only when racing with unix_release_sock(),
2085 * which is clearing @other. Never change the state to TCP_CLOSE,
2086 * unlike what SOCK_DGRAM wants.
2087 */
2088 unix_state_unlock(sk);
2089 err = -EPIPE;
2090 } else if (unix_peer(sk) == other) {
2091 unix_peer(sk) = NULL;
2092 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2093
2094 sk->sk_state = TCP_CLOSE;
2095 unix_state_unlock(sk);
2096
2097 unix_dgram_disconnected(sk, other);
2098 sock_put(other);
2099 err = -ECONNREFUSED;
2100 } else {
2101 unix_state_unlock(sk);
2102 }
2103
2104 other = NULL;
2105 if (err)
2106 goto out_free;
2107 goto restart;
2108 }
2109
2110 err = -EPIPE;
2111 if (other->sk_shutdown & RCV_SHUTDOWN)
2112 goto out_unlock;
2113
2114 if (sk->sk_type != SOCK_SEQPACKET) {
2115 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2116 if (err)
2117 goto out_unlock;
2118 }
2119
2120 /* other == sk && unix_peer(other) != sk if
2121 * - unix_peer(sk) == NULL, destination address bound to sk
2122 * - unix_peer(sk) == sk at the time of lookup, but disconnected before lock
2123 */
2124 if (other != sk &&
2125 unlikely(unix_peer(other) != sk &&
2126 unix_recvq_full_lockless(other))) {
2127 if (timeo) {
2128 timeo = unix_wait_for_peer(other, timeo);
2129
2130 err = sock_intr_errno(timeo);
2131 if (signal_pending(current))
2132 goto out_free;
2133
2134 goto restart;
2135 }
2136
2137 if (!sk_locked) {
2138 unix_state_unlock(other);
2139 unix_state_double_lock(sk, other);
2140 }
2141
2142 if (unix_peer(sk) != other ||
2143 unix_dgram_peer_wake_me(sk, other)) {
2144 err = -EAGAIN;
2145 sk_locked = 1;
2146 goto out_unlock;
2147 }
2148
2149 if (!sk_locked) {
2150 sk_locked = 1;
2151 goto restart_locked;
2152 }
2153 }
2154
2155 if (unlikely(sk_locked))
2156 unix_state_unlock(sk);
2157
2158 if (sock_flag(other, SOCK_RCVTSTAMP))
2159 __net_timestamp(skb);
2160 maybe_add_creds(skb, sock, other);
2161 scm_stat_add(other, skb);
2162 skb_queue_tail(&other->sk_receive_queue, skb);
2163 unix_state_unlock(other);
2164 other->sk_data_ready(other);
2165 sock_put(other);
2166 scm_destroy(&scm);
2167 return len;
2168
2169out_unlock:
2170 if (sk_locked)
2171 unix_state_unlock(sk);
2172 unix_state_unlock(other);
2173out_free:
2174 kfree_skb(skb);
2175out:
2176 if (other)
2177 sock_put(other);
2178 scm_destroy(&scm);
2179 return err;
2180}
2181
2182 /* We use paged skbs for stream sockets, and limit occupancy to 32768
2183 * bytes, with a minimum of a full page.
2184 */
2185#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
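
/* Worked out for the common 4 KiB page size: get_order(32768) == 3, so
 * UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768, i.e. up to eight pages of
 * paged data per skb. With 64 KiB pages get_order() is 0 and the limit
 * becomes a single page.
 */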
2186
2187#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2188static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2189 struct scm_cookie *scm, bool fds_sent)
2190{
2191 struct unix_sock *ousk = unix_sk(other);
2192 struct sk_buff *skb;
2193 int err = 0;
2194
2195 skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2196
2197 if (!skb)
2198 return err;
2199
2200 err = unix_scm_to_skb(scm, skb, !fds_sent);
2201 if (err < 0) {
2202 kfree_skb(skb);
2203 return err;
2204 }
2205 skb_put(skb, 1);
2206 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2207
2208 if (err) {
2209 kfree_skb(skb);
2210 return err;
2211 }
2212
2213 unix_state_lock(other);
2214
2215 if (sock_flag(other, SOCK_DEAD) ||
2216 (other->sk_shutdown & RCV_SHUTDOWN)) {
2217 unix_state_unlock(other);
2218 kfree_skb(skb);
2219 return -EPIPE;
2220 }
2221
2222 maybe_add_creds(skb, sock, other);
2223 skb_get(skb);
2224
2225 scm_stat_add(other, skb);
2226
2227 spin_lock(&other->sk_receive_queue.lock);
2228 if (ousk->oob_skb)
2229 consume_skb(ousk->oob_skb);
2230 WRITE_ONCE(ousk->oob_skb, skb);
2231 __skb_queue_tail(&other->sk_receive_queue, skb);
2232 spin_unlock(&other->sk_receive_queue.lock);
2233
2234 sk_send_sigurg(other);
2235 unix_state_unlock(other);
2236 other->sk_data_ready(other);
2237
2238 return err;
2239}
2240#endif
2241
2242static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2243 size_t len)
2244{
2245 struct sock *sk = sock->sk;
2246 struct sock *other = NULL;
2247 int err, size;
2248 struct sk_buff *skb;
2249 int sent = 0;
2250 struct scm_cookie scm;
2251 bool fds_sent = false;
2252 int data_len;
2253
2254 err = scm_send(sock, msg, &scm, false);
2255 if (err < 0)
2256 return err;
2257
2258 wait_for_unix_gc(scm.fp);
2259
2260 err = -EOPNOTSUPP;
2261 if (msg->msg_flags & MSG_OOB) {
2262#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2263 if (len)
2264 len--;
2265 else
2266#endif
2267 goto out_err;
2268 }
2269
2270 if (msg->msg_namelen) {
2271 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2272 goto out_err;
2273 } else {
2274 err = -ENOTCONN;
2275 other = unix_peer(sk);
2276 if (!other)
2277 goto out_err;
2278 }
2279
2280 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2281 goto pipe_err;
2282
2283 while (sent < len) {
2284 size = len - sent;
2285
2286 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2287 skb = sock_alloc_send_pskb(sk, 0, 0,
2288 msg->msg_flags & MSG_DONTWAIT,
2289 &err, 0);
2290 } else {
2291 /* Keep two messages in the pipe so it schedules better */
2292 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2293
2294 /* allow fallback to order-0 allocations */
2295 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2296
2297 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2298
2299 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2300
2301 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2302 msg->msg_flags & MSG_DONTWAIT, &err,
2303 get_order(UNIX_SKB_FRAGS_SZ));
2304 }
2305 if (!skb)
2306 goto out_err;
2307
2308 /* Only send the fds in the first buffer */
2309 err = unix_scm_to_skb(&scm, skb, !fds_sent);
2310 if (err < 0) {
2311 kfree_skb(skb);
2312 goto out_err;
2313 }
2314 fds_sent = true;
2315
2316 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2317 err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2318 sk->sk_allocation);
2319 if (err < 0) {
2320 kfree_skb(skb);
2321 goto out_err;
2322 }
2323 size = err;
2324 refcount_add(size, &sk->sk_wmem_alloc);
2325 } else {
2326 skb_put(skb, size - data_len);
2327 skb->data_len = data_len;
2328 skb->len = size;
2329 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2330 if (err) {
2331 kfree_skb(skb);
2332 goto out_err;
2333 }
2334 }
2335
2336 unix_state_lock(other);
2337
2338 if (sock_flag(other, SOCK_DEAD) ||
2339 (other->sk_shutdown & RCV_SHUTDOWN))
2340 goto pipe_err_free;
2341
2342 maybe_add_creds(skb, sock, other);
2343 scm_stat_add(other, skb);
2344 skb_queue_tail(&other->sk_receive_queue, skb);
2345 unix_state_unlock(other);
2346 other->sk_data_ready(other);
2347 sent += size;
2348 }
2349
2350#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2351 if (msg->msg_flags & MSG_OOB) {
2352 err = queue_oob(sock, msg, other, &scm, fds_sent);
2353 if (err)
2354 goto out_err;
2355 sent++;
2356 }
2357#endif
2358
2359 scm_destroy(&scm);
2360
2361 return sent;
2362
2363pipe_err_free:
2364 unix_state_unlock(other);
2365 kfree_skb(skb);
2366pipe_err:
2367 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2368 send_sig(SIGPIPE, current, 0);
2369 err = -EPIPE;
2370out_err:
2371 scm_destroy(&scm);
2372 return sent ? : err;
2373}
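
/* Illustrative userspace sketch (not from this file) of the
 * CONFIG_AF_UNIX_OOB path above: the final byte of an MSG_OOB send is
 * split off into queue_oob() and becomes the peer's oob_skb.
 *
 *	int sv[2];
 *	char c;
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	send(sv[0], "ab", 2, 0);
 *	send(sv[0], "c", 1, MSG_OOB);	// 'c' becomes the peer's oob_skb
 *	recv(sv[1], &c, 1, MSG_OOB);	// reads 'c' out of band
 */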
2374
2375static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2376 size_t len)
2377{
2378 int err;
2379 struct sock *sk = sock->sk;
2380
2381 err = sock_error(sk);
2382 if (err)
2383 return err;
2384
2385 if (sk->sk_state != TCP_ESTABLISHED)
2386 return -ENOTCONN;
2387
2388 if (msg->msg_namelen)
2389 msg->msg_namelen = 0;
2390
2391 return unix_dgram_sendmsg(sock, msg, len);
2392}
2393
2394static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2395 size_t size, int flags)
2396{
2397 struct sock *sk = sock->sk;
2398
2399 if (sk->sk_state != TCP_ESTABLISHED)
2400 return -ENOTCONN;
2401
2402 return unix_dgram_recvmsg(sock, msg, size, flags);
2403}
2404
2405static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2406{
2407 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2408
2409 if (addr) {
2410 msg->msg_namelen = addr->len;
2411 memcpy(msg->msg_name, addr->name, addr->len);
2412 }
2413}
2414
2415int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2416 int flags)
2417{
2418 struct scm_cookie scm;
2419 struct socket *sock = sk->sk_socket;
2420 struct unix_sock *u = unix_sk(sk);
2421 struct sk_buff *skb, *last;
2422 long timeo;
2423 int skip;
2424 int err;
2425
2426 err = -EOPNOTSUPP;
2427 if (flags&MSG_OOB)
2428 goto out;
2429
2430 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2431
2432 do {
2433 mutex_lock(&u->iolock);
2434
2435 skip = sk_peek_offset(sk, flags);
2436 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2437 &skip, &err, &last);
2438 if (skb) {
2439 if (!(flags & MSG_PEEK))
2440 scm_stat_del(sk, skb);
2441 break;
2442 }
2443
2444 mutex_unlock(&u->iolock);
2445
2446 if (err != -EAGAIN)
2447 break;
2448 } while (timeo &&
2449 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2450 &err, &timeo, last));
2451
2452 if (!skb) { /* implies iolock unlocked */
2453 unix_state_lock(sk);
2454 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2455 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2456 (sk->sk_shutdown & RCV_SHUTDOWN))
2457 err = 0;
2458 unix_state_unlock(sk);
2459 goto out;
2460 }
2461
2462 if (wq_has_sleeper(&u->peer_wait))
2463 wake_up_interruptible_sync_poll(&u->peer_wait,
2464 EPOLLOUT | EPOLLWRNORM |
2465 EPOLLWRBAND);
2466
2467 if (msg->msg_name) {
2468 unix_copy_addr(msg, skb->sk);
2469
2470 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2471 msg->msg_name,
2472 &msg->msg_namelen);
2473 }
2474
2475 if (size > skb->len - skip)
2476 size = skb->len - skip;
2477 else if (size < skb->len - skip)
2478 msg->msg_flags |= MSG_TRUNC;
2479
2480 err = skb_copy_datagram_msg(skb, skip, msg, size);
2481 if (err)
2482 goto out_free;
2483
2484 if (sock_flag(sk, SOCK_RCVTSTAMP))
2485 __sock_recv_timestamp(msg, sk, skb);
2486
2487 memset(&scm, 0, sizeof(scm));
2488
2489 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2490 unix_set_secdata(&scm, skb);
2491
2492 if (!(flags & MSG_PEEK)) {
2493 if (UNIXCB(skb).fp)
2494 unix_detach_fds(&scm, skb);
2495
2496 sk_peek_offset_bwd(sk, skb->len);
2497 } else {
2498 /* It is questionable: on PEEK we could:
2499 - not return fds - good, but too simple 8)
2500 - return fds, and not return them on read (old strategy,
2501 apparently wrong)
2502 - clone fds (chosen for now, as it is the most universal
2503 solution)
2504
2505 POSIX 1003.1g does not actually define this clearly
2506 at all. POSIX 1003.1g doesn't define a lot of things
2507 clearly, however!
2508
2509 */
2510
2511 sk_peek_offset_fwd(sk, size);
2512
2513 if (UNIXCB(skb).fp)
2514 unix_peek_fds(&scm, skb);
2515 }
2516 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2517
2518 scm_recv_unix(sock, msg, &scm, flags);
2519
2520out_free:
2521 skb_free_datagram(sk, skb);
2522 mutex_unlock(&u->iolock);
2523out:
2524 return err;
2525}
2526
2527static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2528 int flags)
2529{
2530 struct sock *sk = sock->sk;
2531
2532#ifdef CONFIG_BPF_SYSCALL
2533 const struct proto *prot = READ_ONCE(sk->sk_prot);
2534
2535 if (prot != &unix_dgram_proto)
2536 return prot->recvmsg(sk, msg, size, flags, NULL);
2537#endif
2538 return __unix_dgram_recvmsg(sk, msg, size, flags);
2539}
2540
2541static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2542{
2543 struct unix_sock *u = unix_sk(sk);
2544 struct sk_buff *skb;
2545 int err;
2546
2547 mutex_lock(&u->iolock);
2548 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2549 mutex_unlock(&u->iolock);
2550 if (!skb)
2551 return err;
2552
2553 return recv_actor(sk, skb);
2554}
2555
2556/*
2557 * Sleep until more data has arrived. But check for races..
2558 */
2559static long unix_stream_data_wait(struct sock *sk, long timeo,
2560 struct sk_buff *last, unsigned int last_len,
2561 bool freezable)
2562{
2563 unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2564 struct sk_buff *tail;
2565 DEFINE_WAIT(wait);
2566
2567 unix_state_lock(sk);
2568
2569 for (;;) {
2570 prepare_to_wait(sk_sleep(sk), &wait, state);
2571
2572 tail = skb_peek_tail(&sk->sk_receive_queue);
2573 if (tail != last ||
2574 (tail && tail->len != last_len) ||
2575 sk->sk_err ||
2576 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2577 signal_pending(current) ||
2578 !timeo)
2579 break;
2580
2581 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2582 unix_state_unlock(sk);
2583 timeo = schedule_timeout(timeo);
2584 unix_state_lock(sk);
2585
2586 if (sock_flag(sk, SOCK_DEAD))
2587 break;
2588
2589 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2590 }
2591
2592 finish_wait(sk_sleep(sk), &wait);
2593 unix_state_unlock(sk);
2594 return timeo;
2595}
2596
2597static unsigned int unix_skb_len(const struct sk_buff *skb)
2598{
2599 return skb->len - UNIXCB(skb).consumed;
2600}
2601
2602struct unix_stream_read_state {
2603 int (*recv_actor)(struct sk_buff *, int, int,
2604 struct unix_stream_read_state *);
2605 struct socket *socket;
2606 struct msghdr *msg;
2607 struct pipe_inode_info *pipe;
2608 size_t size;
2609 int flags;
2610 unsigned int splice_flags;
2611};
2612
2613#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2614static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2615{
2616 struct socket *sock = state->socket;
2617 struct sock *sk = sock->sk;
2618 struct unix_sock *u = unix_sk(sk);
2619 int chunk = 1;
2620 struct sk_buff *oob_skb;
2621
2622 mutex_lock(&u->iolock);
2623 unix_state_lock(sk);
2624 spin_lock(&sk->sk_receive_queue.lock);
2625
2626 if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2627 spin_unlock(&sk->sk_receive_queue.lock);
2628 unix_state_unlock(sk);
2629 mutex_unlock(&u->iolock);
2630 return -EINVAL;
2631 }
2632
2633 oob_skb = u->oob_skb;
2634
2635 if (!(state->flags & MSG_PEEK))
2636 WRITE_ONCE(u->oob_skb, NULL);
2637 else
2638 skb_get(oob_skb);
2639
2640 spin_unlock(&sk->sk_receive_queue.lock);
2641 unix_state_unlock(sk);
2642
2643 chunk = state->recv_actor(oob_skb, 0, chunk, state);
2644
2645 if (!(state->flags & MSG_PEEK))
2646 UNIXCB(oob_skb).consumed += 1;
2647
2648 consume_skb(oob_skb);
2649
2650 mutex_unlock(&u->iolock);
2651
2652 if (chunk < 0)
2653 return -EFAULT;
2654
2655 state->msg->msg_flags |= MSG_OOB;
2656 return 1;
2657}
2658
2659static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2660 int flags, int copied)
2661{
2662 struct unix_sock *u = unix_sk(sk);
2663
2664 if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2665 skb_unlink(skb, &sk->sk_receive_queue);
2666 consume_skb(skb);
2667 skb = NULL;
2668 } else {
2669 struct sk_buff *unlinked_skb = NULL;
2670
2671 spin_lock(&sk->sk_receive_queue.lock);
2672
2673 if (skb == u->oob_skb) {
2674 if (copied) {
2675 skb = NULL;
2676 } else if (sock_flag(sk, SOCK_URGINLINE)) {
2677 if (!(flags & MSG_PEEK)) {
2678 WRITE_ONCE(u->oob_skb, NULL);
2679 consume_skb(skb);
2680 }
2681 } else if (flags & MSG_PEEK) {
2682 skb = NULL;
2683 } else {
2684 __skb_unlink(skb, &sk->sk_receive_queue);
2685 WRITE_ONCE(u->oob_skb, NULL);
2686 unlinked_skb = skb;
2687 skb = skb_peek(&sk->sk_receive_queue);
2688 }
2689 }
2690
2691 spin_unlock(&sk->sk_receive_queue.lock);
2692
2693 if (unlinked_skb) {
2694 WARN_ON_ONCE(skb_unref(unlinked_skb));
2695 kfree_skb(unlinked_skb);
2696 }
2697 }
2698 return skb;
2699}
2700#endif
2701
2702static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2703{
2704 if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2705 return -ENOTCONN;
2706
2707 return unix_read_skb(sk, recv_actor);
2708}
2709
2710static int unix_stream_read_generic(struct unix_stream_read_state *state,
2711 bool freezable)
2712{
2713 struct scm_cookie scm;
2714 struct socket *sock = state->socket;
2715 struct sock *sk = sock->sk;
2716 struct unix_sock *u = unix_sk(sk);
2717 int copied = 0;
2718 int flags = state->flags;
2719 int noblock = flags & MSG_DONTWAIT;
2720 bool check_creds = false;
2721 int target;
2722 int err = 0;
2723 long timeo;
2724 int skip;
2725 size_t size = state->size;
2726 unsigned int last_len;
2727
2728 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2729 err = -EINVAL;
2730 goto out;
2731 }
2732
2733 if (unlikely(flags & MSG_OOB)) {
2734 err = -EOPNOTSUPP;
2735#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2736 err = unix_stream_recv_urg(state);
2737#endif
2738 goto out;
2739 }
2740
2741 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2742 timeo = sock_rcvtimeo(sk, noblock);
2743
2744 memset(&scm, 0, sizeof(scm));
2745
2746 /* Lock the socket to prevent the queue from being reordered
2747 * while we sleep copying data out to the msg
2748 */
2749 mutex_lock(&u->iolock);
2750
2751 skip = max(sk_peek_offset(sk, flags), 0);
2752
2753 do {
2754 int chunk;
2755 bool drop_skb;
2756 struct sk_buff *skb, *last;
2757
2758redo:
2759 unix_state_lock(sk);
2760 if (sock_flag(sk, SOCK_DEAD)) {
2761 err = -ECONNRESET;
2762 goto unlock;
2763 }
2764 last = skb = skb_peek(&sk->sk_receive_queue);
2765 last_len = last ? last->len : 0;
2766
2767again:
2768#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2769 if (skb) {
2770 skb = manage_oob(skb, sk, flags, copied);
2771 if (!skb && copied) {
2772 unix_state_unlock(sk);
2773 break;
2774 }
2775 }
2776#endif
2777 if (skb == NULL) {
2778 if (copied >= target)
2779 goto unlock;
2780
2781 /*
2782 * POSIX 1003.1g mandates this order.
2783 */
2784
2785 err = sock_error(sk);
2786 if (err)
2787 goto unlock;
2788 if (sk->sk_shutdown & RCV_SHUTDOWN)
2789 goto unlock;
2790
2791 unix_state_unlock(sk);
2792 if (!timeo) {
2793 err = -EAGAIN;
2794 break;
2795 }
2796
2797 mutex_unlock(&u->iolock);
2798
2799 timeo = unix_stream_data_wait(sk, timeo, last,
2800 last_len, freezable);
2801
2802 if (signal_pending(current)) {
2803 err = sock_intr_errno(timeo);
2804 scm_destroy(&scm);
2805 goto out;
2806 }
2807
2808 mutex_lock(&u->iolock);
2809 goto redo;
2810unlock:
2811 unix_state_unlock(sk);
2812 break;
2813 }
2814
2815 while (skip >= unix_skb_len(skb)) {
2816 skip -= unix_skb_len(skb);
2817 last = skb;
2818 last_len = skb->len;
2819 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2820 if (!skb)
2821 goto again;
2822 }
2823
2824 unix_state_unlock(sk);
2825
2826 if (check_creds) {
2827 /* Never glue messages from different writers */
2828 if (!unix_skb_scm_eq(skb, &scm))
2829 break;
2830 } else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2831 test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2832 /* Copy credentials */
2833 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2834 unix_set_secdata(&scm, skb);
2835 check_creds = true;
2836 }
2837
2838 /* Copy address just once */
2839 if (state->msg && state->msg->msg_name) {
2840 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2841 state->msg->msg_name);
2842 unix_copy_addr(state->msg, skb->sk);
2843
2844 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2845 state->msg->msg_name,
2846 &state->msg->msg_namelen);
2847
2848 sunaddr = NULL;
2849 }
2850
2851 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2852 skb_get(skb);
2853 chunk = state->recv_actor(skb, skip, chunk, state);
2854 drop_skb = !unix_skb_len(skb);
2855 /* skb is only safe to use if !drop_skb */
2856 consume_skb(skb);
2857 if (chunk < 0) {
2858 if (copied == 0)
2859 copied = -EFAULT;
2860 break;
2861 }
2862 copied += chunk;
2863 size -= chunk;
2864
2865 if (drop_skb) {
2866 /* the skb was touched by a concurrent reader;
2867 * we should not expect anything from this skb
2868 * anymore and assume it is invalid - we can be
2869 * sure it was dropped from the socket queue
2870 *
2871 * let's report a short read
2872 */
2873 err = 0;
2874 break;
2875 }
2876
2877 /* Mark read part of skb as used */
2878 if (!(flags & MSG_PEEK)) {
2879 UNIXCB(skb).consumed += chunk;
2880
2881 sk_peek_offset_bwd(sk, chunk);
2882
2883 if (UNIXCB(skb).fp) {
2884 scm_stat_del(sk, skb);
2885 unix_detach_fds(&scm, skb);
2886 }
2887
2888 if (unix_skb_len(skb))
2889 break;
2890
2891 skb_unlink(skb, &sk->sk_receive_queue);
2892 consume_skb(skb);
2893
2894 if (scm.fp)
2895 break;
2896 } else {
2897 /* It is questionable, see note in unix_dgram_recvmsg.
2898 */
2899 if (UNIXCB(skb).fp)
2900 unix_peek_fds(&scm, skb);
2901
2902 sk_peek_offset_fwd(sk, chunk);
2903
2904 if (UNIXCB(skb).fp)
2905 break;
2906
2907 skip = 0;
2908 last = skb;
2909 last_len = skb->len;
2910 unix_state_lock(sk);
2911 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2912 if (skb)
2913 goto again;
2914 unix_state_unlock(sk);
2915 break;
2916 }
2917 } while (size);
2918
2919 mutex_unlock(&u->iolock);
2920 if (state->msg)
2921 scm_recv_unix(sock, state->msg, &scm, flags);
2922 else
2923 scm_destroy(&scm);
2924out:
2925 return copied ? : err;
2926}
2927
2928static int unix_stream_read_actor(struct sk_buff *skb,
2929 int skip, int chunk,
2930 struct unix_stream_read_state *state)
2931{
2932 int ret;
2933
2934 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2935 state->msg, chunk);
2936 return ret ?: chunk;
2937}
2938
2939int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2940 size_t size, int flags)
2941{
2942 struct unix_stream_read_state state = {
2943 .recv_actor = unix_stream_read_actor,
2944 .socket = sk->sk_socket,
2945 .msg = msg,
2946 .size = size,
2947 .flags = flags
2948 };
2949
2950 return unix_stream_read_generic(&state, true);
2951}
2952
2953static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2954 size_t size, int flags)
2955{
2956 struct unix_stream_read_state state = {
2957 .recv_actor = unix_stream_read_actor,
2958 .socket = sock,
2959 .msg = msg,
2960 .size = size,
2961 .flags = flags
2962 };
2963
2964#ifdef CONFIG_BPF_SYSCALL
2965 struct sock *sk = sock->sk;
2966 const struct proto *prot = READ_ONCE(sk->sk_prot);
2967
2968 if (prot != &unix_stream_proto)
2969 return prot->recvmsg(sk, msg, size, flags, NULL);
2970#endif
2971 return unix_stream_read_generic(&state, true);
2972}
2973
2974static int unix_stream_splice_actor(struct sk_buff *skb,
2975 int skip, int chunk,
2976 struct unix_stream_read_state *state)
2977{
2978 return skb_splice_bits(skb, state->socket->sk,
2979 UNIXCB(skb).consumed + skip,
2980 state->pipe, chunk, state->splice_flags);
2981}
2982
2983static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2984 struct pipe_inode_info *pipe,
2985 size_t size, unsigned int flags)
2986{
2987 struct unix_stream_read_state state = {
2988 .recv_actor = unix_stream_splice_actor,
2989 .socket = sock,
2990 .pipe = pipe,
2991 .size = size,
2992 .splice_flags = flags,
2993 };
2994
2995 if (unlikely(*ppos))
2996 return -ESPIPE;
2997
2998 if (sock->file->f_flags & O_NONBLOCK ||
2999 flags & SPLICE_F_NONBLOCK)
3000 state.flags = MSG_DONTWAIT;
3001
3002 return unix_stream_read_generic(&state, false);
3003}
3004
3005static int unix_shutdown(struct socket *sock, int mode)
3006{
3007 struct sock *sk = sock->sk;
3008 struct sock *other;
3009
3010 if (mode < SHUT_RD || mode > SHUT_RDWR)
3011 return -EINVAL;
3012 /* This maps:
3013 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
3014 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
3015 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3016 */
3017 ++mode;
3018
3019 unix_state_lock(sk);
3020 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3021 other = unix_peer(sk);
3022 if (other)
3023 sock_hold(other);
3024 unix_state_unlock(sk);
3025 sk->sk_state_change(sk);
3026
3027 if (other &&
3028 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3029
3030 int peer_mode = 0;
3031 const struct proto *prot = READ_ONCE(other->sk_prot);
3032
3033 if (prot->unhash)
3034 prot->unhash(other);
3035 if (mode&RCV_SHUTDOWN)
3036 peer_mode |= SEND_SHUTDOWN;
3037 if (mode&SEND_SHUTDOWN)
3038 peer_mode |= RCV_SHUTDOWN;
3039 unix_state_lock(other);
3040 WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3041 unix_state_unlock(other);
3042 other->sk_state_change(other);
3043 if (peer_mode == SHUTDOWN_MASK)
3044 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3045 else if (peer_mode & RCV_SHUTDOWN)
3046 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3047 }
3048 if (other)
3049 sock_put(other);
3050
3051 return 0;
3052}
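
/* Illustrative userspace sketch (not from this file) of the mode
 * mapping above: SHUT_WR on one end becomes SEND_SHUTDOWN locally and
 * RCV_SHUTDOWN on the peer, so the peer reads EOF once its queue
 * drains while this end can still receive.
 *
 *	shutdown(sv[0], SHUT_WR);
 *	n = read(sv[1], buf, sizeof(buf));	// 0 once the queue drains
 */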
3053
3054long unix_inq_len(struct sock *sk)
3055{
3056 struct sk_buff *skb;
3057 long amount = 0;
3058
3059 if (sk->sk_state == TCP_LISTEN)
3060 return -EINVAL;
3061
3062 spin_lock(&sk->sk_receive_queue.lock);
3063 if (sk->sk_type == SOCK_STREAM ||
3064 sk->sk_type == SOCK_SEQPACKET) {
3065 skb_queue_walk(&sk->sk_receive_queue, skb)
3066 amount += unix_skb_len(skb);
3067 } else {
3068 skb = skb_peek(&sk->sk_receive_queue);
3069 if (skb)
3070 amount = skb->len;
3071 }
3072 spin_unlock(&sk->sk_receive_queue.lock);
3073
3074 return amount;
3075}
3076EXPORT_SYMBOL_GPL(unix_inq_len);
3077
3078long unix_outq_len(struct sock *sk)
3079{
3080 return sk_wmem_alloc_get(sk);
3081}
3082EXPORT_SYMBOL_GPL(unix_outq_len);
3083
3084static int unix_open_file(struct sock *sk)
3085{
3086 struct path path;
3087 struct file *f;
3088 int fd;
3089
3090 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3091 return -EPERM;
3092
3093 if (!smp_load_acquire(&unix_sk(sk)->addr))
3094 return -ENOENT;
3095
3096 path = unix_sk(sk)->path;
3097 if (!path.dentry)
3098 return -ENOENT;
3099
3100 path_get(&path);
3101
3102 fd = get_unused_fd_flags(O_CLOEXEC);
3103 if (fd < 0)
3104 goto out;
3105
3106 f = dentry_open(&path, O_PATH, current_cred());
3107 if (IS_ERR(f)) {
3108 put_unused_fd(fd);
3109 fd = PTR_ERR(f);
3110 goto out;
3111 }
3112
3113 fd_install(fd, f);
3114out:
3115 path_put(&path);
3116
3117 return fd;
3118}
3119
3120static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3121{
3122 struct sock *sk = sock->sk;
3123 long amount = 0;
3124 int err;
3125
3126 switch (cmd) {
3127 case SIOCOUTQ:
3128 amount = unix_outq_len(sk);
3129 err = put_user(amount, (int __user *)arg);
3130 break;
3131 case SIOCINQ:
3132 amount = unix_inq_len(sk);
3133 if (amount < 0)
3134 err = amount;
3135 else
3136 err = put_user(amount, (int __user *)arg);
3137 break;
3138 case SIOCUNIXFILE:
3139 err = unix_open_file(sk);
3140 break;
3141#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3142 case SIOCATMARK:
3143 {
3144 struct sk_buff *skb;
3145 int answ = 0;
3146
3147 skb = skb_peek(&sk->sk_receive_queue);
3148 if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3149 answ = 1;
3150 err = put_user(answ, (int __user *)arg);
3151 }
3152 break;
3153#endif
3154 default:
3155 err = -ENOIOCTLCMD;
3156 break;
3157 }
3158 return err;
3159}
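
/* Illustrative userspace sketch (not from this file) of the ioctls
 * handled above:
 *
 *	int unread, unsent, atmark, pfd;
 *
 *	ioctl(fd, SIOCINQ, &unread);	// bytes queued for reading
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes not yet read by the peer
 *	ioctl(fd, SIOCATMARK, &atmark);	// 1 if the OOB byte is up next
 *	pfd = ioctl(fd, SIOCUNIXFILE, 0); // O_PATH fd; needs CAP_NET_ADMIN
 */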
3160
3161#ifdef CONFIG_COMPAT
3162static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3163{
3164 return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3165}
3166#endif
3167
3168static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3169{
3170 struct sock *sk = sock->sk;
3171 __poll_t mask;
3172 u8 shutdown;
3173
3174 sock_poll_wait(file, sock, wait);
3175 mask = 0;
3176 shutdown = READ_ONCE(sk->sk_shutdown);
3177
3178 /* exceptional events? */
3179 if (READ_ONCE(sk->sk_err))
3180 mask |= EPOLLERR;
3181 if (shutdown == SHUTDOWN_MASK)
3182 mask |= EPOLLHUP;
3183 if (shutdown & RCV_SHUTDOWN)
3184 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3185
3186 /* readable? */
3187 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3188 mask |= EPOLLIN | EPOLLRDNORM;
3189 if (sk_is_readable(sk))
3190 mask |= EPOLLIN | EPOLLRDNORM;
3191#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3192 if (READ_ONCE(unix_sk(sk)->oob_skb))
3193 mask |= EPOLLPRI;
3194#endif
3195
3196 /* Connection-based sockets need to check for termination and startup */
3197 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3198 sk->sk_state == TCP_CLOSE)
3199 mask |= EPOLLHUP;
3200
3201 /*
3202 * We also set the socket writable when the other side has shut
3203 * down the connection. This prevents stuck sockets.
3204 */
3205 if (unix_writable(sk))
3206 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3207
3208 return mask;
3209}
3210
3211static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3212 poll_table *wait)
3213{
3214 struct sock *sk = sock->sk, *other;
3215 unsigned int writable;
3216 __poll_t mask;
3217 u8 shutdown;
3218
3219 sock_poll_wait(file, sock, wait);
3220 mask = 0;
3221 shutdown = READ_ONCE(sk->sk_shutdown);
3222
3223 /* exceptional events? */
3224 if (READ_ONCE(sk->sk_err) ||
3225 !skb_queue_empty_lockless(&sk->sk_error_queue))
3226 mask |= EPOLLERR |
3227 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3228
3229 if (shutdown & RCV_SHUTDOWN)
3230 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3231 if (shutdown == SHUTDOWN_MASK)
3232 mask |= EPOLLHUP;
3233
3234 /* readable? */
3235 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3236 mask |= EPOLLIN | EPOLLRDNORM;
3237 if (sk_is_readable(sk))
3238 mask |= EPOLLIN | EPOLLRDNORM;
3239
3240 /* Connection-based sockets need to check for termination and startup */
3241 if (sk->sk_type == SOCK_SEQPACKET) {
3242 if (sk->sk_state == TCP_CLOSE)
3243 mask |= EPOLLHUP;
3244 /* connection hasn't started yet? */
3245 if (sk->sk_state == TCP_SYN_SENT)
3246 return mask;
3247 }
3248
3249 /* No write status requested, avoid expensive OUT tests. */
3250 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3251 return mask;
3252
3253 writable = unix_writable(sk);
3254 if (writable) {
3255 unix_state_lock(sk);
3256
3257 other = unix_peer(sk);
3258 if (other && unix_peer(other) != sk &&
3259 unix_recvq_full_lockless(other) &&
3260 unix_dgram_peer_wake_me(sk, other))
3261 writable = 0;
3262
3263 unix_state_unlock(sk);
3264 }
3265
3266 if (writable)
3267 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3268 else
3269 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3270
3271 return mask;
3272}
3273
3274#ifdef CONFIG_PROC_FS
3275
3276#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3277
3278#define get_bucket(x) ((x) >> BUCKET_SPACE)
3279#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3280#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
3281
3282static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3283{
3284 unsigned long offset = get_offset(*pos);
3285 unsigned long bucket = get_bucket(*pos);
3286 unsigned long count = 0;
3287 struct sock *sk;
3288
3289 for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3290 sk; sk = sk_next(sk)) {
3291 if (++count == offset)
3292 break;
3293 }
3294
3295 return sk;
3296}
3297
3298static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3299{
3300 unsigned long bucket = get_bucket(*pos);
3301 struct net *net = seq_file_net(seq);
3302 struct sock *sk;
3303
3304 while (bucket < UNIX_HASH_SIZE) {
3305 spin_lock(&net->unx.table.locks[bucket]);
3306
3307 sk = unix_from_bucket(seq, pos);
3308 if (sk)
3309 return sk;
3310
3311 spin_unlock(&net->unx.table.locks[bucket]);
3312
3313 *pos = set_bucket_offset(++bucket, 1);
3314 }
3315
3316 return NULL;
3317}
3318
3319static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3320 loff_t *pos)
3321{
3322 unsigned long bucket = get_bucket(*pos);
3323
3324 sk = sk_next(sk);
3325 if (sk)
3326 return sk;
3327
3329 spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3330
3331 *pos = set_bucket_offset(++bucket, 1);
3332
3333 return unix_get_first(seq, pos);
3334}
3335
3336static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3337{
3338 if (!*pos)
3339 return SEQ_START_TOKEN;
3340
3341 return unix_get_first(seq, pos);
3342}
3343
3344static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3345{
3346 ++*pos;
3347
3348 if (v == SEQ_START_TOKEN)
3349 return unix_get_first(seq, pos);
3350
3351 return unix_get_next(seq, v, pos);
3352}
3353
3354static void unix_seq_stop(struct seq_file *seq, void *v)
3355{
3356 struct sock *sk = v;
3357
3358 if (sk)
3359 spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3360}
3361
3362static int unix_seq_show(struct seq_file *seq, void *v)
3363{
3365 if (v == SEQ_START_TOKEN)
3366 seq_puts(seq, "Num RefCount Protocol Flags Type St "
3367 "Inode Path\n");
3368 else {
3369 struct sock *s = v;
3370 struct unix_sock *u = unix_sk(s);
3371 unix_state_lock(s);
3372
3373 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3374 s,
3375 refcount_read(&s->sk_refcnt),
3376 0,
3377 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3378 s->sk_type,
3379 s->sk_socket ?
3380 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3381 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3382 sock_i_ino(s));
3383
3384 if (u->addr) { /* under a hash table lock here */
3385 int i, len;
3386 seq_putc(seq, ' ');
3387
3388 i = 0;
3389 len = u->addr->len -
3390 offsetof(struct sockaddr_un, sun_path);
3391 if (u->addr->name->sun_path[0]) {
3392 len--;
3393 } else {
3394 seq_putc(seq, '@');
3395 i++;
3396 }
3397 for ( ; i < len; i++)
3398 seq_putc(seq, u->addr->name->sun_path[i] ?:
3399 '@');
3400 }
3401 unix_state_unlock(s);
3402 seq_putc(seq, '\n');
3403 }
3404
3405 return 0;
3406}
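
/* The show routine above is what is typically exposed as
 * /proc/net/unix: one line of the printf'ed fields per socket followed
 * by the bound path, with the leading and embedded NUL bytes of
 * abstract names rendered as '@'.
 */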
3407
3408static const struct seq_operations unix_seq_ops = {
3409 .start = unix_seq_start,
3410 .next = unix_seq_next,
3411 .stop = unix_seq_stop,
3412 .show = unix_seq_show,
3413};
3414
3415#ifdef CONFIG_BPF_SYSCALL
struct bpf_unix_iter_state {
	struct seq_net_private p;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	struct sock **batch;
	bool st_bucket_done;
};

struct bpf_iter__unix {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct unix_sock *, unix_sk);
	uid_t uid __aligned(8);
};

static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			      struct unix_sock *unix_sk, uid_t uid)
{
	struct bpf_iter__unix ctx;

	meta->seq_num--; /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.unix_sk = unix_sk;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}

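/* Take a reference on every socket in @start_sk's bucket, up to the
 * current batch capacity, then drop the bucket lock.  Returns the total
 * number of sockets seen in the bucket so the caller can detect that the
 * batch was too small and retry with a larger one.
 */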
static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected = 1;
	struct sock *sk;

	sock_hold(start_sk);
	iter->batch[iter->end_sk++] = start_sk;

	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
		if (iter->end_sk < iter->max_sk) {
			sock_hold(sk);
			iter->batch[iter->end_sk++] = sk;
		}

		expected++;
	}

	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);

	return expected;
}

static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
{
	while (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);
}

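/* Grow the batch array to @new_batch_sz entries.  Any references still
 * held in the old batch are released first, since its contents are not
 * carried over to the new allocation.
 */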
static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
				       unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_unix_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}

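/* Fill the batch with the sockets of the next non-empty bucket.  If the
 * bucket held more sockets than the batch could take, resize to 3/2 of
 * the observed count and retry once; on a second overflow the partial
 * batch is used as-is.
 */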
static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
					loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected;
	bool resized = false;
	struct sock *sk;

	if (iter->st_bucket_done)
		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);

again:
	/* Get a new batch */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	sk = unix_get_first(seq, pos);
	if (!sk)
		return NULL; /* Done */

	expected = bpf_iter_unix_hold_batch(seq, sk);

	if (iter->end_sk == expected) {
		iter->st_bucket_done = true;
		return sk;
	}

	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;
	}

	return sk;
}

static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	return bpf_iter_unix_batch(seq, pos);
}

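/* Drop the reference of the socket that was just shown, then hand out
 * the next one from the batch, refilling from the next bucket once the
 * batch is exhausted.
 */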
static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so advance to the next sk in
	 * the batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);

	++*pos;

	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		sk = bpf_iter_unix_batch(seq, pos);

	return sk;
}

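/* Run the attached BPF program on one socket.  The socket is only taken
 * with the fast lock, and sockets that were unhashed while sitting in
 * the batch are skipped rather than shown.
 */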
static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	bool slow;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	slow = lock_sock_fast(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = unix_prog_seq_show(prog, &meta, v, uid);
unlock:
	unlock_sock_fast(sk, slow);
	return ret;
}

static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)unix_prog_seq_show(prog, &meta, v, 0);
	}

	if (iter->cur_sk < iter->end_sk)
		bpf_iter_unix_put_batch(iter);
}

static const struct seq_operations bpf_iter_unix_seq_ops = {
	.start = bpf_iter_unix_seq_start,
	.next = bpf_iter_unix_seq_next,
	.stop = bpf_iter_unix_seq_stop,
	.show = bpf_iter_unix_seq_show,
};
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_PROC_FS */

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner = THIS_MODULE,
};

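/* Per-netns setup: default dgram backlog sysctl, the /proc/net/unix
 * entry, and the per-netns hash table (one spinlock per bucket).
 * Unwinds in reverse order on failure.
 */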
static int __net_init unix_net_init(struct net *net)
{
	int i;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private)))
		goto err_sysctl;
#endif

	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
					      sizeof(spinlock_t), GFP_KERNEL);
	if (!net->unx.table.locks)
		goto err_proc;

	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
						sizeof(struct hlist_head),
						GFP_KERNEL);
	if (!net->unx.table.buckets)
		goto free_locks;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock_init(&net->unx.table.locks[i]);
		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
	}

	return 0;

free_locks:
	kvfree(net->unx.table.locks);
err_proc:
#ifdef CONFIG_PROC_FS
	remove_proc_entry("unix", net->proc_net);
err_sysctl:
#endif
	unix_sysctl_unregister(net);
out:
	return -ENOMEM;
}

static void __net_exit unix_net_exit(struct net *net)
{
	kvfree(net->unx.table.buckets);
	kvfree(net->unx.table.locks);
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
		     struct unix_sock *unix_sk, uid_t uid)

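/* Initial capacity of the socket batch; bpf_iter_unix_batch() grows it
 * on demand when a bucket holds more sockets than this.
 */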
#define INIT_BATCH_SZ 16

static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_unix_iter_state *iter = priv_data;
	int err;

	err = bpf_iter_init_seq_net(priv_data, aux);
	if (err)
		return err;

	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
	if (err) {
		bpf_iter_fini_seq_net(priv_data);
		return err;
	}

	return 0;
}

static void bpf_iter_fini_unix(void *priv_data)
{
	struct bpf_unix_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}

static const struct bpf_iter_seq_info unix_seq_info = {
	.seq_ops = &bpf_iter_unix_seq_ops,
	.init_seq_private = bpf_iter_init_unix,
	.fini_seq_private = bpf_iter_fini_unix,
	.seq_priv_size = sizeof(struct bpf_unix_iter_state),
};

static const struct bpf_func_proto *
bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
			     const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sk_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sk_getsockopt_proto;
	default:
		return NULL;
	}
}

static struct bpf_iter_reg unix_reg_info = {
	.target = "unix",
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__unix, unix_sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.get_func_proto = bpf_iter_unix_get_func_proto,
	.seq_info = &unix_seq_info,
};

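/* Register the "unix" target so BPF iterator programs can attach to it.
 * Once registered, an iterator can be pinned and read like a file, e.g.
 * (illustrative object/pin names, not part of this file):
 *
 *   bpftool iter pin ./unix_iter.bpf.o /sys/fs/bpf/unix_iter
 *   cat /sys/fs/bpf/unix_iter
 */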
static void __init bpf_iter_register(void)
{
	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
	if (bpf_iter_reg_target(&unix_reg_info))
		pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif

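/* Module entry point: register the dgram and stream protos, the PF_UNIX
 * socket family, the per-netns operations, and (if configured) the BPF
 * iterator target.
 */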
static int __init af_unix_init(void)
{
	int i, rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));

	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
		spin_lock_init(&bsd_socket_locks[i]);
		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
	}

	rc = proto_register(&unix_dgram_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	rc = proto_register(&unix_stream_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		proto_unregister(&unix_dgram_proto);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
	unix_bpf_build_proto();

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif

out:
	return rc;
}

/* Later than subsys_initcall() because we depend on stuff initialised there */
fs_initcall(af_unix_init);