/*
 * NET4:        Implementation of BSD Unix domain sockets.
 *
 * Authors:     Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *              Linus Torvalds          :       Assorted bug cures.
 *              Niibe Yutaka            :       async I/O support.
 *              Carsten Paeth           :       PF_UNIX check, address fixes.
 *              Alan Cox                :       Limit size of allocated blocks.
 *              Alan Cox                :       Fixed the stupid socketpair bug.
 *              Alan Cox                :       BSD compatibility fine tuning.
 *              Alan Cox                :       Fixed a bug in connect when interrupted.
 *              Alan Cox                :       Sorted out a proper draft version of
 *                                              file descriptor passing hacked up from
 *                                              Mike Shaver's work.
 *              Marty Leisner           :       Fixes to fd passing.
 *              Nick Nevin              :       recvmsg bugfix.
 *              Alan Cox                :       Started proper garbage collector.
 *              Heiko EiBfeldt          :       Missing verify_area check.
 *              Alan Cox                :       Started POSIXisms.
 *              Andreas Schwab          :       Replace inode by dentry for proper
 *                                              reference counting.
 *              Kirk Petersen           :       Made this a module.
 *              Christoph Rohland       :       Elegant non-blocking accept/connect
 *                                              algorithm.  Lots of bug fixes.
 *              Alexey Kuznetsov        :       Repaired (I hope) bugs introduced
 *                                              by the above two patches.
 *              Andrea Arcangeli        :       If possible we block in connect(2)
 *                                              if the max backlog of the listen
 *                                              socket has been reached.  This won't
 *                                              break old apps and it avoids a huge
 *                                              number of hashed socks (for unix_gc()
 *                                              performance reasons).
 *                                              Security fix that limits the max
 *                                              number of socks to 2*max_files and
 *                                              the number of skbs queueable in the
 *                                              dgram receiver.
 *              Artur Skawina           :       Hash function optimizations.
 *              Alexey Kuznetsov        :       Full scale SMP.  Lots of bugs are
 *                                              introduced 8)
 *              Malcolm Beattie         :       Set peercred for socketpair.
 *              Michal Ostrowski        :       Module initialization cleanup.
 *              Arnaldo C. Melo         :       Remove MOD_{INC,DEC}_USE_COUNT,
 *                                              the core infrastructure is doing that
 *                                              for all net proto families now (2.5.69+).
 *
 * Known differences from the reference BSD that was tested:
 *
 *      [TO FIX]
 *      ECONNREFUSED is not returned from one end of a connected() socket to the
 *              other the moment one end closes.
 *      fstat() doesn't return st_dev=0, and gives the blksize as the high water
 *              mark and a fake inode identifier (nor the BSD first-socket
 *              fstat-twice bug).
 *      [NOT TO FIX]
 *      accept() returns a path name even if the connecting socket has closed
 *              in the meantime (BSD loses the path and gives up).
 *      accept() returns 0 length path for an unbound connector.  BSD returns 16
 *              and a null first byte in the path (but not for gethost/peername -
 *              BSD bug ??)
 *      socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *      BSD af_unix apparently has connect forgetting to block properly.
 *              (need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *      Bug fixes and improvements.
 *              - client shutdown killed server socket.
 *              - removed all useless cli/sti pairs.
 *
 *      Semantic changes/extensions.
 *              - generic control message passing.
 *              - SCM_CREDENTIALS control message.
 *              - "Abstract" (not filesystem based) socket bindings.
 *                Abstract names are sequences of bytes (not zero terminated)
 *                starting with a zero byte, so that this name space does not
 *                intersect with BSD names.
 */
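
/*
 * Illustrative userspace sketch (not part of this file): the two address
 * forms handled below.  Pathname addresses are NUL-terminated strings in
 * sun_path; abstract addresses start with a zero byte, and the remaining
 * bytes up to the passed length form the name.
 *
 *      struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *      strcpy(a.sun_path, "/tmp/sock");        <- filesystem name
 *      bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *      a.sun_path[0] = 0;                      <- abstract name "name"
 *      memcpy(a.sun_path + 1, "name", 4);
 *      bind(fd, (struct sockaddr *)&a,
 *           offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */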

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;


/* Unbound sockets live in the second half of unix_socket_table, in a
 * bucket derived from a fold of the sock's kernel address.
 */
static struct hlist_head *unix_sockets_unbound(void *addr)
{
        unsigned long hash = (unsigned long)addr;

        hash ^= hash >> 16;
        hash ^= hash >> 8;
        hash %= UNIX_HASH_SIZE;
        return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)       (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
        return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
        return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

/*
 * SMP locking strategy:
 *    the hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by a separate spin lock.
 */

static inline unsigned int unix_hash_fold(__wsum n)
{
        unsigned int hash = (__force unsigned int)csum_fold(n);

        hash ^= hash >> 8;
        return hash & (UNIX_HASH_SIZE - 1);
}
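
/* Worked example for unix_hash_fold() above (a sketch; assumes
 * UNIX_HASH_SIZE is 256, as in kernels of this vintage): csum_fold()
 * leaves a 16-bit value, say 0xa3c1; xoring the high byte into the low
 * one gives 0xa362, and masking with 0xff yields bucket 0x62.
 */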

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
        return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
        struct sock *peer;

        unix_state_lock(s);
        peer = unix_peer(s);
        if (peer)
                sock_hold(peer);
        unix_state_unlock(s);
        return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
        if (atomic_dec_and_test(&addr->refcnt))
                kfree(addr);
}

/*
 * Check unix socket name:
 *      - it should not be zero length.
 *      - if it does not start with a zero byte, it should be
 *        NUL-terminated (a filesystem object)
 *      - if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
        if (len <= sizeof(short) || len > sizeof(*sunaddr))
                return -EINVAL;
        if (!sunaddr || sunaddr->sun_family != AF_UNIX)
                return -EINVAL;
        if (sunaddr->sun_path[0]) {
                /*
                 * This may look like an off-by-one error but it is a bit more
                 * subtle.  108 is the longest valid AF_UNIX path for a
                 * binding.  sun_path[108] does not as such exist.  However,
                 * in kernel space we are guaranteed that it is a valid
                 * memory location in our kernel address buffer.
                 */
                ((char *)sunaddr)[len] = 0;
                len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
                return len;
        }

        *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
        return len;
}
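
/* Worked example for unix_mkname() above (a sketch; sizeof(short) accounts
 * for the 2-byte sun_family): a pathname "/x" passed with
 * len = sizeof(short) + 3 is NUL-terminated in place and len is recomputed
 * from strlen(), so anything after the first NUL is ignored.  An abstract
 * name {0,'x'} passed with len = sizeof(short) + 2 keeps its length and
 * hashes the exact bytes, so {0,'x'} and {0,'x',0} are distinct abstract
 * names.
 */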

static void __unix_remove_socket(struct sock *sk)
{
        sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
        WARN_ON(!sk_unhashed(sk));
        sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
        spin_lock(&unix_table_lock);
        __unix_remove_socket(sk);
        spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
        spin_lock(&unix_table_lock);
        __unix_insert_socket(list, sk);
        spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
                                              struct sockaddr_un *sunname,
                                              int len, int type, unsigned int hash)
{
        struct sock *s;

        sk_for_each(s, &unix_socket_table[hash ^ type]) {
                struct unix_sock *u = unix_sk(s);

                if (!net_eq(sock_net(s), net))
                        continue;

                if (u->addr->len == len &&
                    !memcmp(u->addr->name, sunname, len))
                        goto found;
        }
        s = NULL;
found:
        return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
                                                   struct sockaddr_un *sunname,
                                                   int len, int type,
                                                   unsigned int hash)
{
        struct sock *s;

        spin_lock(&unix_table_lock);
        s = __unix_find_socket_byname(net, sunname, len, type, hash);
        if (s)
                sock_hold(s);
        spin_unlock(&unix_table_lock);
        return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
        struct sock *s;

        spin_lock(&unix_table_lock);
        sk_for_each(s,
                    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
                struct dentry *dentry = unix_sk(s)->path.dentry;

                if (dentry && d_backing_inode(dentry) == i) {
                        sock_hold(s);
                        goto found;
                }
        }
        s = NULL;
found:
        spin_unlock(&unix_table_lock);
        return s;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket which is not itself
 * connected to the first socket (e.g., /dev/log), clients may only
 * enqueue more messages if the present receive queue of the server
 * socket is not "too large".  This means there is a second writeability
 * condition that poll and sendmsg need to test.  The dgram recv code
 * will do a wake up on the peer_wait wait queue of a socket upon
 * reception of a datagram, and this needs to be propagated to sleeping
 * would-be writers, since these might not have sent anything so far.
 * This can't be accomplished via poll_wait because the lifetime of the
 * server socket might be less than that of its clients if these break
 * their association with it or if the server socket is closed while
 * clients are still connected to it, and there is no way to inform "a
 * polling implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_t of the client socket
 * is enqueued on the peer_wait queue of the server socket, whose wake
 * function does a wake_up on the ordinary client socket wait queue.
 * This connection is established whenever a write (or poll for write)
 * hits the flow control condition, and broken when the association to
 * the server socket is dissolved or after a wake up was relayed.
 */
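
/* A condensed view of the relay mechanism described above, assuming a
 * client C connected to a dgram server S:
 *
 *   1. C's sendmsg()/poll() finds S's receive queue full and calls
 *      unix_dgram_peer_wake_connect(C, S): C's peer_wake entry is hooked
 *      onto S's peer_wait queue.
 *   2. S's recvmsg() dequeues a datagram and wakes S's peer_wait; this
 *      runs unix_dgram_peer_wake_relay(), which unhooks C and wakes C's
 *      own wait queue so a sleeping writer (or poller) retries.
 *   3. If C disconnects first, unix_dgram_peer_wake_disconnect() unhooks
 *      it without relaying a wakeup.
 */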

static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
                                      void *key)
{
        struct unix_sock *u;
        wait_queue_head_t *u_sleep;

        u = container_of(q, struct unix_sock, peer_wake);

        __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
                            q);
        u->peer_wake.private = NULL;

        /* relaying can only happen while the wq still exists */
        u_sleep = sk_sleep(&u->sk);
        if (u_sleep)
                wake_up_interruptible_poll(u_sleep, key);

        return 0;
}

static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
        struct unix_sock *u, *u_other;
        int rc;

        u = unix_sk(sk);
        u_other = unix_sk(other);
        rc = 0;
        spin_lock(&u_other->peer_wait.lock);

        if (!u->peer_wake.private) {
                u->peer_wake.private = other;
                __add_wait_queue(&u_other->peer_wait, &u->peer_wake);

                rc = 1;
        }

        spin_unlock(&u_other->peer_wait.lock);
        return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
                                            struct sock *other)
{
        struct unix_sock *u, *u_other;

        u = unix_sk(sk);
        u_other = unix_sk(other);
        spin_lock(&u_other->peer_wait.lock);

        if (u->peer_wake.private == other) {
                __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
                u->peer_wake.private = NULL;
        }

        spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
                                                   struct sock *other)
{
        unix_dgram_peer_wake_disconnect(sk, other);
        wake_up_interruptible_poll(sk_sleep(sk),
                                   POLLOUT |
                                   POLLWRNORM |
                                   POLLWRBAND);
}

/* preconditions:
 *      - unix_peer(sk) == other
 *      - association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
        int connected;

        connected = unix_dgram_peer_wake_connect(sk, other);

        if (unix_recvq_full(other))
                return 1;

        if (connected)
                unix_dgram_peer_wake_disconnect(sk, other);

        return 0;
}

/* A socket counts as writable while it is not listening and no more than
 * a quarter of its send buffer is consumed by queued skbs.
 */
static int unix_writable(const struct sock *sk)
{
        return sk->sk_state != TCP_LISTEN &&
               (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        if (unix_writable(sk)) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait,
                                POLLOUT | POLLWRNORM | POLLWRBAND);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its
 * receive queue of packets that arrived from the previous peer.  First,
 * this allows flow control based only on wmem_alloc; second, an sk
 * connected to a peer may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
        if (!skb_queue_empty(&sk->sk_receive_queue)) {
                skb_queue_purge(&sk->sk_receive_queue);
                wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

                /* If one link of a bidirectional dgram pipe is
                 * disconnected, we signal an error.  Messages are lost.
                 * Do not do this when the peer was not connected to us.
                 */
                if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
                        other->sk_err = ECONNRESET;
                        other->sk_error_report(other);
                }
        }
}

static void unix_sock_destructor(struct sock *sk)
{
        struct unix_sock *u = unix_sk(sk);

        skb_queue_purge(&sk->sk_receive_queue);

        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_info("Attempt to release alive unix socket: %p\n", sk);
                return;
        }

        if (u->addr)
                unix_release_addr(u->addr);

        atomic_long_dec(&unix_nr_socks);
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
        pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
                 atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
        struct unix_sock *u = unix_sk(sk);
        struct path path;
        struct sock *skpair;
        struct sk_buff *skb;
        int state;

        unix_remove_socket(sk);

        /* Clear state */
        unix_state_lock(sk);
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
        path = u->path;
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        state = sk->sk_state;
        sk->sk_state = TCP_CLOSE;
        unix_state_unlock(sk);

        wake_up_interruptible_all(&u->peer_wait);

        skpair = unix_peer(sk);

        if (skpair != NULL) {
                if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
                        unix_state_lock(skpair);
                        /* No more writes */
                        skpair->sk_shutdown = SHUTDOWN_MASK;
                        if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
                                skpair->sk_err = ECONNRESET;
                        unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
                        sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                }

                unix_dgram_peer_wake_disconnect(sk, skpair);
                sock_put(skpair); /* It may now die */
                unix_peer(sk) = NULL;
        }

        /* Try to flush out this socket. Throw out buffers at least */

        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (state == TCP_LISTEN)
                        unix_release_sock(skb->sk, 1);
                /* passed fds are erased in the kfree_skb hook */
                UNIXCB(skb).consumed = skb->len;
                kfree_skb(skb);
        }

        if (path.dentry)
                path_put(&path);

        sock_put(sk);

        /* ---- Socket is dead now and most probably destroyed ---- */

        /*
         * Fixme: BSD difference: In BSD all sockets connected to us get
         *        ECONNRESET and we die on the spot.  In Linux we behave
         *        like files and pipes do and wait for the last
         *        dereference.
         *
         * Can't we simply set sock->err?
         *
         *        What the above comment does talk about? --ANK(980817)
         */

        if (unix_tot_inflight)
                unix_gc();              /* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
        put_pid(sk->sk_peer_pid);
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        sk->sk_peer_pid  = get_pid(task_tgid(current));
        sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
        put_pid(sk->sk_peer_pid);
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
        sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
        int err;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        struct pid *old_pid = NULL;

        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;       /* Only stream/seqpacket sockets accept */
        err = -EINVAL;
        if (!u->addr)
                goto out;       /* No listens on an unbound socket */
        unix_state_lock(sk);
        if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (backlog > sk->sk_max_ack_backlog)
                wake_up_interruptible_all(&u->peer_wait);
        sk->sk_max_ack_backlog  = backlog;
        sk->sk_state            = TCP_LISTEN;
        /* set credentials so connect can copy them */
        init_peercred(sk);
        err = 0;

out_unlock:
        unix_state_unlock(sk);
        put_pid(old_pid);
out:
        return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
                               int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
                                    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
                                    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
                                       struct pipe_inode_info *, size_t size,
                                       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
                              int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
                                  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
        struct unix_sock *u = unix_sk(sk);

        if (mutex_lock_interruptible(&u->readlock))
                return -EINTR;

        sk->sk_peek_off = val;
        mutex_unlock(&u->readlock);

        return 0;
}


static const struct proto_ops unix_stream_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_stream_connect,
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
        .poll =         unix_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_stream_sendmsg,
        .recvmsg =      unix_stream_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     unix_stream_sendpage,
        .splice_read =  unix_stream_splice_read,
        .set_peek_off = unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_dgram_connect,
        .socketpair =   unix_socketpair,
        .accept =       sock_no_accept,
        .getname =      unix_getname,
        .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_dgram_sendmsg,
        .recvmsg =      unix_dgram_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_stream_connect,
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
        .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_seqpacket_sendmsg,
        .recvmsg =      unix_seqpacket_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
};

static struct proto unix_proto = {
        .name     = "UNIX",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock.  Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
        struct sock *sk = NULL;
        struct unix_sock *u;

        atomic_long_inc(&unix_nr_socks);
        if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
                goto out;

        sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
        if (!sk)
                goto out;

        sock_init_data(sock, sk);
        lockdep_set_class(&sk->sk_receive_queue.lock,
                          &af_unix_sk_receive_queue_lock_key);

        sk->sk_write_space      = unix_write_space;
        sk->sk_max_ack_backlog  = net->unx.sysctl_max_dgram_qlen;
        sk->sk_destruct         = unix_sock_destructor;
        u = unix_sk(sk);
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        spin_lock_init(&u->lock);
        atomic_long_set(&u->inflight, 0);
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->readlock); /* single task reading lock */
        init_waitqueue_head(&u->peer_wait);
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
        if (sk == NULL)
                atomic_long_dec(&unix_nr_socks);
        else {
                local_bh_disable();
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
                local_bh_enable();
        }
        return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
{
        if (protocol && protocol != PF_UNIX)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
                sock->ops = &unix_stream_ops;
                break;
                /*
                 * Believe it or not BSD has AF_UNIX, SOCK_RAW
                 * though nothing uses it.
                 */
        case SOCK_RAW:
                sock->type = SOCK_DGRAM;
                /* fall through: SOCK_RAW is served as SOCK_DGRAM */
        case SOCK_DGRAM:
                sock->ops = &unix_dgram_ops;
                break;
        case SOCK_SEQPACKET:
                sock->ops = &unix_seqpacket_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (!sk)
                return 0;

        unix_release_sock(sk, 0);
        sock->sk = NULL;

        return 0;
}

static int unix_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        static u32 ordernum = 1;
        struct unix_address *addr;
        int err;
        unsigned int retries = 0;

        err = mutex_lock_interruptible(&u->readlock);
        if (err)
                return err;

        err = 0;
        if (u->addr)
                goto out;

        err = -ENOMEM;
        addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
        if (!addr)
                goto out;

        addr->name->sun_family = AF_UNIX;
        atomic_set(&addr->refcnt, 1);

retry:
        addr->len = sprintf(addr->name->sun_path + 1, "%05x", ordernum) + 1 + sizeof(short);
        addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

        spin_lock(&unix_table_lock);
        ordernum = (ordernum + 1) & 0xFFFFF;

        if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
                                      addr->hash)) {
                spin_unlock(&unix_table_lock);
                /*
                 * __unix_find_socket_byname() may take a long time if many
                 * names are already in use.
                 */
                cond_resched();
                /* Give up if all names seem to be in use. */
                if (retries++ == 0xFFFFF) {
                        err = -ENOSPC;
                        kfree(addr);
                        goto out;
                }
                goto retry;
        }
        addr->hash ^= sk->sk_type;

        __unix_remove_socket(sk);
        u->addr = addr;
        __unix_insert_socket(&unix_socket_table[addr->hash], sk);
        spin_unlock(&unix_table_lock);
        err = 0;

out:    mutex_unlock(&u->readlock);
        return err;
}

static struct sock *unix_find_other(struct net *net,
                                    struct sockaddr_un *sunname, int len,
                                    int type, unsigned int hash, int *error)
{
        struct sock *u;
        struct path path;
        int err = 0;

        if (sunname->sun_path[0]) {
                struct inode *inode;
                err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
                if (err)
                        goto fail;
                inode = d_backing_inode(path.dentry);
                err = inode_permission(inode, MAY_WRITE);
                if (err)
                        goto put_fail;

                err = -ECONNREFUSED;
                if (!S_ISSOCK(inode->i_mode))
                        goto put_fail;
                u = unix_find_socket_byinode(inode);
                if (!u)
                        goto put_fail;

                if (u->sk_type == type)
                        touch_atime(&path);

                path_put(&path);

                err = -EPROTOTYPE;
                if (u->sk_type != type) {
                        sock_put(u);
                        goto fail;
                }
        } else {
                err = -ECONNREFUSED;
                u = unix_find_socket_byname(net, sunname, len, type, hash);
                if (u) {
                        struct dentry *dentry;
                        dentry = unix_sk(u)->path.dentry;
                        if (dentry)
                                touch_atime(&unix_sk(u)->path);
                } else
                        goto fail;
        }
        return u;

put_fail:
        path_put(&path);
fail:
        *error = err;
        return NULL;
}

static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
                      struct path *res)
{
        int err;

        err = security_path_mknod(path, dentry, mode, 0);
        if (!err) {
                err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
                if (!err) {
                        res->mnt = mntget(path->mnt);
                        res->dentry = dget(dentry);
                }
        }

        return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        char *sun_path = sunaddr->sun_path;
        int err, name_err;
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;
        struct path path;
        struct dentry *dentry;

        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
                goto out;

        if (addr_len == sizeof(short)) {
                err = unix_autobind(sock);
                goto out;
        }

        err = unix_mkname(sunaddr, addr_len, &hash);
        if (err < 0)
                goto out;
        addr_len = err;

        name_err = 0;
        dentry = NULL;
        if (sun_path[0]) {
                /* Get the parent directory, calculate the hash for last
                 * component.
                 */
                dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);

                if (IS_ERR(dentry)) {
                        /* delay report until after 'already bound' check */
                        name_err = PTR_ERR(dentry);
                        dentry = NULL;
                }
        }

        err = mutex_lock_interruptible(&u->readlock);
        if (err)
                goto out_path;

        err = -EINVAL;
        if (u->addr)
                goto out_up;

        if (name_err) {
                err = name_err == -EEXIST ? -EADDRINUSE : name_err;
                goto out_up;
        }

        err = -ENOMEM;
        addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
        if (!addr)
                goto out_up;

        memcpy(addr->name, sunaddr, addr_len);
        addr->len = addr_len;
        addr->hash = hash ^ sk->sk_type;
        atomic_set(&addr->refcnt, 1);

        if (dentry) {
                struct path u_path;
                umode_t mode = S_IFSOCK |
                       (SOCK_INODE(sock)->i_mode & ~current_umask());
                err = unix_mknod(dentry, &path, mode, &u_path);
                if (err) {
                        if (err == -EEXIST)
                                err = -EADDRINUSE;
                        unix_release_addr(addr);
                        goto out_up;
                }
                addr->hash = UNIX_HASH_SIZE;
                hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
                u->path = u_path;
                list = &unix_socket_table[hash];
        } else {
                spin_lock(&unix_table_lock);
                err = -EADDRINUSE;
                if (__unix_find_socket_byname(net, sunaddr, addr_len,
                                              sk->sk_type, hash)) {
                        unix_release_addr(addr);
                        goto out_unlock;
                }

                list = &unix_socket_table[addr->hash];
        }

        err = 0;
        __unix_remove_socket(sk);
        u->addr = addr;
        __unix_insert_socket(list, sk);

out_unlock:
        spin_unlock(&unix_table_lock);
out_up:
        mutex_unlock(&u->readlock);
out_path:
        if (dentry)
                done_path_create(&path, dentry);

out:
        return err;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
        if (unlikely(sk1 == sk2) || !sk2) {
                unix_state_lock(sk1);
                return;
        }
        if (sk1 < sk2) {
                unix_state_lock(sk1);
                unix_state_lock_nested(sk2);
        } else {
                unix_state_lock(sk2);
                unix_state_lock_nested(sk1);
        }
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
        if (unlikely(sk1 == sk2) || !sk2) {
                unix_state_unlock(sk1);
                return;
        }
        unix_state_unlock(sk1);
        unix_state_unlock(sk2);
}
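
/* Taking the two locks in ascending address order gives a global lock
 * order and so avoids the classic ABBA deadlock: if one CPU runs
 * unix_state_double_lock(A, B) while another runs
 * unix_state_double_lock(B, A), both take A's lock first (assuming
 * A < B) and one simply waits for the other.
 */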

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
                              int alen, int flags)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
        struct sock *other;
        unsigned int hash;
        int err;

        if (addr->sa_family != AF_UNSPEC) {
                err = unix_mkname(sunaddr, alen, &hash);
                if (err < 0)
                        goto out;
                alen = err;

                if (test_bit(SOCK_PASSCRED, &sock->flags) &&
                    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
                        goto out;

restart:
                other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
                if (!other)
                        goto out;

                unix_state_double_lock(sk, other);

                /* Apparently VFS overslept socket death. Retry. */
                if (sock_flag(other, SOCK_DEAD)) {
                        unix_state_double_unlock(sk, other);
                        sock_put(other);
                        goto restart;
                }

                err = -EPERM;
                if (!unix_may_send(sk, other))
                        goto out_unlock;

                err = security_unix_may_send(sk->sk_socket, other->sk_socket);
                if (err)
                        goto out_unlock;

        } else {
                /*
                 * 1003.1g breaking connected state with AF_UNSPEC
                 */
                other = NULL;
                unix_state_double_lock(sk, other);
        }

        /*
         * If it was connected, reconnect.
         */
        if (unix_peer(sk)) {
                struct sock *old_peer = unix_peer(sk);
                unix_peer(sk) = other;
                unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

                unix_state_double_unlock(sk, other);

                if (other != old_peer)
                        unix_dgram_disconnected(sk, old_peer);
                sock_put(old_peer);
        } else {
                unix_peer(sk) = other;
                unix_state_double_unlock(sk, other);
        }
        return 0;

out_unlock:
        unix_state_double_unlock(sk, other);
        sock_put(other);
out:
        return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
        struct unix_sock *u = unix_sk(other);
        int sched;
        DEFINE_WAIT(wait);

        prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

        sched = !sock_flag(other, SOCK_DEAD) &&
                !(other->sk_shutdown & RCV_SHUTDOWN) &&
                unix_recvq_full(other);

        unix_state_unlock(other);

        if (sched)
                timeo = schedule_timeout(timeo);

        finish_wait(&u->peer_wait, &wait);
        return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                               int addr_len, int flags)
{
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk), *newu, *otheru;
        struct sock *newsk = NULL;
        struct sock *other = NULL;
        struct sk_buff *skb = NULL;
        unsigned int hash;
        int st;
        int err;
        long timeo;

        err = unix_mkname(sunaddr, addr_len, &hash);
        if (err < 0)
                goto out;
        addr_len = err;

        if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
            (err = unix_autobind(sock)) != 0)
                goto out;

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        /* First of all, allocate resources.  If we do this after the state
         * is locked, we will have to recheck everything again in any case.
         */

        err = -ENOMEM;

        /* create new sock for complete connection */
        newsk = unix_create1(sock_net(sk), NULL, 0);
        if (newsk == NULL)
                goto out;

        /* Allocate skb for sending to listening sock */
        skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
        if (skb == NULL)
                goto out;

restart:
        /* Find listening sock. */
        other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
        if (!other)
                goto out;

        /* Latch state of peer */
        unix_state_lock(other);

        /* Apparently VFS overslept socket death. Retry. */
        if (sock_flag(other, SOCK_DEAD)) {
                unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }

        err = -ECONNREFUSED;
        if (other->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (other->sk_shutdown & RCV_SHUTDOWN)
                goto out_unlock;

        if (unix_recvq_full(other)) {
                err = -EAGAIN;
                if (!timeo)
                        goto out_unlock;

                timeo = unix_wait_for_peer(other, timeo);

                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        goto out;
                sock_put(other);
                goto restart;
        }

        /* Latch our state.
         *
         * This is a tricky place.  We need to grab our state lock and cannot
         * drop the lock on the peer.  It is dangerous because a deadlock is
         * possible.  The connect-to-self case and a simultaneous attempt to
         * connect are eliminated by checking the socket state: other is
         * TCP_LISTEN, and if sk is TCP_LISTEN we check this before
         * attempting to grab the lock.
         *
         * Well, and we have to recheck the state after the socket is locked.
         */
        st = sk->sk_state;

        switch (st) {
        case TCP_CLOSE:
                /* This is ok... continue with connect */
                break;
        case TCP_ESTABLISHED:
                /* Socket is already connected */
                err = -EISCONN;
                goto out_unlock;
        default:
                err = -EINVAL;
                goto out_unlock;
        }

        unix_state_lock_nested(sk);

        if (sk->sk_state != st) {
                unix_state_unlock(sk);
                unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }

        err = security_unix_stream_connect(sk, other, newsk);
        if (err) {
                unix_state_unlock(sk);
                goto out_unlock;
        }

        /* The way is open!  Quickly set all the necessary fields... */

        sock_hold(sk);
        unix_peer(newsk)        = sk;
        newsk->sk_state         = TCP_ESTABLISHED;
        newsk->sk_type          = sk->sk_type;
        init_peercred(newsk);
        newu = unix_sk(newsk);
        RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
        otheru = unix_sk(other);

        /* copy address information from listening to new sock */
        if (otheru->addr) {
                atomic_inc(&otheru->addr->refcnt);
                newu->addr = otheru->addr;
        }
        if (otheru->path.dentry) {
                path_get(&otheru->path);
                newu->path = otheru->path;
        }

        /* Set credentials */
        copy_peercred(sk, other);

        sock->state     = SS_CONNECTED;
        sk->sk_state    = TCP_ESTABLISHED;
        sock_hold(newsk);

        smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
        unix_peer(sk)   = newsk;

        unix_state_unlock(sk);

        /* queue the skb to the listening sock and notify it */
        spin_lock(&other->sk_receive_queue.lock);
        __skb_queue_tail(&other->sk_receive_queue, skb);
        spin_unlock(&other->sk_receive_queue.lock);
        unix_state_unlock(other);
        other->sk_data_ready(other);
        sock_put(other);
        return 0;

out_unlock:
        if (other)
                unix_state_unlock(other);

out:
        kfree_skb(skb);
        if (newsk)
                unix_release_sock(newsk, 0);
        if (other)
                sock_put(other);
        return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
        struct sock *ska = socka->sk, *skb = sockb->sk;

        /* Join our sockets back to back */
        sock_hold(ska);
        sock_hold(skb);
        unix_peer(ska) = skb;
        unix_peer(skb) = ska;
        init_peercred(ska);
        init_peercred(skb);

        if (ska->sk_type != SOCK_DGRAM) {
                ska->sk_state = TCP_ESTABLISHED;
                skb->sk_state = TCP_ESTABLISHED;
                socka->state  = SS_CONNECTED;
                sockb->state  = SS_CONNECTED;
        }
        return 0;
}
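
/* Userspace sketch of what unix_socketpair() backs (illustrative only):
 * both ends are connected from birth, and since one task creates them,
 * each end carries that task's credentials via init_peercred() above.
 *
 *      int sv[2];
 *      if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0)
 *              write(sv[0], "ping", 4);        <- readable on sv[1]
 */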

static void unix_sock_inherit_flags(const struct socket *old,
                                    struct socket *new)
{
        if (test_bit(SOCK_PASSCRED, &old->flags))
                set_bit(SOCK_PASSCRED, &new->flags);
        if (test_bit(SOCK_PASSSEC, &old->flags))
                set_bit(SOCK_PASSSEC, &new->flags);
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
        struct sock *sk = sock->sk;
        struct sock *tsk;
        struct sk_buff *skb;
        int err;

        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;

        err = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
                goto out;

        /* If socket state is TCP_LISTEN it cannot change (for now...),
         * so that no locks are necessary.
         */

        skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, &err);
        if (!skb) {
                /* This means receive shutdown. */
                if (err == 0)
                        err = -EINVAL;
                goto out;
        }

        tsk = skb->sk;
        skb_free_datagram(sk, skb);
        wake_up_interruptible(&unix_sk(sk)->peer_wait);

        /* attach accepted sock to socket */
        unix_state_lock(tsk);
        newsock->state = SS_CONNECTED;
        unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
        return 0;

out:
        return err;
}


static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct unix_sock *u;
        DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
        int err = 0;

        if (peer) {
                sk = unix_peer_get(sk);

                err = -ENOTCONN;
                if (!sk)
                        goto out;
                err = 0;
        } else {
                sock_hold(sk);
        }

        u = unix_sk(sk);
        unix_state_lock(sk);
        if (!u->addr) {
                sunaddr->sun_family = AF_UNIX;
                sunaddr->sun_path[0] = 0;
                *uaddr_len = sizeof(short);
        } else {
                struct unix_address *addr = u->addr;

                *uaddr_len = addr->len;
                memcpy(sunaddr, addr->name, *uaddr_len);
        }
        unix_state_unlock(sk);
        sock_put(sk);
out:
        return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
        int i;

        scm->fp = UNIXCB(skb).fp;
        UNIXCB(skb).fp = NULL;

        for (i = scm->fp->count - 1; i >= 0; i--)
                unix_notinflight(scm->fp->user, scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
        struct scm_cookie scm;
        memset(&scm, 0, sizeof(scm));
        scm.pid = UNIXCB(skb).pid;
        if (UNIXCB(skb).fp)
                unix_detach_fds(&scm, skb);

        /* Alas, it calls VFS */
        /* So fscking what? fput() had been SMP-safe since the last Summer */
        scm_destroy(&scm);
        sock_wfree(skb);
}

/*
 * The "user->unix_inflight" variable is protected by the garbage
 * collection lock, and we just read it locklessly here.  If you go
 * over the limit, there might be a tiny race in actually noticing
 * it across threads.  Tough.
 */
static inline bool too_many_unix_fds(struct task_struct *p)
{
        struct user_struct *user = current_user();

        if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
                return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
        return false;
}
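
/* Hedged example of the check above: with the common RLIMIT_NOFILE soft
 * limit of 1024, an unprivileged user who already has more than 1024 fds
 * in flight over AF_UNIX sockets will see further SCM_RIGHTS sends fail
 * with -ETOOMANYREFS (via unix_attach_fds() below); CAP_SYS_RESOURCE or
 * CAP_SYS_ADMIN bypasses the limit.
 */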

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
        int i;
        unsigned char max_level = 0;

        if (too_many_unix_fds(current))
                return -ETOOMANYREFS;

        for (i = scm->fp->count - 1; i >= 0; i--) {
                struct sock *sk = unix_get_socket(scm->fp->fp[i]);

                if (sk)
                        max_level = max(max_level,
                                        unix_sk(sk)->recursion_level);
        }
        if (unlikely(max_level > MAX_RECURSION_LEVEL))
                return -ETOOMANYREFS;

        /*
         * Need to duplicate file references for the sake of garbage
         * collection.  Otherwise a socket in the fps might become a
         * candidate for GC while the skb is not yet queued.
         */
        UNIXCB(skb).fp = scm_fp_dup(scm->fp);
        if (!UNIXCB(skb).fp)
                return -ENOMEM;

        for (i = scm->fp->count - 1; i >= 0; i--)
                unix_inflight(scm->fp->user, scm->fp->fp[i]);
        return max_level;
}
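
/* Userspace view of what unix_attach_fds()/unix_detach_fds() implement
 * (a hedged sketch, error handling omitted; "sock" and "fd_to_pass" are
 * assumed to exist): passing an open fd as SCM_RIGHTS ancillary data.
 *
 *      char cbuf[CMSG_SPACE(sizeof(int))];
 *      struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *      struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                            .msg_control = cbuf,
 *                            .msg_controllen = sizeof(cbuf) };
 *      struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *
 *      c->cmsg_level = SOL_SOCKET;
 *      c->cmsg_type  = SCM_RIGHTS;
 *      c->cmsg_len   = CMSG_LEN(sizeof(int));
 *      memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
 *      sendmsg(sock, &msg, 0);
 */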

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
        int err = 0;

        UNIXCB(skb).pid = get_pid(scm->pid);
        UNIXCB(skb).uid = scm->creds.uid;
        UNIXCB(skb).gid = scm->creds.gid;
        UNIXCB(skb).fp = NULL;
        unix_get_secdata(scm, skb);
        if (scm->fp && send_fds)
                err = unix_attach_fds(scm, skb);

        skb->destructor = unix_destruct_scm;
        return err;
}

static bool unix_passcred_enabled(const struct socket *sock,
                                  const struct sock *other)
{
        return test_bit(SOCK_PASSCRED, &sock->flags) ||
               !other->sk_socket ||
               test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
                            const struct sock *other)
{
        if (UNIXCB(skb).pid)
                return;
        if (unix_passcred_enabled(sock, other)) {
                UNIXCB(skb).pid = get_pid(task_tgid(current));
                current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
        }
}
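
/* Receiver-side sketch for the credential stamping above (illustrative;
 * "sock" and the msghdr setup are assumed): with SO_PASSCRED enabled,
 * recvmsg() yields an SCM_CREDENTIALS cmsg whose payload is the struct
 * ucred (pid/uid/gid) that maybe_add_creds() attached to the skb.
 *
 *      int on = 1;
 *      setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *      recvmsg(sock, &msg, 0);
 *      (then walk CMSG_FIRSTHDR()/CMSG_NXTHDR() looking for
 *       cmsg_level == SOL_SOCKET && cmsg_type == SCM_CREDENTIALS)
 */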

static int maybe_init_creds(struct scm_cookie *scm,
                            struct socket *socket,
                            const struct sock *other)
{
        int err;
        struct msghdr msg = { .msg_controllen = 0 };

        err = scm_send(socket, &msg, scm, false);
        if (err)
                return err;

        if (unix_passcred_enabled(socket, other)) {
                scm->pid = get_pid(task_tgid(current));
                current_uid_gid(&scm->creds.uid, &scm->creds.gid);
        }
        return err;
}

static bool unix_skb_scm_eq(struct sk_buff *skb,
                            struct scm_cookie *scm)
{
        const struct unix_skb_parms *u = &UNIXCB(skb);

        return u->pid == scm->pid &&
               uid_eq(u->uid, scm->creds.uid) &&
               gid_eq(u->gid, scm->creds.gid) &&
               unix_secdata_eq(scm, skb);
}

/*
 * Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
                              size_t len)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
        struct sock *other = NULL;
        int namelen = 0; /* fake initializer to quiet GCC */
        int err;
        unsigned int hash;
        struct sk_buff *skb;
        long timeo;
        struct scm_cookie scm;
        int max_level;
        int data_len = 0;
        int sk_locked;

        wait_for_unix_gc();
        err = scm_send(sock, msg, &scm, false);
        if (err < 0)
                return err;

        err = -EOPNOTSUPP;
        if (msg->msg_flags & MSG_OOB)
                goto out;

        if (msg->msg_namelen) {
                err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
                if (err < 0)
                        goto out;
                namelen = err;
        } else {
                sunaddr = NULL;
                err = -ENOTCONN;
                other = unix_peer_get(sk);
                if (!other)
                        goto out;
        }

        if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
            && (err = unix_autobind(sock)) != 0)
                goto out;

        err = -EMSGSIZE;
        if (len > sk->sk_sndbuf - 32)
                goto out;

        if (len > SKB_MAX_ALLOC) {
                data_len = min_t(size_t,
                                 len - SKB_MAX_ALLOC,
                                 MAX_SKB_FRAGS * PAGE_SIZE);
                data_len = PAGE_ALIGN(data_len);

                BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
        }

        skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
                                   msg->msg_flags & MSG_DONTWAIT, &err,
                                   PAGE_ALLOC_COSTLY_ORDER);
        if (skb == NULL)
                goto out;

        err = unix_scm_to_skb(&scm, skb, true);
        if (err < 0)
                goto out_free;
        max_level = err + 1;

        skb_put(skb, len - data_len);
        skb->data_len = data_len;
        skb->len = len;
        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
        if (err)
                goto out_free;

        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
        if (!other) {
                err = -ECONNRESET;
                if (sunaddr == NULL)
                        goto out_free;

                other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
                                        hash, &err);
                if (other == NULL)
                        goto out_free;
        }

        if (sk_filter(other, skb) < 0) {
                /* Toss the packet but do not return any error to the sender */
                err = len;
                goto out_free;
        }

        sk_locked = 0;
        unix_state_lock(other);
restart_locked:
        err = -EPERM;
        if (!unix_may_send(sk, other))
                goto out_unlock;

        if (unlikely(sock_flag(other, SOCK_DEAD))) {
                /*
                 * Check with 1003.1g - what should a
                 * datagram error return?
                 */
                unix_state_unlock(other);
                sock_put(other);

                if (!sk_locked)
                        unix_state_lock(sk);

                err = 0;
                if (unix_peer(sk) == other) {
                        unix_peer(sk) = NULL;
                        unix_dgram_peer_wake_disconnect_wakeup(sk, other);

                        unix_state_unlock(sk);

                        unix_dgram_disconnected(sk, other);
                        sock_put(other);
                        err = -ECONNREFUSED;
                } else {
                        unix_state_unlock(sk);
                }

                other = NULL;
                if (err)
                        goto out_free;
                goto restart;
        }

        err = -EPIPE;
        if (other->sk_shutdown & RCV_SHUTDOWN)
                goto out_unlock;

        if (sk->sk_type != SOCK_SEQPACKET) {
                err = security_unix_may_send(sk->sk_socket, other->sk_socket);
                if (err)
                        goto out_unlock;
        }

        /* other == sk && unix_peer(other) != sk if
         * - unix_peer(sk) == NULL, and the destination address is bound to sk
         * - unix_peer(sk) == sk by the time of the get, but it disconnected
         *   before the lock
         */
        if (other != sk &&
            unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
                if (timeo) {
                        timeo = unix_wait_for_peer(other, timeo);

                        err = sock_intr_errno(timeo);
                        if (signal_pending(current))
                                goto out_free;

                        goto restart;
                }

                if (!sk_locked) {
                        unix_state_unlock(other);
                        unix_state_double_lock(sk, other);
                }

                if (unix_peer(sk) != other ||
                    unix_dgram_peer_wake_me(sk, other)) {
                        err = -EAGAIN;
                        sk_locked = 1;
                        goto out_unlock;
                }

                if (!sk_locked) {
                        sk_locked = 1;
                        goto restart_locked;
                }
        }

        if (unlikely(sk_locked))
                unix_state_unlock(sk);

        if (sock_flag(other, SOCK_RCVTSTAMP))
                __net_timestamp(skb);
        maybe_add_creds(skb, sock, other);
        skb_queue_tail(&other->sk_receive_queue, skb);
        if (max_level > unix_sk(other)->recursion_level)
                unix_sk(other)->recursion_level = max_level;
        unix_state_unlock(other);
        other->sk_data_ready(other);
        sock_put(other);
        scm_destroy(&scm);
        return len;

out_unlock:
        if (sk_locked)
                unix_state_unlock(sk);
        unix_state_unlock(other);
out_free:
        kfree_skb(skb);
out:
        if (other)
                sock_put(other);
        scm_destroy(&scm);
        return err;
}

/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
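
/* Worked numbers for the define above (assuming 4 KiB pages):
 * get_order(32768) == 3, so UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768,
 * i.e. each stream skb carries at most eight page fragments on top of
 * its linear head.
 */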

static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                               size_t len)
{
        struct sock *sk = sock->sk;
        struct sock *other = NULL;
        int err, size;
        struct sk_buff *skb;
        int sent = 0;
        struct scm_cookie scm;
        bool fds_sent = false;
        int max_level;
        int data_len;

        wait_for_unix_gc();
        err = scm_send(sock, msg, &scm, false);
        if (err < 0)
                return err;

        err = -EOPNOTSUPP;
        if (msg->msg_flags & MSG_OOB)
                goto out_err;

        if (msg->msg_namelen) {
                err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
                goto out_err;
        } else {
                err = -ENOTCONN;
                other = unix_peer(sk);
                if (!other)
                        goto out_err;
        }

        if (sk->sk_shutdown & SEND_SHUTDOWN)
                goto pipe_err;

        while (sent < len) {
                size = len - sent;

                /* Keep two messages in the pipe so it schedules better */
                size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

                /* allow fallback to order-0 allocations */
                size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

                data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

                data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

                skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
                                           msg->msg_flags & MSG_DONTWAIT, &err,
                                           get_order(UNIX_SKB_FRAGS_SZ));
                if (!skb)
                        goto out_err;

                /* Only send the fds in the first buffer */
                err = unix_scm_to_skb(&scm, skb, !fds_sent);
                if (err < 0) {
                        kfree_skb(skb);
                        goto out_err;
                }
                max_level = err + 1;
                fds_sent = true;

                skb_put(skb, size - data_len);
                skb->data_len = data_len;
                skb->len = size;
                err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
                if (err) {
                        kfree_skb(skb);
                        goto out_err;
                }

                unix_state_lock(other);

                if (sock_flag(other, SOCK_DEAD) ||
                    (other->sk_shutdown & RCV_SHUTDOWN))
                        goto pipe_err_free;

                maybe_add_creds(skb, sock, other);
                skb_queue_tail(&other->sk_receive_queue, skb);
                if (max_level > unix_sk(other)->recursion_level)
                        unix_sk(other)->recursion_level = max_level;
                unix_state_unlock(other);
                other->sk_data_ready(other);
                sent += size;
        }

        scm_destroy(&scm);

        return sent;

pipe_err_free:
        unix_state_unlock(other);
        kfree_skb(skb);
pipe_err:
        if (sent == 0 && !(msg->msg_flags & MSG_NOSIGNAL))
                send_sig(SIGPIPE, current, 0);
        err = -EPIPE;
out_err:
        scm_destroy(&scm);
        return sent ? : err;
}

static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
                                    int offset, size_t size, int flags)
{
        int err;
        bool send_sigpipe = false;
        bool init_scm = true;
        struct scm_cookie scm;
        struct sock *other, *sk = socket->sk;
        struct sk_buff *skb, *newskb = NULL, *tail = NULL;

        if (flags & MSG_OOB)
                return -EOPNOTSUPP;

        other = unix_peer(sk);
        if (!other || sk->sk_state != TCP_ESTABLISHED)
                return -ENOTCONN;

        if (false) {
alloc_skb:
                unix_state_unlock(other);
                mutex_unlock(&unix_sk(other)->readlock);
                newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
                                              &err, 0);
                if (!newskb)
                        goto err;
        }

        /* we must acquire readlock as we modify already present
         * skbs in the sk_receive_queue and mess with skb->len
         */
        err = mutex_lock_interruptible(&unix_sk(other)->readlock);
        if (err) {
                err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
                goto err;
        }

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                send_sigpipe = true;
                goto err_unlock;
        }

        unix_state_lock(other);

        if (sock_flag(other, SOCK_DEAD) ||
            other->sk_shutdown & RCV_SHUTDOWN) {
                err = -EPIPE;
                send_sigpipe = true;
                goto err_state_unlock;
        }

        if (init_scm) {
                err = maybe_init_creds(&scm, socket, other);
                if (err)
                        goto err_state_unlock;
                init_scm = false;
        }

        skb = skb_peek_tail(&other->sk_receive_queue);
        if (tail && tail == skb) {
                skb = newskb;
        } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
                if (newskb) {
                        skb = newskb;
                } else {
                        tail = skb;
                        goto alloc_skb;
                }
        } else if (newskb) {
                /* this is the fast path: the tail skb turned out to be
                 * appendable after all, so the speculatively allocated
                 * newskb is not needed - and consume_skb() with
                 * newskb == NULL would do no harm anyway
                 */
                consume_skb(newskb);
                newskb = NULL;
        }

        if (skb_append_pagefrags(skb, page, offset, size)) {
                tail = skb;
                goto alloc_skb;
        }

        skb->len += size;
        skb->data_len += size;
        skb->truesize += size;
        atomic_add(size, &sk->sk_wmem_alloc);

        if (newskb) {
                err = unix_scm_to_skb(&scm, skb, false);
                if (err)
                        goto err_state_unlock;
                spin_lock(&other->sk_receive_queue.lock);
                __skb_queue_tail(&other->sk_receive_queue, newskb);
                spin_unlock(&other->sk_receive_queue.lock);
        }

        unix_state_unlock(other);
        mutex_unlock(&unix_sk(other)->readlock);

        other->sk_data_ready(other);
        scm_destroy(&scm);
        return size;

err_state_unlock:
        unix_state_unlock(other);
err_unlock:
        mutex_unlock(&unix_sk(other)->readlock);
err:
        kfree_skb(newskb);
        if (send_sigpipe && !(flags & MSG_NOSIGNAL))
                send_sig(SIGPIPE, current, 0);
        if (!init_scm)
                scm_destroy(&scm);
        return err;
}

static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
                                  size_t len)
{
        int err;
        struct sock *sk = sock->sk;

        err = sock_error(sk);
        if (err)
                return err;

        if (sk->sk_state != TCP_ESTABLISHED)
                return -ENOTCONN;

        if (msg->msg_namelen)
                msg->msg_namelen = 0;

        return unix_dgram_sendmsg(sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
                                  size_t size, int flags)
{
        struct sock *sk = sock->sk;

        if (sk->sk_state != TCP_ESTABLISHED)
                return -ENOTCONN;

        return unix_dgram_recvmsg(sock, msg, size, flags);
}
2096
2097static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2098{
2099 struct unix_sock *u = unix_sk(sk);
2100
2101 if (u->addr) {
2102 msg->msg_namelen = u->addr->len;
2103 memcpy(msg->msg_name, u->addr->name, u->addr->len);
2104 }
2105}
2106
2107static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2108 size_t size, int flags)
2109{
2110 struct scm_cookie scm;
2111 struct sock *sk = sock->sk;
2112 struct unix_sock *u = unix_sk(sk);
2113 struct sk_buff *skb, *last;
2114 long timeo;
2115 int err;
2116 int peeked, skip;
2117
2118 err = -EOPNOTSUPP;
2119 if (flags&MSG_OOB)
2120 goto out;
2121
2122 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2123
2124 do {
2125 mutex_lock(&u->readlock);
2126
2127 skip = sk_peek_offset(sk, flags);
2128 skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
2129 &last);
2130 if (skb)
2131 break;
2132
2133 mutex_unlock(&u->readlock);
2134
2135 if (err != -EAGAIN)
2136 break;
2137 } while (timeo &&
2138 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
2139
2140 if (!skb) { /* implies readlock unlocked */
2141 unix_state_lock(sk);
2142 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2143 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2144 (sk->sk_shutdown & RCV_SHUTDOWN))
2145 err = 0;
2146 unix_state_unlock(sk);
2147 goto out;
2148 }
2149
2150 if (wq_has_sleeper(&u->peer_wait))
2151 wake_up_interruptible_sync_poll(&u->peer_wait,
2152 POLLOUT | POLLWRNORM |
2153 POLLWRBAND);
2154
2155 if (msg->msg_name)
2156 unix_copy_addr(msg, skb->sk);
2157
2158 if (size > skb->len - skip)
2159 size = skb->len - skip;
2160 else if (size < skb->len - skip)
2161 msg->msg_flags |= MSG_TRUNC;
2162
2163 err = skb_copy_datagram_msg(skb, skip, msg, size);
2164 if (err)
2165 goto out_free;
2166
2167 if (sock_flag(sk, SOCK_RCVTSTAMP))
2168 __sock_recv_timestamp(msg, sk, skb);
2169
2170 memset(&scm, 0, sizeof(scm));
2171
2172 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2173 unix_set_secdata(&scm, skb);
2174
2175 if (!(flags & MSG_PEEK)) {
2176 if (UNIXCB(skb).fp)
2177 unix_detach_fds(&scm, skb);
2178
2179 sk_peek_offset_bwd(sk, skb->len);
2180 } else {
		/* It is questionable: on PEEK we could:
		 *  - not return fds: good, but too simple 8)
		 *  - return fds, and not return them on read (the old
		 *    strategy, apparently wrong)
		 *  - clone fds (I chose this for now as the most
		 *    universal solution)
		 *
		 * POSIX 1003.1g does not actually define this clearly at
		 * all - but then POSIX 1003.1g doesn't define a lot of
		 * things clearly!
		 */
2193
2194 sk_peek_offset_fwd(sk, size);
2195
2196 if (UNIXCB(skb).fp)
2197 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2198 }
2199 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2200
2201 scm_recv(sock, msg, &scm, flags);
2202
2203out_free:
2204 skb_free_datagram(sk, skb);
2205 mutex_unlock(&u->readlock);
2206out:
2207 return err;
2208}
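
/* Illustrative sketch only (userspace, not part of this file): the
 * MSG_PEEK strategy above means a peeked datagram carrying SCM_RIGHTS
 * delivers cloned descriptors, and the following non-peek read
 * delivers them again. All names below are standard socket-API ones.
 *
 *	char data[64], ctl[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = ctl,
 *			     .msg_controllen = sizeof(ctl) };
 *	recvmsg(fd, &mh, MSG_PEEK);	// fds arrive via scm_fp_dup()
 *	recvmsg(fd, &mh, 0);		// same payload and fds, detached
 */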
2209
/*
 * Sleep until more data has arrived, but check for races.
 */
2213static long unix_stream_data_wait(struct sock *sk, long timeo,
2214 struct sk_buff *last, unsigned int last_len)
2215{
2216 struct sk_buff *tail;
2217 DEFINE_WAIT(wait);
2218
2219 unix_state_lock(sk);
2220
2221 for (;;) {
2222 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2223
2224 tail = skb_peek_tail(&sk->sk_receive_queue);
2225 if (tail != last ||
2226 (tail && tail->len != last_len) ||
2227 sk->sk_err ||
2228 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2229 signal_pending(current) ||
2230 !timeo)
2231 break;
2232
2233 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2234 unix_state_unlock(sk);
2235 timeo = freezable_schedule_timeout(timeo);
2236 unix_state_lock(sk);
2237
2238 if (sock_flag(sk, SOCK_DEAD))
2239 break;
2240
2241 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2242 }
2243
2244 finish_wait(sk_sleep(sk), &wait);
2245 unix_state_unlock(sk);
2246 return timeo;
2247}
2248
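/* Bytes of this skb a stream reader has not yet consumed: e.g. an skb
 * queued with skb->len == 100 of which 60 bytes have already been read
 * has UNIXCB(skb).consumed == 60, so 40 remain.
 */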
2249static unsigned int unix_skb_len(const struct sk_buff *skb)
2250{
2251 return skb->len - UNIXCB(skb).consumed;
2252}
2253
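/* Shared state for the stream receive paths. recv_actor is
 * unix_stream_read_actor() for recvmsg() or unix_stream_splice_actor()
 * for splice reads; each consumes up to "chunk" bytes of the skb at
 * offset "skip" and returns the bytes taken, or a negative error.
 */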
2254struct unix_stream_read_state {
2255 int (*recv_actor)(struct sk_buff *, int, int,
2256 struct unix_stream_read_state *);
2257 struct socket *socket;
2258 struct msghdr *msg;
2259 struct pipe_inode_info *pipe;
2260 size_t size;
2261 int flags;
2262 unsigned int splice_flags;
2263};
2264
2265static int unix_stream_read_generic(struct unix_stream_read_state *state)
2266{
2267 struct scm_cookie scm;
2268 struct socket *sock = state->socket;
2269 struct sock *sk = sock->sk;
2270 struct unix_sock *u = unix_sk(sk);
2271 int copied = 0;
2272 int flags = state->flags;
2273 int noblock = flags & MSG_DONTWAIT;
2274 bool check_creds = false;
2275 int target;
2276 int err = 0;
2277 long timeo;
2278 int skip;
2279 size_t size = state->size;
2280 unsigned int last_len;
2281
2282 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2283 err = -EINVAL;
2284 goto out;
2285 }
2286
2287 if (unlikely(flags & MSG_OOB)) {
2288 err = -EOPNOTSUPP;
2289 goto out;
2290 }
2291
2292 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2293 timeo = sock_rcvtimeo(sk, noblock);
2294
2295 memset(&scm, 0, sizeof(scm));
2296
	/* Lock the socket to prevent the receive queue from being
	 * reordered while we sleep copying data out to the message.
	 */
2300 mutex_lock(&u->readlock);
2301
2302 if (flags & MSG_PEEK)
2303 skip = sk_peek_offset(sk, flags);
2304 else
2305 skip = 0;
2306
2307 do {
2308 int chunk;
2309 bool drop_skb;
2310 struct sk_buff *skb, *last;
2311
2312redo:
2313 unix_state_lock(sk);
2314 if (sock_flag(sk, SOCK_DEAD)) {
2315 err = -ECONNRESET;
2316 goto unlock;
2317 }
2318 last = skb = skb_peek(&sk->sk_receive_queue);
2319 last_len = last ? last->len : 0;
2320again:
2321 if (skb == NULL) {
2322 unix_sk(sk)->recursion_level = 0;
2323 if (copied >= target)
2324 goto unlock;
2325
2326 /*
2327 * POSIX 1003.1g mandates this order.
2328 */
2329
2330 err = sock_error(sk);
2331 if (err)
2332 goto unlock;
2333 if (sk->sk_shutdown & RCV_SHUTDOWN)
2334 goto unlock;
2335
2336 unix_state_unlock(sk);
2337 if (!timeo) {
2338 err = -EAGAIN;
2339 break;
2340 }
2341
2342 mutex_unlock(&u->readlock);
2343
2344 timeo = unix_stream_data_wait(sk, timeo, last,
2345 last_len);
2346
2347 if (signal_pending(current)) {
2348 err = sock_intr_errno(timeo);
2349 scm_destroy(&scm);
2350 goto out;
2351 }
2352
2353 mutex_lock(&u->readlock);
2354 goto redo;
2355unlock:
2356 unix_state_unlock(sk);
2357 break;
2358 }
2359
2360 while (skip >= unix_skb_len(skb)) {
2361 skip -= unix_skb_len(skb);
2362 last = skb;
2363 last_len = skb->len;
2364 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2365 if (!skb)
2366 goto again;
2367 }
2368
2369 unix_state_unlock(sk);
2370
2371 if (check_creds) {
2372 /* Never glue messages from different writers */
2373 if (!unix_skb_scm_eq(skb, &scm))
2374 break;
2375 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2376 /* Copy credentials */
2377 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2378 unix_set_secdata(&scm, skb);
2379 check_creds = true;
2380 }
2381
2382 /* Copy address just once */
2383 if (state->msg && state->msg->msg_name) {
2384 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2385 state->msg->msg_name);
2386 unix_copy_addr(state->msg, skb->sk);
2387 sunaddr = NULL;
2388 }
2389
2390 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2391 skb_get(skb);
2392 chunk = state->recv_actor(skb, skip, chunk, state);
2393 drop_skb = !unix_skb_len(skb);
2394 /* skb is only safe to use if !drop_skb */
2395 consume_skb(skb);
2396 if (chunk < 0) {
2397 if (copied == 0)
2398 copied = -EFAULT;
2399 break;
2400 }
2401 copied += chunk;
2402 size -= chunk;
2403
2404 if (drop_skb) {
			/* The skb was fully consumed by a concurrent
			 * reader and has certainly been dropped from the
			 * socket queue; we must not touch it again, so
			 * report a short read.
			 */
2412 err = 0;
2413 break;
2414 }
2415
2416 /* Mark read part of skb as used */
2417 if (!(flags & MSG_PEEK)) {
2418 UNIXCB(skb).consumed += chunk;
2419
2420 sk_peek_offset_bwd(sk, chunk);
2421
2422 if (UNIXCB(skb).fp)
2423 unix_detach_fds(&scm, skb);
2424
2425 if (unix_skb_len(skb))
2426 break;
2427
2428 skb_unlink(skb, &sk->sk_receive_queue);
2429 consume_skb(skb);
2430
2431 if (scm.fp)
2432 break;
2433 } else {
2434 /* It is questionable, see note in unix_dgram_recvmsg.
2435 */
2436 if (UNIXCB(skb).fp)
2437 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2438
2439 sk_peek_offset_fwd(sk, chunk);
2440
2441 if (UNIXCB(skb).fp)
2442 break;
2443
2444 skip = 0;
2445 last = skb;
2446 last_len = skb->len;
2447 unix_state_lock(sk);
2448 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2449 if (skb)
2450 goto again;
2451 unix_state_unlock(sk);
2452 break;
2453 }
2454 } while (size);
2455
2456 mutex_unlock(&u->readlock);
2457 if (state->msg)
2458 scm_recv(sock, state->msg, &scm, flags);
2459 else
2460 scm_destroy(&scm);
2461out:
2462 return copied ? : err;
2463}
2464
2465static int unix_stream_read_actor(struct sk_buff *skb,
2466 int skip, int chunk,
2467 struct unix_stream_read_state *state)
2468{
2469 int ret;
2470
2471 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2472 state->msg, chunk);
2473 return ret ?: chunk;
2474}
2475
2476static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2477 size_t size, int flags)
2478{
2479 struct unix_stream_read_state state = {
2480 .recv_actor = unix_stream_read_actor,
2481 .socket = sock,
2482 .msg = msg,
2483 .size = size,
2484 .flags = flags
2485 };
2486
2487 return unix_stream_read_generic(&state);
2488}
2489
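/* Drop readlock across splice_to_pipe(): the splice may sleep waiting
 * for room in the pipe, and holding the reader lock meanwhile could
 * needlessly stall other readers of this socket.
 */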
2490static ssize_t skb_unix_socket_splice(struct sock *sk,
2491 struct pipe_inode_info *pipe,
2492 struct splice_pipe_desc *spd)
2493{
2494 int ret;
2495 struct unix_sock *u = unix_sk(sk);
2496
2497 mutex_unlock(&u->readlock);
2498 ret = splice_to_pipe(pipe, spd);
2499 mutex_lock(&u->readlock);
2500
2501 return ret;
2502}
2503
2504static int unix_stream_splice_actor(struct sk_buff *skb,
2505 int skip, int chunk,
2506 struct unix_stream_read_state *state)
2507{
2508 return skb_splice_bits(skb, state->socket->sk,
2509 UNIXCB(skb).consumed + skip,
2510 state->pipe, chunk, state->splice_flags,
2511 skb_unix_socket_splice);
2512}
2513
2514static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2515 struct pipe_inode_info *pipe,
2516 size_t size, unsigned int flags)
2517{
2518 struct unix_stream_read_state state = {
2519 .recv_actor = unix_stream_splice_actor,
2520 .socket = sock,
2521 .pipe = pipe,
2522 .size = size,
2523 .splice_flags = flags,
2524 };
2525
2526 if (unlikely(*ppos))
2527 return -ESPIPE;
2528
2529 if (sock->file->f_flags & O_NONBLOCK ||
2530 flags & SPLICE_F_NONBLOCK)
2531 state.flags = MSG_DONTWAIT;
2532
2533 return unix_stream_read_generic(&state);
2534}
2535
2536static int unix_shutdown(struct socket *sock, int mode)
2537{
2538 struct sock *sk = sock->sk;
2539 struct sock *other;
2540
2541 if (mode < SHUT_RD || mode > SHUT_RDWR)
2542 return -EINVAL;
2543 /* This maps:
2544 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2545 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2546 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2547 */
2548 ++mode;
2549
2550 unix_state_lock(sk);
2551 sk->sk_shutdown |= mode;
2552 other = unix_peer(sk);
2553 if (other)
2554 sock_hold(other);
2555 unix_state_unlock(sk);
2556 sk->sk_state_change(sk);
2557
2558 if (other &&
2559 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2560
2561 int peer_mode = 0;
2562
2563 if (mode&RCV_SHUTDOWN)
2564 peer_mode |= SEND_SHUTDOWN;
2565 if (mode&SEND_SHUTDOWN)
2566 peer_mode |= RCV_SHUTDOWN;
2567 unix_state_lock(other);
2568 other->sk_shutdown |= peer_mode;
2569 unix_state_unlock(other);
2570 other->sk_state_change(other);
2571 if (peer_mode == SHUTDOWN_MASK)
2572 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2573 else if (peer_mode & RCV_SHUTDOWN)
2574 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2575 }
2576 if (other)
2577 sock_put(other);
2578
2579 return 0;
2580}
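
/* Illustration (userspace sketch, standard shutdown(2) API):
 *
 *	shutdown(fd, SHUT_WR);	// arrives above as SEND_SHUTDOWN
 *
 * On a connected stream/seqpacket socket the peer then also gets
 * RCV_SHUTDOWN, is woken with POLL_IN, and its readers see EOF once
 * the already-queued data drains.
 */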
2581
2582long unix_inq_len(struct sock *sk)
2583{
2584 struct sk_buff *skb;
2585 long amount = 0;
2586
2587 if (sk->sk_state == TCP_LISTEN)
2588 return -EINVAL;
2589
2590 spin_lock(&sk->sk_receive_queue.lock);
2591 if (sk->sk_type == SOCK_STREAM ||
2592 sk->sk_type == SOCK_SEQPACKET) {
2593 skb_queue_walk(&sk->sk_receive_queue, skb)
2594 amount += unix_skb_len(skb);
2595 } else {
2596 skb = skb_peek(&sk->sk_receive_queue);
2597 if (skb)
2598 amount = skb->len;
2599 }
2600 spin_unlock(&sk->sk_receive_queue.lock);
2601
2602 return amount;
2603}
2604EXPORT_SYMBOL_GPL(unix_inq_len);
2605
2606long unix_outq_len(struct sock *sk)
2607{
2608 return sk_wmem_alloc_get(sk);
2609}
2610EXPORT_SYMBOL_GPL(unix_outq_len);
2611
2612static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2613{
2614 struct sock *sk = sock->sk;
2615 long amount = 0;
2616 int err;
2617
2618 switch (cmd) {
2619 case SIOCOUTQ:
2620 amount = unix_outq_len(sk);
2621 err = put_user(amount, (int __user *)arg);
2622 break;
2623 case SIOCINQ:
2624 amount = unix_inq_len(sk);
2625 if (amount < 0)
2626 err = amount;
2627 else
2628 err = put_user(amount, (int __user *)arg);
2629 break;
2630 default:
2631 err = -ENOIOCTLCMD;
2632 break;
2633 }
2634 return err;
2635}
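
/* Illustration (userspace sketch; SIOCINQ/SIOCOUTQ are the standard
 * <linux/sockios.h> requests handled above):
 *
 *	int pending, unsent;
 *	ioctl(fd, SIOCINQ, &pending);	// readable bytes, unix_inq_len()
 *	ioctl(fd, SIOCOUTQ, &unsent);	// unconsumed bytes, unix_outq_len()
 */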
2636
2637static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2638{
2639 struct sock *sk = sock->sk;
2640 unsigned int mask;
2641
2642 sock_poll_wait(file, sk_sleep(sk), wait);
2643 mask = 0;
2644
2645 /* exceptional events? */
2646 if (sk->sk_err)
2647 mask |= POLLERR;
2648 if (sk->sk_shutdown == SHUTDOWN_MASK)
2649 mask |= POLLHUP;
2650 if (sk->sk_shutdown & RCV_SHUTDOWN)
2651 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2652
2653 /* readable? */
2654 if (!skb_queue_empty(&sk->sk_receive_queue))
2655 mask |= POLLIN | POLLRDNORM;
2656
2657 /* Connection-based need to check for termination and startup */
2658 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2659 sk->sk_state == TCP_CLOSE)
2660 mask |= POLLHUP;
2661
	/*
	 * We report the socket writable even when the other side has
	 * shut down the connection; this prevents writers from getting
	 * stuck.
	 */
2666 if (unix_writable(sk))
2667 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2668
2669 return mask;
2670}
2671
2672static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2673 poll_table *wait)
2674{
2675 struct sock *sk = sock->sk, *other;
2676 unsigned int mask, writable;
2677
2678 sock_poll_wait(file, sk_sleep(sk), wait);
2679 mask = 0;
2680
2681 /* exceptional events? */
2682 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2683 mask |= POLLERR |
2684 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2685
2686 if (sk->sk_shutdown & RCV_SHUTDOWN)
2687 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2688 if (sk->sk_shutdown == SHUTDOWN_MASK)
2689 mask |= POLLHUP;
2690
2691 /* readable? */
2692 if (!skb_queue_empty(&sk->sk_receive_queue))
2693 mask |= POLLIN | POLLRDNORM;
2694
2695 /* Connection-based need to check for termination and startup */
2696 if (sk->sk_type == SOCK_SEQPACKET) {
2697 if (sk->sk_state == TCP_CLOSE)
2698 mask |= POLLHUP;
2699 /* connection hasn't started yet? */
2700 if (sk->sk_state == TCP_SYN_SENT)
2701 return mask;
2702 }
2703
2704 /* No write status requested, avoid expensive OUT tests. */
2705 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
2706 return mask;
2707
2708 writable = unix_writable(sk);
2709 if (writable) {
2710 unix_state_lock(sk);
2711
2712 other = unix_peer(sk);
2713 if (other && unix_peer(other) != sk &&
2714 unix_recvq_full(other) &&
2715 unix_dgram_peer_wake_me(sk, other))
2716 writable = 0;
2717
2718 unix_state_unlock(sk);
2719 }
2720
2721 if (writable)
2722 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2723 else
2724 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2725
2726 return mask;
2727}
2728
2729#ifdef CONFIG_PROC_FS
2730
2731#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2732
2733#define get_bucket(x) ((x) >> BUCKET_SPACE)
2734#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2735#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
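
/* Worked example: assuming 64-bit longs and UNIX_HASH_BITS == 8 (the
 * value matching a 256-entry table), BUCKET_SPACE is 64 - 9 - 1 = 54,
 * so set_bucket_offset(3, 17) == (3UL << 54) | 17, from which
 * get_bucket() and get_offset() recover 3 and 17 again.
 */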
2736
2737static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2738{
2739 unsigned long offset = get_offset(*pos);
2740 unsigned long bucket = get_bucket(*pos);
2741 struct sock *sk;
2742 unsigned long count = 0;
2743
2744 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2745 if (sock_net(sk) != seq_file_net(seq))
2746 continue;
2747 if (++count == offset)
2748 break;
2749 }
2750
2751 return sk;
2752}
2753
2754static struct sock *unix_next_socket(struct seq_file *seq,
2755 struct sock *sk,
2756 loff_t *pos)
2757{
2758 unsigned long bucket;
2759
2760 while (sk > (struct sock *)SEQ_START_TOKEN) {
2761 sk = sk_next(sk);
2762 if (!sk)
2763 goto next_bucket;
2764 if (sock_net(sk) == seq_file_net(seq))
2765 return sk;
2766 }
2767
2768 do {
2769 sk = unix_from_bucket(seq, pos);
2770 if (sk)
2771 return sk;
2772
2773next_bucket:
2774 bucket = get_bucket(*pos) + 1;
2775 *pos = set_bucket_offset(bucket, 1);
2776 } while (bucket < ARRAY_SIZE(unix_socket_table));
2777
2778 return NULL;
2779}
2780
2781static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2782 __acquires(unix_table_lock)
2783{
2784 spin_lock(&unix_table_lock);
2785
2786 if (!*pos)
2787 return SEQ_START_TOKEN;
2788
2789 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2790 return NULL;
2791
2792 return unix_next_socket(seq, NULL, pos);
2793}
2794
2795static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2796{
2797 ++*pos;
2798 return unix_next_socket(seq, v, pos);
2799}
2800
2801static void unix_seq_stop(struct seq_file *seq, void *v)
2802 __releases(unix_table_lock)
2803{
2804 spin_unlock(&unix_table_lock);
2805}
2806
2807static int unix_seq_show(struct seq_file *seq, void *v)
2808{
2810 if (v == SEQ_START_TOKEN)
2811 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2812 "Inode Path\n");
2813 else {
2814 struct sock *s = v;
2815 struct unix_sock *u = unix_sk(s);
2816 unix_state_lock(s);
2817
2818 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2819 s,
2820 atomic_read(&s->sk_refcnt),
2821 0,
2822 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2823 s->sk_type,
2824 s->sk_socket ?
2825 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2826 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2827 sock_i_ino(s));
2828
2829 if (u->addr) {
2830 int i, len;
2831 seq_putc(seq, ' ');
2832
2833 i = 0;
2834 len = u->addr->len - sizeof(short);
2835 if (!UNIX_ABSTRACT(s))
2836 len--;
2837 else {
2838 seq_putc(seq, '@');
2839 i++;
2840 }
2841 for ( ; i < len; i++)
2842 seq_putc(seq, u->addr->name->sun_path[i]);
2843 }
2844 unix_state_unlock(s);
2845 seq_putc(seq, '\n');
2846 }
2847
2848 return 0;
2849}
2850
2851static const struct seq_operations unix_seq_ops = {
2852 .start = unix_seq_start,
2853 .next = unix_seq_next,
2854 .stop = unix_seq_stop,
2855 .show = unix_seq_show,
2856};
2857
2858static int unix_seq_open(struct inode *inode, struct file *file)
2859{
2860 return seq_open_net(inode, file, &unix_seq_ops,
2861 sizeof(struct seq_net_private));
2862}
2863
2864static const struct file_operations unix_seq_fops = {
2865 .owner = THIS_MODULE,
2866 .open = unix_seq_open,
2867 .read = seq_read,
2868 .llseek = seq_lseek,
2869 .release = seq_release_net,
2870};
2871
2872#endif
2873
2874static const struct net_proto_family unix_family_ops = {
2875 .family = PF_UNIX,
2876 .create = unix_create,
2877 .owner = THIS_MODULE,
2878};
2879
2880
2881static int __net_init unix_net_init(struct net *net)
2882{
2883 int error = -ENOMEM;
2884
2885 net->unx.sysctl_max_dgram_qlen = 10;
2886 if (unix_sysctl_register(net))
2887 goto out;
2888
2889#ifdef CONFIG_PROC_FS
2890 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2891 unix_sysctl_unregister(net);
2892 goto out;
2893 }
2894#endif
2895 error = 0;
2896out:
2897 return error;
2898}
2899
2900static void __net_exit unix_net_exit(struct net *net)
2901{
2902 unix_sysctl_unregister(net);
2903 remove_proc_entry("unix", net->proc_net);
2904}
2905
2906static struct pernet_operations unix_net_ops = {
2907 .init = unix_net_init,
2908 .exit = unix_net_exit,
2909};
2910
2911static int __init af_unix_init(void)
2912{
2913 int rc = -1;
2914
2915 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2916
2917 rc = proto_register(&unix_proto, 1);
2918 if (rc != 0) {
2919 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2920 goto out;
2921 }
2922
2923 sock_register(&unix_family_ops);
2924 register_pernet_subsys(&unix_net_ops);
2925out:
2926 return rc;
2927}
2928
2929static void __exit af_unix_exit(void)
2930{
2931 sock_unregister(PF_UNIX);
2932 proto_unregister(&unix_proto);
2933 unregister_pernet_subsys(&unix_net_ops);
2934}
2935
/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries to use
 * a UNIX socket. But later than subsys_initcall() because we depend
 * on stuff initialised there.
 */
2940fs_initcall(af_unix_init);
2941module_exit(af_unix_exit);
2942
2943MODULE_LICENSE("GPL");
2944MODULE_ALIAS_NETPROTO(PF_UNIX);
83#include <linux/module.h>
84#include <linux/kernel.h>
85#include <linux/signal.h>
86#include <linux/sched.h>
87#include <linux/errno.h>
88#include <linux/string.h>
89#include <linux/stat.h>
90#include <linux/dcache.h>
91#include <linux/namei.h>
92#include <linux/socket.h>
93#include <linux/un.h>
94#include <linux/fcntl.h>
95#include <linux/termios.h>
96#include <linux/sockios.h>
97#include <linux/net.h>
98#include <linux/in.h>
99#include <linux/fs.h>
100#include <linux/slab.h>
101#include <asm/uaccess.h>
102#include <linux/skbuff.h>
103#include <linux/netdevice.h>
104#include <net/net_namespace.h>
105#include <net/sock.h>
106#include <net/tcp_states.h>
107#include <net/af_unix.h>
108#include <linux/proc_fs.h>
109#include <linux/seq_file.h>
110#include <net/scm.h>
111#include <linux/init.h>
112#include <linux/poll.h>
113#include <linux/rtnetlink.h>
114#include <linux/mount.h>
115#include <net/checksum.h>
116#include <linux/security.h>
117
118static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119static DEFINE_SPINLOCK(unix_table_lock);
120static atomic_long_t unix_nr_socks;
121
122#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
123
124#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
125
126#ifdef CONFIG_SECURITY_NETWORK
127static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
128{
129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
130}
131
132static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
133{
134 scm->secid = *UNIXSID(skb);
135}
136#else
137static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
138{ }
139
140static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141{ }
142#endif /* CONFIG_SECURITY_NETWORK */
143
144/*
145 * SMP locking strategy:
146 * hash table is protected with spinlock unix_table_lock
147 * each socket state is protected by separate spin lock.
148 */
149
150static inline unsigned unix_hash_fold(__wsum n)
151{
152 unsigned hash = (__force unsigned)n;
153 hash ^= hash>>16;
154 hash ^= hash>>8;
155 return hash&(UNIX_HASH_SIZE-1);
156}
157
158#define unix_peer(sk) (unix_sk(sk)->peer)
159
160static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161{
162 return unix_peer(osk) == sk;
163}
164
165static inline int unix_may_send(struct sock *sk, struct sock *osk)
166{
167 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
168}
169
170static inline int unix_recvq_full(struct sock const *sk)
171{
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173}
174
175static struct sock *unix_peer_get(struct sock *s)
176{
177 struct sock *peer;
178
179 unix_state_lock(s);
180 peer = unix_peer(s);
181 if (peer)
182 sock_hold(peer);
183 unix_state_unlock(s);
184 return peer;
185}
186
187static inline void unix_release_addr(struct unix_address *addr)
188{
189 if (atomic_dec_and_test(&addr->refcnt))
190 kfree(addr);
191}
192
193/*
194 * Check unix socket name:
195 * - should be not zero length.
196 * - if started by not zero, should be NULL terminated (FS object)
197 * - if started by zero, it is abstract name.
198 */
199
200static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
201{
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
203 return -EINVAL;
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205 return -EINVAL;
206 if (sunaddr->sun_path[0]) {
207 /*
208 * This may look like an off by one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
210 * sun_path[108] doesn't as such exist. However in kernel space
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
213 */
214 ((char *)sunaddr)[len] = 0;
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216 return len;
217 }
218
219 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
220 return len;
221}
222
223static void __unix_remove_socket(struct sock *sk)
224{
225 sk_del_node_init(sk);
226}
227
228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229{
230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list);
232}
233
234static inline void unix_remove_socket(struct sock *sk)
235{
236 spin_lock(&unix_table_lock);
237 __unix_remove_socket(sk);
238 spin_unlock(&unix_table_lock);
239}
240
241static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242{
243 spin_lock(&unix_table_lock);
244 __unix_insert_socket(list, sk);
245 spin_unlock(&unix_table_lock);
246}
247
248static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
251{
252 struct sock *s;
253 struct hlist_node *node;
254
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
257
258 if (!net_eq(sock_net(s), net))
259 continue;
260
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
263 goto found;
264 }
265 s = NULL;
266found:
267 return s;
268}
269
270static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
272 int len, int type,
273 unsigned hash)
274{
275 struct sock *s;
276
277 spin_lock(&unix_table_lock);
278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
279 if (s)
280 sock_hold(s);
281 spin_unlock(&unix_table_lock);
282 return s;
283}
284
285static struct sock *unix_find_socket_byinode(struct inode *i)
286{
287 struct sock *s;
288 struct hlist_node *node;
289
290 spin_lock(&unix_table_lock);
291 sk_for_each(s, node,
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
294
295 if (dentry && dentry->d_inode == i) {
296 sock_hold(s);
297 goto found;
298 }
299 }
300 s = NULL;
301found:
302 spin_unlock(&unix_table_lock);
303 return s;
304}
305
306static inline int unix_writable(struct sock *sk)
307{
308 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
309}
310
311static void unix_write_space(struct sock *sk)
312{
313 struct socket_wq *wq;
314
315 rcu_read_lock();
316 if (unix_writable(sk)) {
317 wq = rcu_dereference(sk->sk_wq);
318 if (wq_has_sleeper(wq))
319 wake_up_interruptible_sync_poll(&wq->wait,
320 POLLOUT | POLLWRNORM | POLLWRBAND);
321 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
322 }
323 rcu_read_unlock();
324}
325
326/* When dgram socket disconnects (or changes its peer), we clear its receive
327 * queue of packets arrived from previous peer. First, it allows to do
328 * flow control based only on wmem_alloc; second, sk connected to peer
329 * may receive messages only from that peer. */
330static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
331{
332 if (!skb_queue_empty(&sk->sk_receive_queue)) {
333 skb_queue_purge(&sk->sk_receive_queue);
334 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
335
336 /* If one link of bidirectional dgram pipe is disconnected,
337 * we signal error. Messages are lost. Do not make this,
338 * when peer was not connected to us.
339 */
340 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
341 other->sk_err = ECONNRESET;
342 other->sk_error_report(other);
343 }
344 }
345}
346
347static void unix_sock_destructor(struct sock *sk)
348{
349 struct unix_sock *u = unix_sk(sk);
350
351 skb_queue_purge(&sk->sk_receive_queue);
352
353 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
354 WARN_ON(!sk_unhashed(sk));
355 WARN_ON(sk->sk_socket);
356 if (!sock_flag(sk, SOCK_DEAD)) {
357 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
358 return;
359 }
360
361 if (u->addr)
362 unix_release_addr(u->addr);
363
364 atomic_long_dec(&unix_nr_socks);
365 local_bh_disable();
366 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
367 local_bh_enable();
368#ifdef UNIX_REFCNT_DEBUG
369 printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
370 atomic_long_read(&unix_nr_socks));
371#endif
372}
373
374static int unix_release_sock(struct sock *sk, int embrion)
375{
376 struct unix_sock *u = unix_sk(sk);
377 struct dentry *dentry;
378 struct vfsmount *mnt;
379 struct sock *skpair;
380 struct sk_buff *skb;
381 int state;
382
383 unix_remove_socket(sk);
384
385 /* Clear state */
386 unix_state_lock(sk);
387 sock_orphan(sk);
388 sk->sk_shutdown = SHUTDOWN_MASK;
389 dentry = u->dentry;
390 u->dentry = NULL;
391 mnt = u->mnt;
392 u->mnt = NULL;
393 state = sk->sk_state;
394 sk->sk_state = TCP_CLOSE;
395 unix_state_unlock(sk);
396
397 wake_up_interruptible_all(&u->peer_wait);
398
399 skpair = unix_peer(sk);
400
401 if (skpair != NULL) {
402 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
403 unix_state_lock(skpair);
404 /* No more writes */
405 skpair->sk_shutdown = SHUTDOWN_MASK;
406 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
407 skpair->sk_err = ECONNRESET;
408 unix_state_unlock(skpair);
409 skpair->sk_state_change(skpair);
410 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
411 }
412 sock_put(skpair); /* It may now die */
413 unix_peer(sk) = NULL;
414 }
415
416 /* Try to flush out this socket. Throw out buffers at least */
417
418 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
419 if (state == TCP_LISTEN)
420 unix_release_sock(skb->sk, 1);
421 /* passed fds are erased in the kfree_skb hook */
422 kfree_skb(skb);
423 }
424
425 if (dentry) {
426 dput(dentry);
427 mntput(mnt);
428 }
429
430 sock_put(sk);
431
432 /* ---- Socket is dead now and most probably destroyed ---- */
433
434 /*
435 * Fixme: BSD difference: In BSD all sockets connected to use get
436 * ECONNRESET and we die on the spot. In Linux we behave
437 * like files and pipes do and wait for the last
438 * dereference.
439 *
440 * Can't we simply set sock->err?
441 *
442 * What the above comment does talk about? --ANK(980817)
443 */
444
445 if (unix_tot_inflight)
446 unix_gc(); /* Garbage collect fds */
447
448 return 0;
449}
450
451static void init_peercred(struct sock *sk)
452{
453 put_pid(sk->sk_peer_pid);
454 if (sk->sk_peer_cred)
455 put_cred(sk->sk_peer_cred);
456 sk->sk_peer_pid = get_pid(task_tgid(current));
457 sk->sk_peer_cred = get_current_cred();
458}
459
460static void copy_peercred(struct sock *sk, struct sock *peersk)
461{
462 put_pid(sk->sk_peer_pid);
463 if (sk->sk_peer_cred)
464 put_cred(sk->sk_peer_cred);
465 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
466 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
467}
468
469static int unix_listen(struct socket *sock, int backlog)
470{
471 int err;
472 struct sock *sk = sock->sk;
473 struct unix_sock *u = unix_sk(sk);
474 struct pid *old_pid = NULL;
475 const struct cred *old_cred = NULL;
476
477 err = -EOPNOTSUPP;
478 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
479 goto out; /* Only stream/seqpacket sockets accept */
480 err = -EINVAL;
481 if (!u->addr)
482 goto out; /* No listens on an unbound socket */
483 unix_state_lock(sk);
484 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
485 goto out_unlock;
486 if (backlog > sk->sk_max_ack_backlog)
487 wake_up_interruptible_all(&u->peer_wait);
488 sk->sk_max_ack_backlog = backlog;
489 sk->sk_state = TCP_LISTEN;
490 /* set credentials so connect can copy them */
491 init_peercred(sk);
492 err = 0;
493
494out_unlock:
495 unix_state_unlock(sk);
496 put_pid(old_pid);
497 if (old_cred)
498 put_cred(old_cred);
499out:
500 return err;
501}
502
503static int unix_release(struct socket *);
504static int unix_bind(struct socket *, struct sockaddr *, int);
505static int unix_stream_connect(struct socket *, struct sockaddr *,
506 int addr_len, int flags);
507static int unix_socketpair(struct socket *, struct socket *);
508static int unix_accept(struct socket *, struct socket *, int);
509static int unix_getname(struct socket *, struct sockaddr *, int *, int);
510static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
511static unsigned int unix_dgram_poll(struct file *, struct socket *,
512 poll_table *);
513static int unix_ioctl(struct socket *, unsigned int, unsigned long);
514static int unix_shutdown(struct socket *, int);
515static int unix_stream_sendmsg(struct kiocb *, struct socket *,
516 struct msghdr *, size_t);
517static int unix_stream_recvmsg(struct kiocb *, struct socket *,
518 struct msghdr *, size_t, int);
519static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
520 struct msghdr *, size_t);
521static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
522 struct msghdr *, size_t, int);
523static int unix_dgram_connect(struct socket *, struct sockaddr *,
524 int, int);
525static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
526 struct msghdr *, size_t);
527static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
528 struct msghdr *, size_t, int);
529
530static const struct proto_ops unix_stream_ops = {
531 .family = PF_UNIX,
532 .owner = THIS_MODULE,
533 .release = unix_release,
534 .bind = unix_bind,
535 .connect = unix_stream_connect,
536 .socketpair = unix_socketpair,
537 .accept = unix_accept,
538 .getname = unix_getname,
539 .poll = unix_poll,
540 .ioctl = unix_ioctl,
541 .listen = unix_listen,
542 .shutdown = unix_shutdown,
543 .setsockopt = sock_no_setsockopt,
544 .getsockopt = sock_no_getsockopt,
545 .sendmsg = unix_stream_sendmsg,
546 .recvmsg = unix_stream_recvmsg,
547 .mmap = sock_no_mmap,
548 .sendpage = sock_no_sendpage,
549};
550
551static const struct proto_ops unix_dgram_ops = {
552 .family = PF_UNIX,
553 .owner = THIS_MODULE,
554 .release = unix_release,
555 .bind = unix_bind,
556 .connect = unix_dgram_connect,
557 .socketpair = unix_socketpair,
558 .accept = sock_no_accept,
559 .getname = unix_getname,
560 .poll = unix_dgram_poll,
561 .ioctl = unix_ioctl,
562 .listen = sock_no_listen,
563 .shutdown = unix_shutdown,
564 .setsockopt = sock_no_setsockopt,
565 .getsockopt = sock_no_getsockopt,
566 .sendmsg = unix_dgram_sendmsg,
567 .recvmsg = unix_dgram_recvmsg,
568 .mmap = sock_no_mmap,
569 .sendpage = sock_no_sendpage,
570};
571
572static const struct proto_ops unix_seqpacket_ops = {
573 .family = PF_UNIX,
574 .owner = THIS_MODULE,
575 .release = unix_release,
576 .bind = unix_bind,
577 .connect = unix_stream_connect,
578 .socketpair = unix_socketpair,
579 .accept = unix_accept,
580 .getname = unix_getname,
581 .poll = unix_dgram_poll,
582 .ioctl = unix_ioctl,
583 .listen = unix_listen,
584 .shutdown = unix_shutdown,
585 .setsockopt = sock_no_setsockopt,
586 .getsockopt = sock_no_getsockopt,
587 .sendmsg = unix_seqpacket_sendmsg,
588 .recvmsg = unix_seqpacket_recvmsg,
589 .mmap = sock_no_mmap,
590 .sendpage = sock_no_sendpage,
591};
592
593static struct proto unix_proto = {
594 .name = "UNIX",
595 .owner = THIS_MODULE,
596 .obj_size = sizeof(struct unix_sock),
597};
598
599/*
600 * AF_UNIX sockets do not interact with hardware, hence they
601 * dont trigger interrupts - so it's safe for them to have
602 * bh-unsafe locking for their sk_receive_queue.lock. Split off
603 * this special lock-class by reinitializing the spinlock key:
604 */
605static struct lock_class_key af_unix_sk_receive_queue_lock_key;
606
607static struct sock *unix_create1(struct net *net, struct socket *sock)
608{
609 struct sock *sk = NULL;
610 struct unix_sock *u;
611
612 atomic_long_inc(&unix_nr_socks);
613 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
614 goto out;
615
616 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
617 if (!sk)
618 goto out;
619
620 sock_init_data(sock, sk);
621 lockdep_set_class(&sk->sk_receive_queue.lock,
622 &af_unix_sk_receive_queue_lock_key);
623
624 sk->sk_write_space = unix_write_space;
625 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
626 sk->sk_destruct = unix_sock_destructor;
627 u = unix_sk(sk);
628 u->dentry = NULL;
629 u->mnt = NULL;
630 spin_lock_init(&u->lock);
631 atomic_long_set(&u->inflight, 0);
632 INIT_LIST_HEAD(&u->link);
633 mutex_init(&u->readlock); /* single task reading lock */
634 init_waitqueue_head(&u->peer_wait);
635 unix_insert_socket(unix_sockets_unbound, sk);
636out:
637 if (sk == NULL)
638 atomic_long_dec(&unix_nr_socks);
639 else {
640 local_bh_disable();
641 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
642 local_bh_enable();
643 }
644 return sk;
645}
646
647static int unix_create(struct net *net, struct socket *sock, int protocol,
648 int kern)
649{
650 if (protocol && protocol != PF_UNIX)
651 return -EPROTONOSUPPORT;
652
653 sock->state = SS_UNCONNECTED;
654
655 switch (sock->type) {
656 case SOCK_STREAM:
657 sock->ops = &unix_stream_ops;
658 break;
659 /*
660 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
661 * nothing uses it.
662 */
663 case SOCK_RAW:
664 sock->type = SOCK_DGRAM;
665 case SOCK_DGRAM:
666 sock->ops = &unix_dgram_ops;
667 break;
668 case SOCK_SEQPACKET:
669 sock->ops = &unix_seqpacket_ops;
670 break;
671 default:
672 return -ESOCKTNOSUPPORT;
673 }
674
675 return unix_create1(net, sock) ? 0 : -ENOMEM;
676}
677
678static int unix_release(struct socket *sock)
679{
680 struct sock *sk = sock->sk;
681
682 if (!sk)
683 return 0;
684
685 sock->sk = NULL;
686
687 return unix_release_sock(sk, 0);
688}
689
690static int unix_autobind(struct socket *sock)
691{
692 struct sock *sk = sock->sk;
693 struct net *net = sock_net(sk);
694 struct unix_sock *u = unix_sk(sk);
695 static u32 ordernum = 1;
696 struct unix_address *addr;
697 int err;
698 unsigned int retries = 0;
699
700 mutex_lock(&u->readlock);
701
702 err = 0;
703 if (u->addr)
704 goto out;
705
706 err = -ENOMEM;
707 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
708 if (!addr)
709 goto out;
710
711 addr->name->sun_family = AF_UNIX;
712 atomic_set(&addr->refcnt, 1);
713
714retry:
715 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
716 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
717
718 spin_lock(&unix_table_lock);
719 ordernum = (ordernum+1)&0xFFFFF;
720
721 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
722 addr->hash)) {
723 spin_unlock(&unix_table_lock);
724 /*
725 * __unix_find_socket_byname() may take long time if many names
726 * are already in use.
727 */
728 cond_resched();
729 /* Give up if all names seems to be in use. */
730 if (retries++ == 0xFFFFF) {
731 err = -ENOSPC;
732 kfree(addr);
733 goto out;
734 }
735 goto retry;
736 }
737 addr->hash ^= sk->sk_type;
738
739 __unix_remove_socket(sk);
740 u->addr = addr;
741 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
742 spin_unlock(&unix_table_lock);
743 err = 0;
744
745out: mutex_unlock(&u->readlock);
746 return err;
747}
748
749static struct sock *unix_find_other(struct net *net,
750 struct sockaddr_un *sunname, int len,
751 int type, unsigned hash, int *error)
752{
753 struct sock *u;
754 struct path path;
755 int err = 0;
756
757 if (sunname->sun_path[0]) {
758 struct inode *inode;
759 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
760 if (err)
761 goto fail;
762 inode = path.dentry->d_inode;
763 err = inode_permission(inode, MAY_WRITE);
764 if (err)
765 goto put_fail;
766
767 err = -ECONNREFUSED;
768 if (!S_ISSOCK(inode->i_mode))
769 goto put_fail;
770 u = unix_find_socket_byinode(inode);
771 if (!u)
772 goto put_fail;
773
774 if (u->sk_type == type)
775 touch_atime(path.mnt, path.dentry);
776
777 path_put(&path);
778
779 err = -EPROTOTYPE;
780 if (u->sk_type != type) {
781 sock_put(u);
782 goto fail;
783 }
784 } else {
785 err = -ECONNREFUSED;
786 u = unix_find_socket_byname(net, sunname, len, type, hash);
787 if (u) {
788 struct dentry *dentry;
789 dentry = unix_sk(u)->dentry;
790 if (dentry)
791 touch_atime(unix_sk(u)->mnt, dentry);
792 } else
793 goto fail;
794 }
795 return u;
796
797put_fail:
798 path_put(&path);
799fail:
800 *error = err;
801 return NULL;
802}
803
804
805static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
806{
807 struct sock *sk = sock->sk;
808 struct net *net = sock_net(sk);
809 struct unix_sock *u = unix_sk(sk);
810 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
811 char *sun_path = sunaddr->sun_path;
812 struct dentry *dentry = NULL;
813 struct path path;
814 int err;
815 unsigned hash;
816 struct unix_address *addr;
817 struct hlist_head *list;
818
819 err = -EINVAL;
820 if (sunaddr->sun_family != AF_UNIX)
821 goto out;
822
823 if (addr_len == sizeof(short)) {
824 err = unix_autobind(sock);
825 goto out;
826 }
827
828 err = unix_mkname(sunaddr, addr_len, &hash);
829 if (err < 0)
830 goto out;
831 addr_len = err;
832
833 mutex_lock(&u->readlock);
834
835 err = -EINVAL;
836 if (u->addr)
837 goto out_up;
838
839 err = -ENOMEM;
840 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
841 if (!addr)
842 goto out_up;
843
844 memcpy(addr->name, sunaddr, addr_len);
845 addr->len = addr_len;
846 addr->hash = hash ^ sk->sk_type;
847 atomic_set(&addr->refcnt, 1);
848
849 if (sun_path[0]) {
850 unsigned int mode;
851 err = 0;
852 /*
853 * Get the parent directory, calculate the hash for last
854 * component.
855 */
856 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
857 err = PTR_ERR(dentry);
858 if (IS_ERR(dentry))
859 goto out_mknod_parent;
860
861 /*
862 * All right, let's create it.
863 */
864 mode = S_IFSOCK |
865 (SOCK_INODE(sock)->i_mode & ~current_umask());
866 err = mnt_want_write(path.mnt);
867 if (err)
868 goto out_mknod_dput;
869 err = security_path_mknod(&path, dentry, mode, 0);
870 if (err)
871 goto out_mknod_drop_write;
872 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
873out_mknod_drop_write:
874 mnt_drop_write(path.mnt);
875 if (err)
876 goto out_mknod_dput;
877 mutex_unlock(&path.dentry->d_inode->i_mutex);
878 dput(path.dentry);
879 path.dentry = dentry;
880
881 addr->hash = UNIX_HASH_SIZE;
882 }
883
884 spin_lock(&unix_table_lock);
885
886 if (!sun_path[0]) {
887 err = -EADDRINUSE;
888 if (__unix_find_socket_byname(net, sunaddr, addr_len,
889 sk->sk_type, hash)) {
890 unix_release_addr(addr);
891 goto out_unlock;
892 }
893
894 list = &unix_socket_table[addr->hash];
895 } else {
896 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
897 u->dentry = path.dentry;
898 u->mnt = path.mnt;
899 }
900
901 err = 0;
902 __unix_remove_socket(sk);
903 u->addr = addr;
904 __unix_insert_socket(list, sk);
905
906out_unlock:
907 spin_unlock(&unix_table_lock);
908out_up:
909 mutex_unlock(&u->readlock);
910out:
911 return err;
912
913out_mknod_dput:
914 dput(dentry);
915 mutex_unlock(&path.dentry->d_inode->i_mutex);
916 path_put(&path);
917out_mknod_parent:
918 if (err == -EEXIST)
919 err = -EADDRINUSE;
920 unix_release_addr(addr);
921 goto out_up;
922}
923
924static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
925{
926 if (unlikely(sk1 == sk2) || !sk2) {
927 unix_state_lock(sk1);
928 return;
929 }
930 if (sk1 < sk2) {
931 unix_state_lock(sk1);
932 unix_state_lock_nested(sk2);
933 } else {
934 unix_state_lock(sk2);
935 unix_state_lock_nested(sk1);
936 }
937}
938
939static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
940{
941 if (unlikely(sk1 == sk2) || !sk2) {
942 unix_state_unlock(sk1);
943 return;
944 }
945 unix_state_unlock(sk1);
946 unix_state_unlock(sk2);
947}
948
949static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
950 int alen, int flags)
951{
952 struct sock *sk = sock->sk;
953 struct net *net = sock_net(sk);
954 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
955 struct sock *other;
956 unsigned hash;
957 int err;
958
959 if (addr->sa_family != AF_UNSPEC) {
960 err = unix_mkname(sunaddr, alen, &hash);
961 if (err < 0)
962 goto out;
963 alen = err;
964
965 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
966 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
967 goto out;
968
969restart:
970 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
971 if (!other)
972 goto out;
973
974 unix_state_double_lock(sk, other);
975
976 /* Apparently VFS overslept socket death. Retry. */
977 if (sock_flag(other, SOCK_DEAD)) {
978 unix_state_double_unlock(sk, other);
979 sock_put(other);
980 goto restart;
981 }
982
983 err = -EPERM;
984 if (!unix_may_send(sk, other))
985 goto out_unlock;
986
987 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
988 if (err)
989 goto out_unlock;
990
991 } else {
992 /*
993 * 1003.1g breaking connected state with AF_UNSPEC
994 */
995 other = NULL;
996 unix_state_double_lock(sk, other);
997 }
998
999 /*
1000 * If it was connected, reconnect.
1001 */
1002 if (unix_peer(sk)) {
1003 struct sock *old_peer = unix_peer(sk);
1004 unix_peer(sk) = other;
1005 unix_state_double_unlock(sk, other);
1006
1007 if (other != old_peer)
1008 unix_dgram_disconnected(sk, old_peer);
1009 sock_put(old_peer);
1010 } else {
1011 unix_peer(sk) = other;
1012 unix_state_double_unlock(sk, other);
1013 }
1014 return 0;
1015
1016out_unlock:
1017 unix_state_double_unlock(sk, other);
1018 sock_put(other);
1019out:
1020 return err;
1021}
1022
1023static long unix_wait_for_peer(struct sock *other, long timeo)
1024{
1025 struct unix_sock *u = unix_sk(other);
1026 int sched;
1027 DEFINE_WAIT(wait);
1028
1029 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1030
1031 sched = !sock_flag(other, SOCK_DEAD) &&
1032 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1033 unix_recvq_full(other);
1034
1035 unix_state_unlock(other);
1036
1037 if (sched)
1038 timeo = schedule_timeout(timeo);
1039
1040 finish_wait(&u->peer_wait, &wait);
1041 return timeo;
1042}
1043
1044static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1045 int addr_len, int flags)
1046{
1047 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1048 struct sock *sk = sock->sk;
1049 struct net *net = sock_net(sk);
1050 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1051 struct sock *newsk = NULL;
1052 struct sock *other = NULL;
1053 struct sk_buff *skb = NULL;
1054 unsigned hash;
1055 int st;
1056 int err;
1057 long timeo;
1058
1059 err = unix_mkname(sunaddr, addr_len, &hash);
1060 if (err < 0)
1061 goto out;
1062 addr_len = err;
1063
1064 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1065 (err = unix_autobind(sock)) != 0)
1066 goto out;
1067
1068 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1069
1070 /* First of all allocate resources.
1071 If we will make it after state is locked,
1072 we will have to recheck all again in any case.
1073 */
1074
1075 err = -ENOMEM;
1076
1077 /* create new sock for complete connection */
1078 newsk = unix_create1(sock_net(sk), NULL);
1079 if (newsk == NULL)
1080 goto out;
1081
1082 /* Allocate skb for sending to listening sock */
1083 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1084 if (skb == NULL)
1085 goto out;
1086
1087restart:
1088 /* Find listening sock. */
1089 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1090 if (!other)
1091 goto out;
1092
1093 /* Latch state of peer */
1094 unix_state_lock(other);
1095
1096 /* Apparently VFS overslept socket death. Retry. */
1097 if (sock_flag(other, SOCK_DEAD)) {
1098 unix_state_unlock(other);
1099 sock_put(other);
1100 goto restart;
1101 }
1102
1103 err = -ECONNREFUSED;
1104 if (other->sk_state != TCP_LISTEN)
1105 goto out_unlock;
1106 if (other->sk_shutdown & RCV_SHUTDOWN)
1107 goto out_unlock;
1108
1109 if (unix_recvq_full(other)) {
1110 err = -EAGAIN;
1111 if (!timeo)
1112 goto out_unlock;
1113
1114 timeo = unix_wait_for_peer(other, timeo);
1115
1116 err = sock_intr_errno(timeo);
1117 if (signal_pending(current))
1118 goto out;
1119 sock_put(other);
1120 goto restart;
1121 }
1122
1123 /* Latch our state.
1124
1125 It is tricky place. We need to grab our state lock and cannot
1126 drop lock on peer. It is dangerous because deadlock is
1127 possible. Connect to self case and simultaneous
1128 attempt to connect are eliminated by checking socket
1129 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1130 check this before attempt to grab lock.
1131
1132 Well, and we have to recheck the state after socket locked.
1133 */
1134 st = sk->sk_state;
1135
1136 switch (st) {
1137 case TCP_CLOSE:
1138 /* This is ok... continue with connect */
1139 break;
1140 case TCP_ESTABLISHED:
1141 /* Socket is already connected */
1142 err = -EISCONN;
1143 goto out_unlock;
1144 default:
1145 err = -EINVAL;
1146 goto out_unlock;
1147 }
1148
1149 unix_state_lock_nested(sk);
1150
1151 if (sk->sk_state != st) {
1152 unix_state_unlock(sk);
1153 unix_state_unlock(other);
1154 sock_put(other);
1155 goto restart;
1156 }
1157
1158 err = security_unix_stream_connect(sk, other, newsk);
1159 if (err) {
1160 unix_state_unlock(sk);
1161 goto out_unlock;
1162 }
1163
1164 /* The way is open! Fastly set all the necessary fields... */
1165
1166 sock_hold(sk);
1167 unix_peer(newsk) = sk;
1168 newsk->sk_state = TCP_ESTABLISHED;
1169 newsk->sk_type = sk->sk_type;
1170 init_peercred(newsk);
1171 newu = unix_sk(newsk);
1172 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1173 otheru = unix_sk(other);
1174
1175 /* copy address information from listening to new sock*/
1176 if (otheru->addr) {
1177 atomic_inc(&otheru->addr->refcnt);
1178 newu->addr = otheru->addr;
1179 }
1180 if (otheru->dentry) {
1181 newu->dentry = dget(otheru->dentry);
1182 newu->mnt = mntget(otheru->mnt);
1183 }
1184
1185 /* Set credentials */
1186 copy_peercred(sk, other);
1187
1188 sock->state = SS_CONNECTED;
1189 sk->sk_state = TCP_ESTABLISHED;
1190 sock_hold(newsk);
1191
1192 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1193 unix_peer(sk) = newsk;
1194
1195 unix_state_unlock(sk);
1196
1197 /* take ten and and send info to listening sock */
1198 spin_lock(&other->sk_receive_queue.lock);
1199 __skb_queue_tail(&other->sk_receive_queue, skb);
1200 spin_unlock(&other->sk_receive_queue.lock);
1201 unix_state_unlock(other);
1202 other->sk_data_ready(other, 0);
1203 sock_put(other);
1204 return 0;
1205
1206out_unlock:
1207 if (other)
1208 unix_state_unlock(other);
1209
1210out:
1211 kfree_skb(skb);
1212 if (newsk)
1213 unix_release_sock(newsk, 0);
1214 if (other)
1215 sock_put(other);
1216 return err;
1217}
1218
1219static int unix_socketpair(struct socket *socka, struct socket *sockb)
1220{
1221 struct sock *ska = socka->sk, *skb = sockb->sk;
1222
1223 /* Join our sockets back to back */
1224 sock_hold(ska);
1225 sock_hold(skb);
1226 unix_peer(ska) = skb;
1227 unix_peer(skb) = ska;
1228 init_peercred(ska);
1229 init_peercred(skb);
1230
1231 if (ska->sk_type != SOCK_DGRAM) {
1232 ska->sk_state = TCP_ESTABLISHED;
1233 skb->sk_state = TCP_ESTABLISHED;
1234 socka->state = SS_CONNECTED;
1235 sockb->state = SS_CONNECTED;
1236 }
1237 return 0;
1238}
1239
1240static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1241{
1242 struct sock *sk = sock->sk;
1243 struct sock *tsk;
1244 struct sk_buff *skb;
1245 int err;
1246
1247 err = -EOPNOTSUPP;
1248 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1249 goto out;
1250
1251 err = -EINVAL;
1252 if (sk->sk_state != TCP_LISTEN)
1253 goto out;
1254
1255 /* If socket state is TCP_LISTEN it cannot change (for now...),
1256 * so that no locks are necessary.
1257 */
1258
1259 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1260 if (!skb) {
1261 /* This means receive shutdown. */
1262 if (err == 0)
1263 err = -EINVAL;
1264 goto out;
1265 }
1266
1267 tsk = skb->sk;
1268 skb_free_datagram(sk, skb);
1269 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1270
1271 /* attach accepted sock to socket */
1272 unix_state_lock(tsk);
1273 newsock->state = SS_CONNECTED;
1274 sock_graft(tsk, newsock);
1275 unix_state_unlock(tsk);
1276 return 0;
1277
1278out:
1279 return err;
1280}


static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}
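
/*
 * Illustrative userspace sketch (example only, compiled out): querying the
 * local and peer names that unix_getname() above reports. For an unbound
 * socket only sun_family is filled in, so the returned length is just
 * sizeof(short).
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>

static void getname_demo(int fd)
{
	struct sockaddr_un addr;
	socklen_t len = sizeof(addr);

	if (getsockname(fd, (struct sockaddr *)&addr, &len) == 0)
		printf("local name length %u\n", (unsigned int)len);

	len = sizeof(addr);
	/* Fails with ENOTCONN if the socket has no peer. */
	if (getpeername(fd, (struct sockaddr *)&addr, &len) == 0)
		printf("peer name length %u\n", (unsigned int)len);
}
#endif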

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid = UNIXCB(skb).pid;
	scm.cred = UNIXCB(skb).cred;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, this calls into the VFS via fput(); fput() has been
	 * SMP-safe for a long time, so doing it here is fine. */
	scm_destroy(&scm);
	sock_wfree(skb);
}

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;
	UNIXCB(skb).pid = get_pid(scm->pid);
	UNIXCB(skb).cred = get_cred(scm->cred);
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}
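
/*
 * Illustrative userspace sketch (example only, compiled out): passing a
 * file descriptor with SCM_RIGHTS. scm_send() parses the control message,
 * and unix_attach_fds() above pins the descriptor while it is in flight.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static int send_fd_demo(int sock, int fd_to_pass)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	union {	/* union ensures correct cmsg alignment */
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	char dummy = '*';

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &dummy;	/* carry at least one byte of real data */
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}
#endif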

/*
 * Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* initialized only to silence a GCC warning */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_reset_transport_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 * Check with 1003.1g - what error should a
		 * datagram send return in this case?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}
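
/*
 * Illustrative userspace sketch (example only, compiled out): an
 * unconnected datagram send served by unix_dgram_sendmsg() above. Note
 * that a single datagram larger than roughly the send buffer
 * (len > sk_sndbuf - 32) is rejected with EMSGSIZE rather than
 * fragmented. The socket path is an arbitrary assumption.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int dgram_send_demo(int fd)
{
	struct sockaddr_un dst;

	memset(&dst, 0, sizeof(dst));
	dst.sun_family = AF_UNIX;
	strncpy(dst.sun_path, "/tmp/demo-dgram.sock",
		sizeof(dst.sun_path) - 1);

	/* May block (or fail with EAGAIN under MSG_DONTWAIT) when the
	 * receiver's queue is full - see unix_wait_for_peer() above. */
	if (sendto(fd, "ping", 4, 0,
		   (struct sockaddr *)&dst, sizeof(dst)) != 4)
		return -1;
	return 0;
}
#endif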


static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 * Optimisation for the fact that fewer than 0.01% of X
		 * messages typically need breaking up.
		 */

		size = len-sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 * Grab a buffer
		 */

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 * If you pass two sizes to sock_alloc_send_skb()
		 * it first tries to grab the large buffer with GFP_NOFS
		 * (which can fail easily); if that fails it grabs the
		 * fallback-size buffer, which is under a page and will
		 * succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));


		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}
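
/*
 * Illustrative userspace sketch (example only, compiled out): a stream
 * write loop matching the semantics above - a partial count is returned
 * if some data was queued before an error, and MSG_NOSIGNAL turns the
 * SIGPIPE on a shut-down peer into a plain EPIPE error.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>

static int stream_send_all(int fd, const char *buf, size_t len)
{
	size_t sent = 0;

	while (sent < len) {
		ssize_t n = send(fd, buf + sent, len - sent, MSG_NOSIGNAL);

		if (n <= 0)
			return -1;	/* e.g. EPIPE instead of SIGPIPE */
		sent += n;
	}
	return 0;
}
#endif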

static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
				  struct msghdr *msg, size_t size,
				  int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}

static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
		goto out;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable what to do on PEEK. We could:
		 *  - not return fds: good, but too simple 8)
		 *  - return fds, but not return them on read (the old
		 *    strategy, apparently wrong)
		 *  - clone fds (chosen here for now, as the most
		 *    universal solution)
		 *
		 * POSIX 1003.1g does not actually define this clearly
		 * at all. POSIX 1003.1g doesn't define a lot of things
		 * clearly, however!
		 */
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
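
/*
 * Illustrative userspace sketch (example only, compiled out): receiving a
 * descriptor passed with SCM_RIGHTS. unix_detach_fds() above hands the
 * in-flight file to scm_recv(), which installs it into our descriptor
 * table and reports the new number through the control message.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static int recv_fd_demo(int sock)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	union {	/* union ensures correct cmsg alignment */
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	char dummy;
	int fd = -1;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &dummy;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	if (recvmsg(sock, &msg, 0) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_RIGHTS)
			memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
	}
	return fd;	/* the received descriptor, or -1 */
}
#endif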

/*
 * Sleep until data has arrived. But check for races.
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}


static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_toiovec().
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(timeo);
		goto out;
	}

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 * POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)
			    ||  mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
unlock:
			unix_state_unlock(sk);
			break;
		}
		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
			    (UNIXCB(skb).cred != siocb->scm->cred)) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
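
/*
 * Illustrative userspace sketch (example only, compiled out): SO_RCVLOWAT
 * feeds the 'target' computed by sock_rcvlowat() above, so a blocking
 * stream read does not return until at least that many bytes have
 * arrived (or EOF, an error, or a signal intervenes).
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

static ssize_t read_at_least_16(int fd, char *buf, size_t len)
{
	int lowat = 16;

	if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
		       &lowat, sizeof(lowat)) < 0)
		return -1;

	/* Blocks until >= 16 bytes are queued, assuming len >= 16. */
	return read(fd, buf, len);
}
#endif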

static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

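	/*
	 * Userspace passes SHUT_RD (0), SHUT_WR (1) or SHUT_RDWR (2);
	 * adding one maps these onto the RCV_SHUTDOWN (1) and
	 * SEND_SHUTDOWN (2) bits: 1, 2 and 1|2 respectively.
	 */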
	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);

	if (!mode)
		return 0;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
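
/*
 * Illustrative userspace sketch (example only, compiled out): half-closing
 * a stream socket. shutdown(fd, SHUT_WR) sets SEND_SHUTDOWN on us and
 * RCV_SHUTDOWN on the peer, so the peer sees EOF while we can still read
 * whatever it sends back.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

static ssize_t request_then_drain(int fd, char *buf, size_t len)
{
	if (send(fd, "QUIT", 4, MSG_NOSIGNAL) != 4)
		return -1;

	/* Tell the peer we are done writing; our read side stays open. */
	if (shutdown(fd, SHUT_WR) < 0)
		return -1;

	return read(fd, buf, len);	/* peer's final answer, then EOF */
}
#endif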

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
	{
		struct sk_buff *skb;

		if (sk->sk_state == TCP_LISTEN) {
			err = -EINVAL;
			break;
		}

		spin_lock(&sk->sk_receive_queue.lock);
		if (sk->sk_type == SOCK_STREAM ||
		    sk->sk_type == SOCK_SEQPACKET) {
			skb_queue_walk(&sk->sk_receive_queue, skb)
				amount += skb->len;
		} else {
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				amount = skb->len;
		}
		spin_unlock(&sk->sk_receive_queue.lock);
		err = put_user(amount, (int __user *)arg);
		break;
	}

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
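
/*
 * Illustrative userspace sketch (example only, compiled out): SIOCINQ
 * reports the byte count computed above - all queued bytes for stream
 * and seqpacket sockets, but only the first datagram's size for
 * SOCK_DGRAM.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

static void queue_sizes_demo(int fd)
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0 &&
	    ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("readable: %d bytes, unsent: %d bytes\n", inq, outq);
}
#endif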

static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * We report the socket writable also when the other side has
	 * shut down the connection. This prevents sockets from getting
	 * stuck.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
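
/*
 * Illustrative userspace sketch (example only, compiled out): polling a
 * datagram socket. As implemented above, when sending to a peer that has
 * not connected back to us (the typical dgram client/server case),
 * POLLOUT is withheld while that peer's receive queue is full, so a
 * writer can sleep in poll() instead of spinning on EAGAIN.
 */
#if 0
#include <poll.h>

static int wait_writable(int fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLOUT,
	};

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;	/* timeout or error */
	return (pfd.revents & POLLOUT) ? 0 : -1;
}
#endif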

#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;
	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
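
/*
 * Illustrative userspace sketch (example only, compiled out): dumping the
 * table printed by unix_seq_show() above. Each line carries the refcount,
 * flags, type, state and inode, plus the path for bound sockets, with a
 * leading '@' marking abstract names.
 */
#if 0
#include <stdio.h>

static void dump_unix_sockets(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/unix", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
#endif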

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};


static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there. */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);