net/phonet/socket.c (v4.6)
  1/*
  2 * File: socket.c
  3 *
  4 * Phonet sockets
  5 *
  6 * Copyright (C) 2008 Nokia Corporation.
  7 *
  8 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
  9 *          Rémi Denis-Courmont
 10 *
 11 * This program is free software; you can redistribute it and/or
 12 * modify it under the terms of the GNU General Public License
 13 * version 2 as published by the Free Software Foundation.
 14 *
 15 * This program is distributed in the hope that it will be useful, but
 16 * WITHOUT ANY WARRANTY; without even the implied warranty of
 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 18 * General Public License for more details.
 19 *
 20 * You should have received a copy of the GNU General Public License
 21 * along with this program; if not, write to the Free Software
 22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 23 * 02110-1301 USA
 24 */
 25
 26#include <linux/gfp.h>
 27#include <linux/kernel.h>
 28#include <linux/net.h>
 29#include <linux/poll.h>
 30#include <net/sock.h>
 31#include <net/tcp_states.h>
 32
 33#include <linux/phonet.h>
 34#include <linux/export.h>
 35#include <net/phonet/phonet.h>
 36#include <net/phonet/pep.h>
 37#include <net/phonet/pn_dev.h>
 38
 39static int pn_socket_release(struct socket *sock)
 40{
 41	struct sock *sk = sock->sk;
 42
 43	if (sk) {
 44		sock->sk = NULL;
 45		sk->sk_prot->close(sk, 0);
 46	}
 47	return 0;
 48}
 49
 50#define PN_HASHSIZE	16
 51#define PN_HASHMASK	(PN_HASHSIZE-1)
 52
 53
 54static struct  {
 55	struct hlist_head hlist[PN_HASHSIZE];
 56	struct mutex lock;
 57} pnsocks;
 58
 59void __init pn_sock_init(void)
 60{
 61	unsigned int i;
 62
 63	for (i = 0; i < PN_HASHSIZE; i++)
 64		INIT_HLIST_HEAD(pnsocks.hlist + i);
 65	mutex_init(&pnsocks.lock);
 66}
 67
 68static struct hlist_head *pn_hash_list(u16 obj)
 69{
 70	return pnsocks.hlist + (obj & PN_HASHMASK);
 71}
 72
 73/*
 74 * Find address based on socket address, match only certain fields.
 75 * Also grab sock if it was found. Remember to sock_put it later.
 76 */
 77struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 78{
 79	struct sock *sknode;
 80	struct sock *rval = NULL;
 81	u16 obj = pn_sockaddr_get_object(spn);
 82	u8 res = spn->spn_resource;
 83	struct hlist_head *hlist = pn_hash_list(obj);
 84
 85	rcu_read_lock();
 86	sk_for_each_rcu(sknode, hlist) {
 87		struct pn_sock *pn = pn_sk(sknode);
 88		BUG_ON(!pn->sobject); /* unbound socket */
 89
 90		if (!net_eq(sock_net(sknode), net))
 91			continue;
 92		if (pn_port(obj)) {
 93			/* Look up socket by port */
 94			if (pn_port(pn->sobject) != pn_port(obj))
 95				continue;
 96		} else {
 97			/* If port is zero, look up by resource */
 98			if (pn->resource != res)
 99				continue;
100		}
101		if (pn_addr(pn->sobject) &&
102		    pn_addr(pn->sobject) != pn_addr(obj))
103			continue;
104
105		rval = sknode;
106		sock_hold(sknode);
107		break;
108	}
109	rcu_read_unlock();
110
111	return rval;
112}
113
114/* Deliver a broadcast packet (only in bottom-half) */
115void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
116{
117	struct hlist_head *hlist = pnsocks.hlist;
118	unsigned int h;
119
120	rcu_read_lock();
121	for (h = 0; h < PN_HASHSIZE; h++) {
122		struct sock *sknode;
123
124		sk_for_each(sknode, hlist) {
125			struct sk_buff *clone;
126
127			if (!net_eq(sock_net(sknode), net))
128				continue;
129			if (!sock_flag(sknode, SOCK_BROADCAST))
130				continue;
131
132			clone = skb_clone(skb, GFP_ATOMIC);
133			if (clone) {
134				sock_hold(sknode);
135				sk_receive_skb(sknode, clone, 0);
136			}
137		}
138		hlist++;
139	}
140	rcu_read_unlock();
141}
142
143int pn_sock_hash(struct sock *sk)
144{
145	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
146
147	mutex_lock(&pnsocks.lock);
148	sk_add_node_rcu(sk, hlist);
149	mutex_unlock(&pnsocks.lock);
150
151	return 0;
152}
153EXPORT_SYMBOL(pn_sock_hash);
154
155void pn_sock_unhash(struct sock *sk)
156{
157	mutex_lock(&pnsocks.lock);
158	sk_del_node_init_rcu(sk);
159	mutex_unlock(&pnsocks.lock);
160	pn_sock_unbind_all_res(sk);
161	synchronize_rcu();
162}
163EXPORT_SYMBOL(pn_sock_unhash);
164
165static DEFINE_MUTEX(port_mutex);
166
167static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
168{
169	struct sock *sk = sock->sk;
170	struct pn_sock *pn = pn_sk(sk);
171	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
172	int err;
173	u16 handle;
174	u8 saddr;
175
176	if (sk->sk_prot->bind)
177		return sk->sk_prot->bind(sk, addr, len);
178
179	if (len < sizeof(struct sockaddr_pn))
180		return -EINVAL;
181	if (spn->spn_family != AF_PHONET)
182		return -EAFNOSUPPORT;
183
184	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
185	saddr = pn_addr(handle);
186	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
187		return -EADDRNOTAVAIL;
188
189	lock_sock(sk);
190	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
191		err = -EINVAL; /* attempt to rebind */
192		goto out;
193	}
194	WARN_ON(sk_hashed(sk));
195	mutex_lock(&port_mutex);
196	err = sk->sk_prot->get_port(sk, pn_port(handle));
197	if (err)
198		goto out_port;
199
200	/* get_port() sets the port, bind() sets the address if applicable */
201	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
202	pn->resource = spn->spn_resource;
203
204	/* Enable RX on the socket */
205	err = sk->sk_prot->hash(sk);
206out_port:
207	mutex_unlock(&port_mutex);
208out:
209	release_sock(sk);
210	return err;
211}
212
213static int pn_socket_autobind(struct socket *sock)
214{
215	struct sockaddr_pn sa;
216	int err;
217
218	memset(&sa, 0, sizeof(sa));
219	sa.spn_family = AF_PHONET;
220	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
221				sizeof(struct sockaddr_pn));
222	if (err != -EINVAL)
223		return err;
224	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
225	return 0; /* socket was already bound */
226}
227
228static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
229		int len, int flags)
230{
231	struct sock *sk = sock->sk;
232	struct pn_sock *pn = pn_sk(sk);
233	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
234	struct task_struct *tsk = current;
235	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
236	int err;
237
238	if (pn_socket_autobind(sock))
239		return -ENOBUFS;
240	if (len < sizeof(struct sockaddr_pn))
241		return -EINVAL;
242	if (spn->spn_family != AF_PHONET)
243		return -EAFNOSUPPORT;
244
245	lock_sock(sk);
246
247	switch (sock->state) {
248	case SS_UNCONNECTED:
249		if (sk->sk_state != TCP_CLOSE) {
250			err = -EISCONN;
251			goto out;
252		}
253		break;
254	case SS_CONNECTING:
255		err = -EALREADY;
256		goto out;
257	default:
258		err = -EISCONN;
259		goto out;
260	}
261
262	pn->dobject = pn_sockaddr_get_object(spn);
263	pn->resource = pn_sockaddr_get_resource(spn);
264	sock->state = SS_CONNECTING;
265
266	err = sk->sk_prot->connect(sk, addr, len);
267	if (err) {
268		sock->state = SS_UNCONNECTED;
269		pn->dobject = 0;
270		goto out;
271	}
272
273	while (sk->sk_state == TCP_SYN_SENT) {
274		DEFINE_WAIT(wait);
275
276		if (!timeo) {
277			err = -EINPROGRESS;
278			goto out;
279		}
280		if (signal_pending(tsk)) {
281			err = sock_intr_errno(timeo);
282			goto out;
283		}
284
285		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
286						TASK_INTERRUPTIBLE);
287		release_sock(sk);
288		timeo = schedule_timeout(timeo);
289		lock_sock(sk);
290		finish_wait(sk_sleep(sk), &wait);
291	}
292
293	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
294		err = 0;
295	else if (sk->sk_state == TCP_CLOSE_WAIT)
296		err = -ECONNRESET;
297	else
298		err = -ECONNREFUSED;
299	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
300out:
301	release_sock(sk);
302	return err;
303}
304
305static int pn_socket_accept(struct socket *sock, struct socket *newsock,
306				int flags)
307{
308	struct sock *sk = sock->sk;
309	struct sock *newsk;
310	int err;
311
312	if (unlikely(sk->sk_state != TCP_LISTEN))
313		return -EINVAL;
314
315	newsk = sk->sk_prot->accept(sk, flags, &err);
316	if (!newsk)
317		return err;
318
319	lock_sock(newsk);
320	sock_graft(newsk, newsock);
321	newsock->state = SS_CONNECTED;
322	release_sock(newsk);
323	return 0;
324}
325
326static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
327				int *sockaddr_len, int peer)
328{
329	struct sock *sk = sock->sk;
330	struct pn_sock *pn = pn_sk(sk);
331
332	memset(addr, 0, sizeof(struct sockaddr_pn));
333	addr->sa_family = AF_PHONET;
334	if (!peer) /* Race with bind() here is userland's problem. */
335		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
336					pn->sobject);
337
338	*sockaddr_len = sizeof(struct sockaddr_pn);
339	return 0;
340}
341
342static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
343					poll_table *wait)
344{
345	struct sock *sk = sock->sk;
346	struct pep_sock *pn = pep_sk(sk);
347	unsigned int mask = 0;
348
349	poll_wait(file, sk_sleep(sk), wait);
350
351	if (sk->sk_state == TCP_CLOSE)
352		return POLLERR;
353	if (!skb_queue_empty(&sk->sk_receive_queue))
354		mask |= POLLIN | POLLRDNORM;
355	if (!skb_queue_empty(&pn->ctrlreq_queue))
356		mask |= POLLPRI;
357	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
358		return POLLHUP;
359
360	if (sk->sk_state == TCP_ESTABLISHED &&
361		atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
362		atomic_read(&pn->tx_credits))
363		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
364
365	return mask;
366}
367
368static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
369				unsigned long arg)
370{
371	struct sock *sk = sock->sk;
372	struct pn_sock *pn = pn_sk(sk);
373
374	if (cmd == SIOCPNGETOBJECT) {
375		struct net_device *dev;
376		u16 handle;
377		u8 saddr;
378
379		if (get_user(handle, (__u16 __user *)arg))
380			return -EFAULT;
381
382		lock_sock(sk);
383		if (sk->sk_bound_dev_if)
384			dev = dev_get_by_index(sock_net(sk),
385						sk->sk_bound_dev_if);
386		else
387			dev = phonet_device_get(sock_net(sk));
388		if (dev && (dev->flags & IFF_UP))
389			saddr = phonet_address_get(dev, pn_addr(handle));
390		else
391			saddr = PN_NO_ADDR;
392		release_sock(sk);
393
394		if (dev)
395			dev_put(dev);
396		if (saddr == PN_NO_ADDR)
397			return -EHOSTUNREACH;
398
399		handle = pn_object(saddr, pn_port(pn->sobject));
400		return put_user(handle, (__u16 __user *)arg);
401	}
402
403	return sk->sk_prot->ioctl(sk, cmd, arg);
404}
405
406static int pn_socket_listen(struct socket *sock, int backlog)
407{
408	struct sock *sk = sock->sk;
409	int err = 0;
410
411	if (pn_socket_autobind(sock))
412		return -ENOBUFS;
413
414	lock_sock(sk);
415	if (sock->state != SS_UNCONNECTED) {
416		err = -EINVAL;
417		goto out;
418	}
419
420	if (sk->sk_state != TCP_LISTEN) {
421		sk->sk_state = TCP_LISTEN;
422		sk->sk_ack_backlog = 0;
423	}
424	sk->sk_max_ack_backlog = backlog;
425out:
426	release_sock(sk);
427	return err;
428}
429
430static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
431			     size_t total_len)
432{
433	struct sock *sk = sock->sk;
434
435	if (pn_socket_autobind(sock))
436		return -EAGAIN;
437
438	return sk->sk_prot->sendmsg(sk, m, total_len);
439}
440
441const struct proto_ops phonet_dgram_ops = {
442	.family		= AF_PHONET,
443	.owner		= THIS_MODULE,
444	.release	= pn_socket_release,
445	.bind		= pn_socket_bind,
446	.connect	= sock_no_connect,
447	.socketpair	= sock_no_socketpair,
448	.accept		= sock_no_accept,
449	.getname	= pn_socket_getname,
450	.poll		= datagram_poll,
451	.ioctl		= pn_socket_ioctl,
452	.listen		= sock_no_listen,
453	.shutdown	= sock_no_shutdown,
454	.setsockopt	= sock_no_setsockopt,
455	.getsockopt	= sock_no_getsockopt,
456#ifdef CONFIG_COMPAT
457	.compat_setsockopt = sock_no_setsockopt,
458	.compat_getsockopt = sock_no_getsockopt,
459#endif
460	.sendmsg	= pn_socket_sendmsg,
461	.recvmsg	= sock_common_recvmsg,
462	.mmap		= sock_no_mmap,
463	.sendpage	= sock_no_sendpage,
464};
465
466const struct proto_ops phonet_stream_ops = {
467	.family		= AF_PHONET,
468	.owner		= THIS_MODULE,
469	.release	= pn_socket_release,
470	.bind		= pn_socket_bind,
471	.connect	= pn_socket_connect,
472	.socketpair	= sock_no_socketpair,
473	.accept		= pn_socket_accept,
474	.getname	= pn_socket_getname,
475	.poll		= pn_socket_poll,
476	.ioctl		= pn_socket_ioctl,
477	.listen		= pn_socket_listen,
478	.shutdown	= sock_no_shutdown,
479	.setsockopt	= sock_common_setsockopt,
480	.getsockopt	= sock_common_getsockopt,
481#ifdef CONFIG_COMPAT
482	.compat_setsockopt = compat_sock_common_setsockopt,
483	.compat_getsockopt = compat_sock_common_getsockopt,
484#endif
485	.sendmsg	= pn_socket_sendmsg,
486	.recvmsg	= sock_common_recvmsg,
487	.mmap		= sock_no_mmap,
488	.sendpage	= sock_no_sendpage,
489};
490EXPORT_SYMBOL(phonet_stream_ops);
491
492/* allocate port for a socket */
493int pn_sock_get_port(struct sock *sk, unsigned short sport)
494{
495	static int port_cur;
496	struct net *net = sock_net(sk);
497	struct pn_sock *pn = pn_sk(sk);
498	struct sockaddr_pn try_sa;
499	struct sock *tmpsk;
500
501	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
502	try_sa.spn_family = AF_PHONET;
503	WARN_ON(!mutex_is_locked(&port_mutex));
504	if (!sport) {
505		/* search free port */
506		int port, pmin, pmax;
507
508		phonet_get_local_port_range(&pmin, &pmax);
509		for (port = pmin; port <= pmax; port++) {
510			port_cur++;
511			if (port_cur < pmin || port_cur > pmax)
512				port_cur = pmin;
513
514			pn_sockaddr_set_port(&try_sa, port_cur);
515			tmpsk = pn_find_sock_by_sa(net, &try_sa);
516			if (tmpsk == NULL) {
517				sport = port_cur;
518				goto found;
519			} else
520				sock_put(tmpsk);
521		}
522	} else {
523		/* try to find specific port */
524		pn_sockaddr_set_port(&try_sa, sport);
525		tmpsk = pn_find_sock_by_sa(net, &try_sa);
526		if (tmpsk == NULL)
527			/* No sock there! We can use that port... */
528			goto found;
529		else
530			sock_put(tmpsk);
531	}
532	/* the port must be in use already */
533	return -EADDRINUSE;
534
535found:
536	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
537	return 0;
538}
539EXPORT_SYMBOL(pn_sock_get_port);
540
541#ifdef CONFIG_PROC_FS
542static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
543{
544	struct net *net = seq_file_net(seq);
545	struct hlist_head *hlist = pnsocks.hlist;
546	struct sock *sknode;
547	unsigned int h;
548
549	for (h = 0; h < PN_HASHSIZE; h++) {
550		sk_for_each_rcu(sknode, hlist) {
551			if (!net_eq(net, sock_net(sknode)))
552				continue;
553			if (!pos)
554				return sknode;
555			pos--;
556		}
557		hlist++;
558	}
559	return NULL;
560}
561
562static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
563{
564	struct net *net = seq_file_net(seq);
565
566	do
567		sk = sk_next(sk);
568	while (sk && !net_eq(net, sock_net(sk)));
569
570	return sk;
571}
572
573static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
574	__acquires(rcu)
575{
576	rcu_read_lock();
577	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
578}
579
580static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
581{
582	struct sock *sk;
583
584	if (v == SEQ_START_TOKEN)
585		sk = pn_sock_get_idx(seq, 0);
586	else
587		sk = pn_sock_get_next(seq, v);
588	(*pos)++;
589	return sk;
590}
591
592static void pn_sock_seq_stop(struct seq_file *seq, void *v)
593	__releases(rcu)
594{
595	rcu_read_unlock();
596}
597
598static int pn_sock_seq_show(struct seq_file *seq, void *v)
599{
600	seq_setwidth(seq, 127);
601	if (v == SEQ_START_TOKEN)
602		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
603			"  uid inode ref pointer drops");
604	else {
605		struct sock *sk = v;
606		struct pn_sock *pn = pn_sk(sk);
607
608		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
609			"%d %pK %d",
610			sk->sk_protocol, pn->sobject, pn->dobject,
611			pn->resource, sk->sk_state,
612			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
613			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
614			sock_i_ino(sk),
615			atomic_read(&sk->sk_refcnt), sk,
616			atomic_read(&sk->sk_drops));
617	}
618	seq_pad(seq, '\n');
619	return 0;
620}
621
622static const struct seq_operations pn_sock_seq_ops = {
623	.start = pn_sock_seq_start,
624	.next = pn_sock_seq_next,
625	.stop = pn_sock_seq_stop,
626	.show = pn_sock_seq_show,
627};
628
629static int pn_sock_open(struct inode *inode, struct file *file)
630{
631	return seq_open_net(inode, file, &pn_sock_seq_ops,
632				sizeof(struct seq_net_private));
633}
634
635const struct file_operations pn_sock_seq_fops = {
636	.owner = THIS_MODULE,
637	.open = pn_sock_open,
638	.read = seq_read,
639	.llseek = seq_lseek,
640	.release = seq_release_net,
641};
642#endif
643
644static struct  {
645	struct sock *sk[256];
646} pnres;
647
648/*
649 * Find and hold socket based on resource.
650 */
651struct sock *pn_find_sock_by_res(struct net *net, u8 res)
652{
653	struct sock *sk;
654
655	if (!net_eq(net, &init_net))
656		return NULL;
657
658	rcu_read_lock();
659	sk = rcu_dereference(pnres.sk[res]);
660	if (sk)
661		sock_hold(sk);
662	rcu_read_unlock();
663	return sk;
664}
665
666static DEFINE_MUTEX(resource_mutex);
667
668int pn_sock_bind_res(struct sock *sk, u8 res)
669{
670	int ret = -EADDRINUSE;
671
672	if (!net_eq(sock_net(sk), &init_net))
673		return -ENOIOCTLCMD;
674	if (!capable(CAP_SYS_ADMIN))
675		return -EPERM;
676	if (pn_socket_autobind(sk->sk_socket))
677		return -EAGAIN;
678
679	mutex_lock(&resource_mutex);
680	if (pnres.sk[res] == NULL) {
681		sock_hold(sk);
682		rcu_assign_pointer(pnres.sk[res], sk);
683		ret = 0;
684	}
685	mutex_unlock(&resource_mutex);
686	return ret;
687}
688
689int pn_sock_unbind_res(struct sock *sk, u8 res)
690{
691	int ret = -ENOENT;
692
693	if (!capable(CAP_SYS_ADMIN))
694		return -EPERM;
695
696	mutex_lock(&resource_mutex);
697	if (pnres.sk[res] == sk) {
698		RCU_INIT_POINTER(pnres.sk[res], NULL);
699		ret = 0;
700	}
701	mutex_unlock(&resource_mutex);
702
703	if (ret == 0) {
704		synchronize_rcu();
705		sock_put(sk);
706	}
707	return ret;
708}
709
710void pn_sock_unbind_all_res(struct sock *sk)
711{
712	unsigned int res, match = 0;
713
714	mutex_lock(&resource_mutex);
715	for (res = 0; res < 256; res++) {
716		if (pnres.sk[res] == sk) {
717			RCU_INIT_POINTER(pnres.sk[res], NULL);
718			match++;
719		}
720	}
721	mutex_unlock(&resource_mutex);
722
723	while (match > 0) {
724		__sock_put(sk);
725		match--;
726	}
727	/* Caller is responsible for RCU sync before final sock_put() */
728}
729
730#ifdef CONFIG_PROC_FS
731static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
732{
733	struct net *net = seq_file_net(seq);
734	unsigned int i;
735
736	if (!net_eq(net, &init_net))
737		return NULL;
738
739	for (i = 0; i < 256; i++) {
740		if (pnres.sk[i] == NULL)
741			continue;
742		if (!pos)
743			return pnres.sk + i;
744		pos--;
745	}
746	return NULL;
747}
748
749static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
750{
751	struct net *net = seq_file_net(seq);
752	unsigned int i;
753
754	BUG_ON(!net_eq(net, &init_net));
755
756	for (i = (sk - pnres.sk) + 1; i < 256; i++)
757		if (pnres.sk[i])
758			return pnres.sk + i;
759	return NULL;
760}
761
762static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
763	__acquires(resource_mutex)
764{
765	mutex_lock(&resource_mutex);
766	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
767}
768
769static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
770{
771	struct sock **sk;
772
773	if (v == SEQ_START_TOKEN)
774		sk = pn_res_get_idx(seq, 0);
775	else
776		sk = pn_res_get_next(seq, v);
777	(*pos)++;
778	return sk;
779}
780
781static void pn_res_seq_stop(struct seq_file *seq, void *v)
782	__releases(resource_mutex)
783{
784	mutex_unlock(&resource_mutex);
785}
786
787static int pn_res_seq_show(struct seq_file *seq, void *v)
788{
789	seq_setwidth(seq, 63);
790	if (v == SEQ_START_TOKEN)
791		seq_puts(seq, "rs   uid inode");
792	else {
793		struct sock **psk = v;
794		struct sock *sk = *psk;
795
796		seq_printf(seq, "%02X %5u %lu",
797			   (int) (psk - pnres.sk),
798			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
799			   sock_i_ino(sk));
800	}
801	seq_pad(seq, '\n');
802	return 0;
803}
804
805static const struct seq_operations pn_res_seq_ops = {
806	.start = pn_res_seq_start,
807	.next = pn_res_seq_next,
808	.stop = pn_res_seq_stop,
809	.show = pn_res_seq_show,
810};
811
812static int pn_res_open(struct inode *inode, struct file *file)
813{
814	return seq_open_net(inode, file, &pn_res_seq_ops,
815				sizeof(struct seq_net_private));
816}
817
818const struct file_operations pn_res_seq_fops = {
819	.owner = THIS_MODULE,
820	.open = pn_res_open,
821	.read = seq_read,
822	.llseek = seq_lseek,
823	.release = seq_release_net,
824};
825#endif
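
As a hedged illustration of the userspace side of the handlers above, the sketch below binds an AF_PHONET datagram socket with a zeroed address (so pn_sock_get_port() picks a free port, the same path pn_socket_autobind() takes) and then reads the resulting object back through pn_socket_getname(). It assumes only the struct sockaddr_pn layout and PN_PROTO_PHONET from <linux/phonet.h>; it is not part of the kernel tree.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/phonet.h>

int main(void)
{
	struct sockaddr_pn addr;
	socklen_t len = sizeof(addr);
	int fd = socket(AF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);

	if (fd < 0) {
		perror("socket(AF_PHONET)");
		return 1;
	}

	/* A zeroed address lets pn_sock_get_port() pick a free port,
	 * just as pn_socket_autobind() would on the first send. */
	memset(&addr, 0, sizeof(addr));
	addr.spn_family = AF_PHONET;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");

	/* pn_socket_getname() reports the bound object back to us. */
	if (getsockname(fd, (struct sockaddr *)&addr, &len) == 0)
		printf("bound: dev %u obj %u\n", addr.spn_dev, addr.spn_obj);

	close(fd);
	return 0;
}
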
net/phonet/socket.c (v4.17)
  1/*
  2 * File: socket.c
  3 *
  4 * Phonet sockets
  5 *
  6 * Copyright (C) 2008 Nokia Corporation.
  7 *
  8 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
  9 *          Rémi Denis-Courmont
 10 *
 11 * This program is free software; you can redistribute it and/or
 12 * modify it under the terms of the GNU General Public License
 13 * version 2 as published by the Free Software Foundation.
 14 *
 15 * This program is distributed in the hope that it will be useful, but
 16 * WITHOUT ANY WARRANTY; without even the implied warranty of
 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 18 * General Public License for more details.
 19 *
 20 * You should have received a copy of the GNU General Public License
 21 * along with this program; if not, write to the Free Software
 22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 23 * 02110-1301 USA
 24 */
 25
 26#include <linux/gfp.h>
 27#include <linux/kernel.h>
 28#include <linux/net.h>
 29#include <linux/poll.h>
 30#include <linux/sched/signal.h>
 31
 32#include <net/sock.h>
 33#include <net/tcp_states.h>
 34
 35#include <linux/phonet.h>
 36#include <linux/export.h>
 37#include <net/phonet/phonet.h>
 38#include <net/phonet/pep.h>
 39#include <net/phonet/pn_dev.h>
 40
 41static int pn_socket_release(struct socket *sock)
 42{
 43	struct sock *sk = sock->sk;
 44
 45	if (sk) {
 46		sock->sk = NULL;
 47		sk->sk_prot->close(sk, 0);
 48	}
 49	return 0;
 50}
 51
 52#define PN_HASHSIZE	16
 53#define PN_HASHMASK	(PN_HASHSIZE-1)
 54
 55
 56static struct  {
 57	struct hlist_head hlist[PN_HASHSIZE];
 58	struct mutex lock;
 59} pnsocks;
 60
 61void __init pn_sock_init(void)
 62{
 63	unsigned int i;
 64
 65	for (i = 0; i < PN_HASHSIZE; i++)
 66		INIT_HLIST_HEAD(pnsocks.hlist + i);
 67	mutex_init(&pnsocks.lock);
 68}
 69
 70static struct hlist_head *pn_hash_list(u16 obj)
 71{
 72	return pnsocks.hlist + (obj & PN_HASHMASK);
 73}
 74
 75/*
 76 * Find address based on socket address, match only certain fields.
 77 * Also grab sock if it was found. Remember to sock_put it later.
 78 */
 79struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 80{
 81	struct sock *sknode;
 82	struct sock *rval = NULL;
 83	u16 obj = pn_sockaddr_get_object(spn);
 84	u8 res = spn->spn_resource;
 85	struct hlist_head *hlist = pn_hash_list(obj);
 86
 87	rcu_read_lock();
 88	sk_for_each_rcu(sknode, hlist) {
 89		struct pn_sock *pn = pn_sk(sknode);
 90		BUG_ON(!pn->sobject); /* unbound socket */
 91
 92		if (!net_eq(sock_net(sknode), net))
 93			continue;
 94		if (pn_port(obj)) {
 95			/* Look up socket by port */
 96			if (pn_port(pn->sobject) != pn_port(obj))
 97				continue;
 98		} else {
 99			/* If port is zero, look up by resource */
100			if (pn->resource != res)
101				continue;
102		}
103		if (pn_addr(pn->sobject) &&
104		    pn_addr(pn->sobject) != pn_addr(obj))
105			continue;
106
107		rval = sknode;
108		sock_hold(sknode);
109		break;
110	}
111	rcu_read_unlock();
112
113	return rval;
114}
115
116/* Deliver a broadcast packet (only in bottom-half) */
117void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
118{
119	struct hlist_head *hlist = pnsocks.hlist;
120	unsigned int h;
121
122	rcu_read_lock();
123	for (h = 0; h < PN_HASHSIZE; h++) {
124		struct sock *sknode;
125
126		sk_for_each(sknode, hlist) {
127			struct sk_buff *clone;
128
129			if (!net_eq(sock_net(sknode), net))
130				continue;
131			if (!sock_flag(sknode, SOCK_BROADCAST))
132				continue;
133
134			clone = skb_clone(skb, GFP_ATOMIC);
135			if (clone) {
136				sock_hold(sknode);
137				sk_receive_skb(sknode, clone, 0);
138			}
139		}
140		hlist++;
141	}
142	rcu_read_unlock();
143}
144
145int pn_sock_hash(struct sock *sk)
146{
147	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
148
149	mutex_lock(&pnsocks.lock);
150	sk_add_node_rcu(sk, hlist);
151	mutex_unlock(&pnsocks.lock);
152
153	return 0;
154}
155EXPORT_SYMBOL(pn_sock_hash);
156
157void pn_sock_unhash(struct sock *sk)
158{
159	mutex_lock(&pnsocks.lock);
160	sk_del_node_init_rcu(sk);
161	mutex_unlock(&pnsocks.lock);
162	pn_sock_unbind_all_res(sk);
163	synchronize_rcu();
164}
165EXPORT_SYMBOL(pn_sock_unhash);
166
167static DEFINE_MUTEX(port_mutex);
168
169static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
170{
171	struct sock *sk = sock->sk;
172	struct pn_sock *pn = pn_sk(sk);
173	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
174	int err;
175	u16 handle;
176	u8 saddr;
177
178	if (sk->sk_prot->bind)
179		return sk->sk_prot->bind(sk, addr, len);
180
181	if (len < sizeof(struct sockaddr_pn))
182		return -EINVAL;
183	if (spn->spn_family != AF_PHONET)
184		return -EAFNOSUPPORT;
185
186	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
187	saddr = pn_addr(handle);
188	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
189		return -EADDRNOTAVAIL;
190
191	lock_sock(sk);
192	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
193		err = -EINVAL; /* attempt to rebind */
194		goto out;
195	}
196	WARN_ON(sk_hashed(sk));
197	mutex_lock(&port_mutex);
198	err = sk->sk_prot->get_port(sk, pn_port(handle));
199	if (err)
200		goto out_port;
201
202	/* get_port() sets the port, bind() sets the address if applicable */
203	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
204	pn->resource = spn->spn_resource;
205
206	/* Enable RX on the socket */
207	err = sk->sk_prot->hash(sk);
208out_port:
209	mutex_unlock(&port_mutex);
210out:
211	release_sock(sk);
212	return err;
213}
214
215static int pn_socket_autobind(struct socket *sock)
216{
217	struct sockaddr_pn sa;
218	int err;
219
220	memset(&sa, 0, sizeof(sa));
221	sa.spn_family = AF_PHONET;
222	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
223				sizeof(struct sockaddr_pn));
224	if (err != -EINVAL)
225		return err;
226	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
227	return 0; /* socket was already bound */
228}
229
230static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
231		int len, int flags)
232{
233	struct sock *sk = sock->sk;
234	struct pn_sock *pn = pn_sk(sk);
235	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
236	struct task_struct *tsk = current;
237	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
238	int err;
239
240	if (pn_socket_autobind(sock))
241		return -ENOBUFS;
242	if (len < sizeof(struct sockaddr_pn))
243		return -EINVAL;
244	if (spn->spn_family != AF_PHONET)
245		return -EAFNOSUPPORT;
246
247	lock_sock(sk);
248
249	switch (sock->state) {
250	case SS_UNCONNECTED:
251		if (sk->sk_state != TCP_CLOSE) {
252			err = -EISCONN;
253			goto out;
254		}
255		break;
256	case SS_CONNECTING:
257		err = -EALREADY;
258		goto out;
259	default:
260		err = -EISCONN;
261		goto out;
262	}
263
264	pn->dobject = pn_sockaddr_get_object(spn);
265	pn->resource = pn_sockaddr_get_resource(spn);
266	sock->state = SS_CONNECTING;
267
268	err = sk->sk_prot->connect(sk, addr, len);
269	if (err) {
270		sock->state = SS_UNCONNECTED;
271		pn->dobject = 0;
272		goto out;
273	}
274
275	while (sk->sk_state == TCP_SYN_SENT) {
276		DEFINE_WAIT(wait);
277
278		if (!timeo) {
279			err = -EINPROGRESS;
280			goto out;
281		}
282		if (signal_pending(tsk)) {
283			err = sock_intr_errno(timeo);
284			goto out;
285		}
286
287		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
288						TASK_INTERRUPTIBLE);
289		release_sock(sk);
290		timeo = schedule_timeout(timeo);
291		lock_sock(sk);
292		finish_wait(sk_sleep(sk), &wait);
293	}
294
295	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
296		err = 0;
297	else if (sk->sk_state == TCP_CLOSE_WAIT)
298		err = -ECONNRESET;
299	else
300		err = -ECONNREFUSED;
301	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
302out:
303	release_sock(sk);
304	return err;
305}
306
307static int pn_socket_accept(struct socket *sock, struct socket *newsock,
308			    int flags, bool kern)
309{
310	struct sock *sk = sock->sk;
311	struct sock *newsk;
312	int err;
313
314	if (unlikely(sk->sk_state != TCP_LISTEN))
315		return -EINVAL;
316
317	newsk = sk->sk_prot->accept(sk, flags, &err, kern);
318	if (!newsk)
319		return err;
320
321	lock_sock(newsk);
322	sock_graft(newsk, newsock);
323	newsock->state = SS_CONNECTED;
324	release_sock(newsk);
325	return 0;
326}
327
328static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
329				int peer)
330{
331	struct sock *sk = sock->sk;
332	struct pn_sock *pn = pn_sk(sk);
333
334	memset(addr, 0, sizeof(struct sockaddr_pn));
335	addr->sa_family = AF_PHONET;
336	if (!peer) /* Race with bind() here is userland's problem. */
337		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
338					pn->sobject);
339
340	return sizeof(struct sockaddr_pn);
341}
342
343static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
344					poll_table *wait)
345{
346	struct sock *sk = sock->sk;
347	struct pep_sock *pn = pep_sk(sk);
348	__poll_t mask = 0;
349
350	poll_wait(file, sk_sleep(sk), wait);
351
352	if (sk->sk_state == TCP_CLOSE)
353		return EPOLLERR;
354	if (!skb_queue_empty(&sk->sk_receive_queue))
355		mask |= EPOLLIN | EPOLLRDNORM;
356	if (!skb_queue_empty(&pn->ctrlreq_queue))
357		mask |= EPOLLPRI;
358	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
359		return EPOLLHUP;
360
361	if (sk->sk_state == TCP_ESTABLISHED &&
362		refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
363		atomic_read(&pn->tx_credits))
364		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
365
366	return mask;
367}
368
369static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
370				unsigned long arg)
371{
372	struct sock *sk = sock->sk;
373	struct pn_sock *pn = pn_sk(sk);
374
375	if (cmd == SIOCPNGETOBJECT) {
376		struct net_device *dev;
377		u16 handle;
378		u8 saddr;
379
380		if (get_user(handle, (__u16 __user *)arg))
381			return -EFAULT;
382
383		lock_sock(sk);
384		if (sk->sk_bound_dev_if)
385			dev = dev_get_by_index(sock_net(sk),
386						sk->sk_bound_dev_if);
387		else
388			dev = phonet_device_get(sock_net(sk));
389		if (dev && (dev->flags & IFF_UP))
390			saddr = phonet_address_get(dev, pn_addr(handle));
391		else
392			saddr = PN_NO_ADDR;
393		release_sock(sk);
394
395		if (dev)
396			dev_put(dev);
397		if (saddr == PN_NO_ADDR)
398			return -EHOSTUNREACH;
399
400		handle = pn_object(saddr, pn_port(pn->sobject));
401		return put_user(handle, (__u16 __user *)arg);
402	}
403
404	return sk->sk_prot->ioctl(sk, cmd, arg);
405}
406
407static int pn_socket_listen(struct socket *sock, int backlog)
408{
409	struct sock *sk = sock->sk;
410	int err = 0;
411
412	if (pn_socket_autobind(sock))
413		return -ENOBUFS;
414
415	lock_sock(sk);
416	if (sock->state != SS_UNCONNECTED) {
417		err = -EINVAL;
418		goto out;
419	}
420
421	if (sk->sk_state != TCP_LISTEN) {
422		sk->sk_state = TCP_LISTEN;
423		sk->sk_ack_backlog = 0;
424	}
425	sk->sk_max_ack_backlog = backlog;
426out:
427	release_sock(sk);
428	return err;
429}
430
431static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
432			     size_t total_len)
433{
434	struct sock *sk = sock->sk;
435
436	if (pn_socket_autobind(sock))
437		return -EAGAIN;
438
439	return sk->sk_prot->sendmsg(sk, m, total_len);
440}
441
442const struct proto_ops phonet_dgram_ops = {
443	.family		= AF_PHONET,
444	.owner		= THIS_MODULE,
445	.release	= pn_socket_release,
446	.bind		= pn_socket_bind,
447	.connect	= sock_no_connect,
448	.socketpair	= sock_no_socketpair,
449	.accept		= sock_no_accept,
450	.getname	= pn_socket_getname,
451	.poll		= datagram_poll,
452	.ioctl		= pn_socket_ioctl,
453	.listen		= sock_no_listen,
454	.shutdown	= sock_no_shutdown,
455	.setsockopt	= sock_no_setsockopt,
456	.getsockopt	= sock_no_getsockopt,
457#ifdef CONFIG_COMPAT
458	.compat_setsockopt = sock_no_setsockopt,
459	.compat_getsockopt = sock_no_getsockopt,
460#endif
461	.sendmsg	= pn_socket_sendmsg,
462	.recvmsg	= sock_common_recvmsg,
463	.mmap		= sock_no_mmap,
464	.sendpage	= sock_no_sendpage,
465};
466
467const struct proto_ops phonet_stream_ops = {
468	.family		= AF_PHONET,
469	.owner		= THIS_MODULE,
470	.release	= pn_socket_release,
471	.bind		= pn_socket_bind,
472	.connect	= pn_socket_connect,
473	.socketpair	= sock_no_socketpair,
474	.accept		= pn_socket_accept,
475	.getname	= pn_socket_getname,
476	.poll		= pn_socket_poll,
477	.ioctl		= pn_socket_ioctl,
478	.listen		= pn_socket_listen,
479	.shutdown	= sock_no_shutdown,
480	.setsockopt	= sock_common_setsockopt,
481	.getsockopt	= sock_common_getsockopt,
482#ifdef CONFIG_COMPAT
483	.compat_setsockopt = compat_sock_common_setsockopt,
484	.compat_getsockopt = compat_sock_common_getsockopt,
485#endif
486	.sendmsg	= pn_socket_sendmsg,
487	.recvmsg	= sock_common_recvmsg,
488	.mmap		= sock_no_mmap,
489	.sendpage	= sock_no_sendpage,
490};
491EXPORT_SYMBOL(phonet_stream_ops);
492
493/* allocate port for a socket */
494int pn_sock_get_port(struct sock *sk, unsigned short sport)
495{
496	static int port_cur;
497	struct net *net = sock_net(sk);
498	struct pn_sock *pn = pn_sk(sk);
499	struct sockaddr_pn try_sa;
500	struct sock *tmpsk;
501
502	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
503	try_sa.spn_family = AF_PHONET;
504	WARN_ON(!mutex_is_locked(&port_mutex));
505	if (!sport) {
506		/* search free port */
507		int port, pmin, pmax;
508
509		phonet_get_local_port_range(&pmin, &pmax);
510		for (port = pmin; port <= pmax; port++) {
511			port_cur++;
512			if (port_cur < pmin || port_cur > pmax)
513				port_cur = pmin;
514
515			pn_sockaddr_set_port(&try_sa, port_cur);
516			tmpsk = pn_find_sock_by_sa(net, &try_sa);
517			if (tmpsk == NULL) {
518				sport = port_cur;
519				goto found;
520			} else
521				sock_put(tmpsk);
522		}
523	} else {
524		/* try to find specific port */
525		pn_sockaddr_set_port(&try_sa, sport);
526		tmpsk = pn_find_sock_by_sa(net, &try_sa);
527		if (tmpsk == NULL)
528			/* No sock there! We can use that port... */
529			goto found;
530		else
531			sock_put(tmpsk);
532	}
533	/* the port must be in use already */
534	return -EADDRINUSE;
535
536found:
537	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
538	return 0;
539}
540EXPORT_SYMBOL(pn_sock_get_port);
541
542#ifdef CONFIG_PROC_FS
543static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
544{
545	struct net *net = seq_file_net(seq);
546	struct hlist_head *hlist = pnsocks.hlist;
547	struct sock *sknode;
548	unsigned int h;
549
550	for (h = 0; h < PN_HASHSIZE; h++) {
551		sk_for_each_rcu(sknode, hlist) {
552			if (!net_eq(net, sock_net(sknode)))
553				continue;
554			if (!pos)
555				return sknode;
556			pos--;
557		}
558		hlist++;
559	}
560	return NULL;
561}
562
563static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
564{
565	struct net *net = seq_file_net(seq);
566
567	do
568		sk = sk_next(sk);
569	while (sk && !net_eq(net, sock_net(sk)));
570
571	return sk;
572}
573
574static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
575	__acquires(rcu)
576{
577	rcu_read_lock();
578	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
579}
580
581static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
582{
583	struct sock *sk;
584
585	if (v == SEQ_START_TOKEN)
586		sk = pn_sock_get_idx(seq, 0);
587	else
588		sk = pn_sock_get_next(seq, v);
589	(*pos)++;
590	return sk;
591}
592
593static void pn_sock_seq_stop(struct seq_file *seq, void *v)
594	__releases(rcu)
595{
596	rcu_read_unlock();
597}
598
599static int pn_sock_seq_show(struct seq_file *seq, void *v)
600{
601	seq_setwidth(seq, 127);
602	if (v == SEQ_START_TOKEN)
603		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
604			"  uid inode ref pointer drops");
605	else {
606		struct sock *sk = v;
607		struct pn_sock *pn = pn_sk(sk);
608
609		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
610			"%d %pK %d",
611			sk->sk_protocol, pn->sobject, pn->dobject,
612			pn->resource, sk->sk_state,
613			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
614			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
615			sock_i_ino(sk),
616			refcount_read(&sk->sk_refcnt), sk,
617			atomic_read(&sk->sk_drops));
618	}
619	seq_pad(seq, '\n');
620	return 0;
621}
622
623static const struct seq_operations pn_sock_seq_ops = {
624	.start = pn_sock_seq_start,
625	.next = pn_sock_seq_next,
626	.stop = pn_sock_seq_stop,
627	.show = pn_sock_seq_show,
628};
629
630static int pn_sock_open(struct inode *inode, struct file *file)
631{
632	return seq_open_net(inode, file, &pn_sock_seq_ops,
633				sizeof(struct seq_net_private));
634}
635
636const struct file_operations pn_sock_seq_fops = {
637	.open = pn_sock_open,
638	.read = seq_read,
639	.llseek = seq_lseek,
640	.release = seq_release_net,
641};
642#endif
643
644static struct  {
645	struct sock *sk[256];
646} pnres;
647
648/*
649 * Find and hold socket based on resource.
650 */
651struct sock *pn_find_sock_by_res(struct net *net, u8 res)
652{
653	struct sock *sk;
654
655	if (!net_eq(net, &init_net))
656		return NULL;
657
658	rcu_read_lock();
659	sk = rcu_dereference(pnres.sk[res]);
660	if (sk)
661		sock_hold(sk);
662	rcu_read_unlock();
663	return sk;
664}
665
666static DEFINE_MUTEX(resource_mutex);
667
668int pn_sock_bind_res(struct sock *sk, u8 res)
669{
670	int ret = -EADDRINUSE;
671
672	if (!net_eq(sock_net(sk), &init_net))
673		return -ENOIOCTLCMD;
674	if (!capable(CAP_SYS_ADMIN))
675		return -EPERM;
676	if (pn_socket_autobind(sk->sk_socket))
677		return -EAGAIN;
678
679	mutex_lock(&resource_mutex);
680	if (pnres.sk[res] == NULL) {
681		sock_hold(sk);
682		rcu_assign_pointer(pnres.sk[res], sk);
683		ret = 0;
684	}
685	mutex_unlock(&resource_mutex);
686	return ret;
687}
688
689int pn_sock_unbind_res(struct sock *sk, u8 res)
690{
691	int ret = -ENOENT;
692
693	if (!capable(CAP_SYS_ADMIN))
694		return -EPERM;
695
696	mutex_lock(&resource_mutex);
697	if (pnres.sk[res] == sk) {
698		RCU_INIT_POINTER(pnres.sk[res], NULL);
699		ret = 0;
700	}
701	mutex_unlock(&resource_mutex);
702
703	if (ret == 0) {
704		synchronize_rcu();
705		sock_put(sk);
706	}
707	return ret;
708}
709
710void pn_sock_unbind_all_res(struct sock *sk)
711{
712	unsigned int res, match = 0;
713
714	mutex_lock(&resource_mutex);
715	for (res = 0; res < 256; res++) {
716		if (pnres.sk[res] == sk) {
717			RCU_INIT_POINTER(pnres.sk[res], NULL);
718			match++;
719		}
720	}
721	mutex_unlock(&resource_mutex);
722
723	while (match > 0) {
724		__sock_put(sk);
725		match--;
726	}
727	/* Caller is responsible for RCU sync before final sock_put() */
728}
729
730#ifdef CONFIG_PROC_FS
731static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
732{
733	struct net *net = seq_file_net(seq);
734	unsigned int i;
735
736	if (!net_eq(net, &init_net))
737		return NULL;
738
739	for (i = 0; i < 256; i++) {
740		if (pnres.sk[i] == NULL)
741			continue;
742		if (!pos)
743			return pnres.sk + i;
744		pos--;
745	}
746	return NULL;
747}
748
749static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
750{
751	struct net *net = seq_file_net(seq);
752	unsigned int i;
753
754	BUG_ON(!net_eq(net, &init_net));
755
756	for (i = (sk - pnres.sk) + 1; i < 256; i++)
757		if (pnres.sk[i])
758			return pnres.sk + i;
759	return NULL;
760}
761
762static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
763	__acquires(resource_mutex)
764{
765	mutex_lock(&resource_mutex);
766	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
767}
768
769static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
770{
771	struct sock **sk;
772
773	if (v == SEQ_START_TOKEN)
774		sk = pn_res_get_idx(seq, 0);
775	else
776		sk = pn_res_get_next(seq, v);
777	(*pos)++;
778	return sk;
779}
780
781static void pn_res_seq_stop(struct seq_file *seq, void *v)
782	__releases(resource_mutex)
783{
784	mutex_unlock(&resource_mutex);
785}
786
787static int pn_res_seq_show(struct seq_file *seq, void *v)
788{
789	seq_setwidth(seq, 63);
790	if (v == SEQ_START_TOKEN)
791		seq_puts(seq, "rs   uid inode");
792	else {
793		struct sock **psk = v;
794		struct sock *sk = *psk;
795
796		seq_printf(seq, "%02X %5u %lu",
797			   (int) (psk - pnres.sk),
798			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
799			   sock_i_ino(sk));
800	}
801	seq_pad(seq, '\n');
802	return 0;
803}
804
805static const struct seq_operations pn_res_seq_ops = {
806	.start = pn_res_seq_start,
807	.next = pn_res_seq_next,
808	.stop = pn_res_seq_stop,
809	.show = pn_res_seq_show,
810};
811
812static int pn_res_open(struct inode *inode, struct file *file)
813{
814	return seq_open_net(inode, file, &pn_res_seq_ops,
815				sizeof(struct seq_net_private));
816}
817
818const struct file_operations pn_res_seq_fops = {
819	.open = pn_res_open,
820	.read = seq_read,
821	.llseek = seq_lseek,
822	.release = seq_release_net,
823};
824#endif
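
The SIOCPNGETOBJECT branch of pn_socket_ioctl() in both listings reads a 16-bit handle from userspace and writes back pn_object(local address, bound port). A minimal hedged sketch of that call follows; the zero initial handle and the printed format are illustrative assumptions, not taken from the tree.

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <linux/phonet.h>

int main(void)
{
	unsigned short handle = 0;	/* address part 0 is an illustrative choice */
	int fd = socket(AF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);

	if (fd < 0) {
		perror("socket(AF_PHONET)");
		return 1;
	}

	/* pn_socket_ioctl() reads this 16-bit handle, resolves a local
	 * Phonet address on the bound (or default) device, and writes
	 * back pn_object(local address, bound port). */
	if (ioctl(fd, SIOCPNGETOBJECT, &handle) == 0)
		printf("local object handle: 0x%04x\n", (unsigned int)handle);
	else
		perror("SIOCPNGETOBJECT");

	close(fd);
	return 0;
}
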