net/phonet/socket.c (Linux v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * File: socket.c
  4 *
  5 * Phonet sockets
  6 *
  7 * Copyright (C) 2008 Nokia Corporation.
  8 *
  9 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
 10 *          Rémi Denis-Courmont
 11 */
 12
 13#include <linux/gfp.h>
 14#include <linux/kernel.h>
 15#include <linux/net.h>
 16#include <linux/poll.h>
 17#include <linux/sched/signal.h>
 18
 19#include <net/sock.h>
 20#include <net/tcp_states.h>
 21
 22#include <linux/phonet.h>
 23#include <linux/export.h>
 24#include <net/phonet/phonet.h>
 25#include <net/phonet/pep.h>
 26#include <net/phonet/pn_dev.h>
 27
 28static int pn_socket_release(struct socket *sock)
 29{
 30	struct sock *sk = sock->sk;
 31
 32	if (sk) {
 33		sock->sk = NULL;
 34		sk->sk_prot->close(sk, 0);
 35	}
 36	return 0;
 37}
 38
 39#define PN_HASHSIZE	16
 40#define PN_HASHMASK	(PN_HASHSIZE-1)
 41
 42
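/* Bound sockets live in a small hash table keyed on the low bits of the
 * port number.  Writers serialize on pnsocks.lock; lookups walk the hash
 * chains under rcu_read_lock() only (see pn_find_sock_by_sa()). */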
 43static struct  {
 44	struct hlist_head hlist[PN_HASHSIZE];
 45	struct mutex lock;
 46} pnsocks;
 47
 48void __init pn_sock_init(void)
 49{
 50	unsigned int i;
 51
 52	for (i = 0; i < PN_HASHSIZE; i++)
 53		INIT_HLIST_HEAD(pnsocks.hlist + i);
 54	mutex_init(&pnsocks.lock);
 55}
 56
 57static struct hlist_head *pn_hash_list(u16 obj)
 58{
 59	return pnsocks.hlist + (obj & PN_HASHMASK);
 60}
 61
 62/*
 63 * Find address based on socket address, match only certain fields.
 64 * Also grab sock if it was found. Remember to sock_put it later.
 65 */
 66struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 67{
 68	struct sock *sknode;
 69	struct sock *rval = NULL;
 70	u16 obj = pn_sockaddr_get_object(spn);
 71	u8 res = spn->spn_resource;
 72	struct hlist_head *hlist = pn_hash_list(obj);
 73
 74	rcu_read_lock();
 75	sk_for_each_rcu(sknode, hlist) {
 76		struct pn_sock *pn = pn_sk(sknode);
 77		BUG_ON(!pn->sobject); /* unbound socket */
 78
 79		if (!net_eq(sock_net(sknode), net))
 80			continue;
 81		if (pn_port(obj)) {
 82			/* Look up socket by port */
 83			if (pn_port(pn->sobject) != pn_port(obj))
 84				continue;
 85		} else {
 86			/* If port is zero, look up by resource */
 87			if (pn->resource != res)
 88				continue;
 89		}
 90		if (pn_addr(pn->sobject) &&
 91		    pn_addr(pn->sobject) != pn_addr(obj))
 92			continue;
 93
 94		rval = sknode;
 95		sock_hold(sknode);
 96		break;
 97	}
 98	rcu_read_unlock();
 99
100	return rval;
101}
102
103/* Deliver a broadcast packet (only in bottom-half) */
104void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
105{
106	struct hlist_head *hlist = pnsocks.hlist;
107	unsigned int h;
108
109	rcu_read_lock();
110	for (h = 0; h < PN_HASHSIZE; h++) {
111		struct sock *sknode;
112
113		sk_for_each(sknode, hlist) {
114			struct sk_buff *clone;
115
116			if (!net_eq(sock_net(sknode), net))
117				continue;
118			if (!sock_flag(sknode, SOCK_BROADCAST))
119				continue;
120
121			clone = skb_clone(skb, GFP_ATOMIC);
122			if (clone) {
123				sock_hold(sknode);
124				sk_receive_skb(sknode, clone, 0);
125			}
126		}
127		hlist++;
128	}
129	rcu_read_unlock();
130}
131
132int pn_sock_hash(struct sock *sk)
133{
134	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
135
136	mutex_lock(&pnsocks.lock);
137	sk_add_node_rcu(sk, hlist);
138	mutex_unlock(&pnsocks.lock);
139
140	return 0;
141}
142EXPORT_SYMBOL(pn_sock_hash);
143
144void pn_sock_unhash(struct sock *sk)
145{
146	mutex_lock(&pnsocks.lock);
147	sk_del_node_init_rcu(sk);
148	mutex_unlock(&pnsocks.lock);
149	pn_sock_unbind_all_res(sk);
150	synchronize_rcu();
151}
152EXPORT_SYMBOL(pn_sock_unhash);
153
154static DEFINE_MUTEX(port_mutex);
155
156static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
157{
158	struct sock *sk = sock->sk;
159	struct pn_sock *pn = pn_sk(sk);
160	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
161	int err;
162	u16 handle;
163	u8 saddr;
164
165	if (sk->sk_prot->bind)
166		return sk->sk_prot->bind(sk, addr, len);
167
168	if (len < sizeof(struct sockaddr_pn))
169		return -EINVAL;
170	if (spn->spn_family != AF_PHONET)
171		return -EAFNOSUPPORT;
172
173	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
174	saddr = pn_addr(handle);
175	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
176		return -EADDRNOTAVAIL;
177
178	lock_sock(sk);
179	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
180		err = -EINVAL; /* attempt to rebind */
181		goto out;
182	}
183	WARN_ON(sk_hashed(sk));
184	mutex_lock(&port_mutex);
185	err = sk->sk_prot->get_port(sk, pn_port(handle));
186	if (err)
187		goto out_port;
188
189	/* get_port() sets the port, bind() sets the address if applicable */
190	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
191	pn->resource = spn->spn_resource;
192
193	/* Enable RX on the socket */
194	err = sk->sk_prot->hash(sk);
195out_port:
196	mutex_unlock(&port_mutex);
197out:
198	release_sock(sk);
199	return err;
200}
201
202static int pn_socket_autobind(struct socket *sock)
203{
204	struct sockaddr_pn sa;
205	int err;
206
207	memset(&sa, 0, sizeof(sa));
208	sa.spn_family = AF_PHONET;
209	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
210				sizeof(struct sockaddr_pn));
211	if (err != -EINVAL)
212		return err;
213	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
214	return 0; /* socket was already bound */
215}
216
217static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
218		int len, int flags)
219{
220	struct sock *sk = sock->sk;
221	struct pn_sock *pn = pn_sk(sk);
222	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
223	struct task_struct *tsk = current;
224	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
225	int err;
226
227	if (pn_socket_autobind(sock))
228		return -ENOBUFS;
229	if (len < sizeof(struct sockaddr_pn))
230		return -EINVAL;
231	if (spn->spn_family != AF_PHONET)
232		return -EAFNOSUPPORT;
233
234	lock_sock(sk);
235
236	switch (sock->state) {
237	case SS_UNCONNECTED:
238		if (sk->sk_state != TCP_CLOSE) {
239			err = -EISCONN;
240			goto out;
241		}
242		break;
243	case SS_CONNECTING:
244		err = -EALREADY;
245		goto out;
246	default:
247		err = -EISCONN;
248		goto out;
249	}
250
251	pn->dobject = pn_sockaddr_get_object(spn);
252	pn->resource = pn_sockaddr_get_resource(spn);
253	sock->state = SS_CONNECTING;
254
255	err = sk->sk_prot->connect(sk, addr, len);
256	if (err) {
257		sock->state = SS_UNCONNECTED;
258		pn->dobject = 0;
259		goto out;
260	}
261
262	while (sk->sk_state == TCP_SYN_SENT) {
263		DEFINE_WAIT(wait);
264
265		if (!timeo) {
266			err = -EINPROGRESS;
267			goto out;
268		}
269		if (signal_pending(tsk)) {
270			err = sock_intr_errno(timeo);
271			goto out;
272		}
273
274		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
275						TASK_INTERRUPTIBLE);
276		release_sock(sk);
277		timeo = schedule_timeout(timeo);
278		lock_sock(sk);
279		finish_wait(sk_sleep(sk), &wait);
280	}
281
282	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
283		err = 0;
284	else if (sk->sk_state == TCP_CLOSE_WAIT)
285		err = -ECONNRESET;
286	else
287		err = -ECONNREFUSED;
288	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
289out:
290	release_sock(sk);
291	return err;
292}
293
294static int pn_socket_accept(struct socket *sock, struct socket *newsock,
295			    int flags, bool kern)
296{
297	struct sock *sk = sock->sk;
298	struct sock *newsk;
299	int err;
300
301	if (unlikely(sk->sk_state != TCP_LISTEN))
302		return -EINVAL;
303
304	newsk = sk->sk_prot->accept(sk, flags, &err, kern);
305	if (!newsk)
306		return err;
307
308	lock_sock(newsk);
309	sock_graft(newsk, newsock);
310	newsock->state = SS_CONNECTED;
311	release_sock(newsk);
312	return 0;
313}
314
315static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
316				int peer)
317{
318	struct sock *sk = sock->sk;
319	struct pn_sock *pn = pn_sk(sk);
320
321	memset(addr, 0, sizeof(struct sockaddr_pn));
322	addr->sa_family = AF_PHONET;
323	if (!peer) /* Race with bind() here is userland's problem. */
324		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
325					pn->sobject);
326
327	return sizeof(struct sockaddr_pn);
328}
329
330static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
331					poll_table *wait)
332{
333	struct sock *sk = sock->sk;
334	struct pep_sock *pn = pep_sk(sk);
335	__poll_t mask = 0;
336
337	poll_wait(file, sk_sleep(sk), wait);
338
339	if (sk->sk_state == TCP_CLOSE)
340		return EPOLLERR;
341	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
342		mask |= EPOLLIN | EPOLLRDNORM;
343	if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
344		mask |= EPOLLPRI;
345	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
346		return EPOLLHUP;
347
348	if (sk->sk_state == TCP_ESTABLISHED &&
349		refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
350		atomic_read(&pn->tx_credits))
351		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
352
353	return mask;
354}
355
356static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
357				unsigned long arg)
358{
359	struct sock *sk = sock->sk;
360	struct pn_sock *pn = pn_sk(sk);
361
362	if (cmd == SIOCPNGETOBJECT) {
363		struct net_device *dev;
364		u16 handle;
365		u8 saddr;
366
367		if (get_user(handle, (__u16 __user *)arg))
368			return -EFAULT;
369
370		lock_sock(sk);
371		if (sk->sk_bound_dev_if)
372			dev = dev_get_by_index(sock_net(sk),
373						sk->sk_bound_dev_if);
374		else
375			dev = phonet_device_get(sock_net(sk));
376		if (dev && (dev->flags & IFF_UP))
377			saddr = phonet_address_get(dev, pn_addr(handle));
378		else
379			saddr = PN_NO_ADDR;
380		release_sock(sk);
381
382		if (dev)
383			dev_put(dev);
384		if (saddr == PN_NO_ADDR)
385			return -EHOSTUNREACH;
386
387		handle = pn_object(saddr, pn_port(pn->sobject));
388		return put_user(handle, (__u16 __user *)arg);
389	}
390
391	return sk->sk_prot->ioctl(sk, cmd, arg);
392}
393
394static int pn_socket_listen(struct socket *sock, int backlog)
395{
396	struct sock *sk = sock->sk;
397	int err = 0;
398
399	if (pn_socket_autobind(sock))
400		return -ENOBUFS;
401
402	lock_sock(sk);
403	if (sock->state != SS_UNCONNECTED) {
404		err = -EINVAL;
405		goto out;
406	}
407
408	if (sk->sk_state != TCP_LISTEN) {
409		sk->sk_state = TCP_LISTEN;
410		sk->sk_ack_backlog = 0;
411	}
412	sk->sk_max_ack_backlog = backlog;
413out:
414	release_sock(sk);
415	return err;
416}
417
418static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
419			     size_t total_len)
420{
421	struct sock *sk = sock->sk;
422
423	if (pn_socket_autobind(sock))
424		return -EAGAIN;
425
426	return sk->sk_prot->sendmsg(sk, m, total_len);
427}
428
429const struct proto_ops phonet_dgram_ops = {
430	.family		= AF_PHONET,
431	.owner		= THIS_MODULE,
432	.release	= pn_socket_release,
433	.bind		= pn_socket_bind,
434	.connect	= sock_no_connect,
435	.socketpair	= sock_no_socketpair,
436	.accept		= sock_no_accept,
437	.getname	= pn_socket_getname,
438	.poll		= datagram_poll,
439	.ioctl		= pn_socket_ioctl,
440	.listen		= sock_no_listen,
441	.shutdown	= sock_no_shutdown,
442	.setsockopt	= sock_no_setsockopt,
443	.getsockopt	= sock_no_getsockopt,
444#ifdef CONFIG_COMPAT
445	.compat_setsockopt = sock_no_setsockopt,
446	.compat_getsockopt = sock_no_getsockopt,
447#endif
448	.sendmsg	= pn_socket_sendmsg,
449	.recvmsg	= sock_common_recvmsg,
450	.mmap		= sock_no_mmap,
451	.sendpage	= sock_no_sendpage,
452};
453
454const struct proto_ops phonet_stream_ops = {
455	.family		= AF_PHONET,
456	.owner		= THIS_MODULE,
457	.release	= pn_socket_release,
458	.bind		= pn_socket_bind,
459	.connect	= pn_socket_connect,
460	.socketpair	= sock_no_socketpair,
461	.accept		= pn_socket_accept,
462	.getname	= pn_socket_getname,
463	.poll		= pn_socket_poll,
464	.ioctl		= pn_socket_ioctl,
465	.listen		= pn_socket_listen,
466	.shutdown	= sock_no_shutdown,
467	.setsockopt	= sock_common_setsockopt,
468	.getsockopt	= sock_common_getsockopt,
469#ifdef CONFIG_COMPAT
470	.compat_setsockopt = compat_sock_common_setsockopt,
471	.compat_getsockopt = compat_sock_common_getsockopt,
472#endif
473	.sendmsg	= pn_socket_sendmsg,
474	.recvmsg	= sock_common_recvmsg,
475	.mmap		= sock_no_mmap,
476	.sendpage	= sock_no_sendpage,
477};
478EXPORT_SYMBOL(phonet_stream_ops);
479
480/* allocate port for a socket */
481int pn_sock_get_port(struct sock *sk, unsigned short sport)
482{
483	static int port_cur;
484	struct net *net = sock_net(sk);
485	struct pn_sock *pn = pn_sk(sk);
486	struct sockaddr_pn try_sa;
487	struct sock *tmpsk;
488
489	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
490	try_sa.spn_family = AF_PHONET;
491	WARN_ON(!mutex_is_locked(&port_mutex));
492	if (!sport) {
493		/* search free port */
494		int port, pmin, pmax;
495
496		phonet_get_local_port_range(&pmin, &pmax);
497		for (port = pmin; port <= pmax; port++) {
498			port_cur++;
499			if (port_cur < pmin || port_cur > pmax)
500				port_cur = pmin;
501
502			pn_sockaddr_set_port(&try_sa, port_cur);
503			tmpsk = pn_find_sock_by_sa(net, &try_sa);
504			if (tmpsk == NULL) {
505				sport = port_cur;
506				goto found;
507			} else
508				sock_put(tmpsk);
509		}
510	} else {
511		/* try to find specific port */
512		pn_sockaddr_set_port(&try_sa, sport);
513		tmpsk = pn_find_sock_by_sa(net, &try_sa);
514		if (tmpsk == NULL)
515			/* No sock there! We can use that port... */
516			goto found;
517		else
518			sock_put(tmpsk);
519	}
520	/* the port must be in use already */
521	return -EADDRINUSE;
522
523found:
524	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
525	return 0;
526}
527EXPORT_SYMBOL(pn_sock_get_port);
528
529#ifdef CONFIG_PROC_FS
530static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
531{
532	struct net *net = seq_file_net(seq);
533	struct hlist_head *hlist = pnsocks.hlist;
534	struct sock *sknode;
535	unsigned int h;
536
537	for (h = 0; h < PN_HASHSIZE; h++) {
538		sk_for_each_rcu(sknode, hlist) {
539			if (!net_eq(net, sock_net(sknode)))
540				continue;
541			if (!pos)
542				return sknode;
543			pos--;
544		}
545		hlist++;
546	}
547	return NULL;
548}
549
550static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
551{
552	struct net *net = seq_file_net(seq);
553
554	do
555		sk = sk_next(sk);
556	while (sk && !net_eq(net, sock_net(sk)));
557
558	return sk;
559}
560
561static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
562	__acquires(rcu)
563{
564	rcu_read_lock();
565	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
566}
567
568static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
569{
570	struct sock *sk;
571
572	if (v == SEQ_START_TOKEN)
573		sk = pn_sock_get_idx(seq, 0);
574	else
575		sk = pn_sock_get_next(seq, v);
576	(*pos)++;
577	return sk;
578}
579
580static void pn_sock_seq_stop(struct seq_file *seq, void *v)
581	__releases(rcu)
582{
583	rcu_read_unlock();
584}
585
586static int pn_sock_seq_show(struct seq_file *seq, void *v)
587{
588	seq_setwidth(seq, 127);
589	if (v == SEQ_START_TOKEN)
590		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
591			"  uid inode ref pointer drops");
592	else {
593		struct sock *sk = v;
594		struct pn_sock *pn = pn_sk(sk);
595
596		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
597			"%d %pK %u",
598			sk->sk_protocol, pn->sobject, pn->dobject,
599			pn->resource, sk->sk_state,
600			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
601			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
602			sock_i_ino(sk),
603			refcount_read(&sk->sk_refcnt), sk,
604			atomic_read(&sk->sk_drops));
605	}
606	seq_pad(seq, '\n');
607	return 0;
608}
609
610const struct seq_operations pn_sock_seq_ops = {
611	.start = pn_sock_seq_start,
612	.next = pn_sock_seq_next,
613	.stop = pn_sock_seq_stop,
614	.show = pn_sock_seq_show,
615};
616#endif
617
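/* At most one socket may be bound to each of the 256 Phonet resource
 * numbers.  Writers serialize on resource_mutex; pn_find_sock_by_res()
 * reads the table under rcu_read_lock(). */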
618static struct  {
619	struct sock *sk[256];
620} pnres;
621
622/*
623 * Find and hold socket based on resource.
624 */
625struct sock *pn_find_sock_by_res(struct net *net, u8 res)
626{
627	struct sock *sk;
628
629	if (!net_eq(net, &init_net))
630		return NULL;
631
632	rcu_read_lock();
633	sk = rcu_dereference(pnres.sk[res]);
634	if (sk)
635		sock_hold(sk);
636	rcu_read_unlock();
637	return sk;
638}
639
640static DEFINE_MUTEX(resource_mutex);
641
642int pn_sock_bind_res(struct sock *sk, u8 res)
643{
644	int ret = -EADDRINUSE;
645
646	if (!net_eq(sock_net(sk), &init_net))
647		return -ENOIOCTLCMD;
648	if (!capable(CAP_SYS_ADMIN))
649		return -EPERM;
650	if (pn_socket_autobind(sk->sk_socket))
651		return -EAGAIN;
652
653	mutex_lock(&resource_mutex);
654	if (pnres.sk[res] == NULL) {
655		sock_hold(sk);
656		rcu_assign_pointer(pnres.sk[res], sk);
657		ret = 0;
658	}
659	mutex_unlock(&resource_mutex);
660	return ret;
661}
662
663int pn_sock_unbind_res(struct sock *sk, u8 res)
664{
665	int ret = -ENOENT;
666
667	if (!capable(CAP_SYS_ADMIN))
668		return -EPERM;
669
670	mutex_lock(&resource_mutex);
671	if (pnres.sk[res] == sk) {
672		RCU_INIT_POINTER(pnres.sk[res], NULL);
673		ret = 0;
674	}
675	mutex_unlock(&resource_mutex);
676
677	if (ret == 0) {
678		synchronize_rcu();
679		sock_put(sk);
680	}
681	return ret;
682}
683
684void pn_sock_unbind_all_res(struct sock *sk)
685{
686	unsigned int res, match = 0;
687
688	mutex_lock(&resource_mutex);
689	for (res = 0; res < 256; res++) {
690		if (pnres.sk[res] == sk) {
691			RCU_INIT_POINTER(pnres.sk[res], NULL);
692			match++;
693		}
694	}
695	mutex_unlock(&resource_mutex);
696
697	while (match > 0) {
698		__sock_put(sk);
699		match--;
700	}
701	/* Caller is responsible for RCU sync before final sock_put() */
702}
703
704#ifdef CONFIG_PROC_FS
705static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
706{
707	struct net *net = seq_file_net(seq);
708	unsigned int i;
709
710	if (!net_eq(net, &init_net))
711		return NULL;
712
713	for (i = 0; i < 256; i++) {
714		if (pnres.sk[i] == NULL)
715			continue;
716		if (!pos)
717			return pnres.sk + i;
718		pos--;
719	}
720	return NULL;
721}
722
723static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
724{
725	struct net *net = seq_file_net(seq);
726	unsigned int i;
727
728	BUG_ON(!net_eq(net, &init_net));
729
730	for (i = (sk - pnres.sk) + 1; i < 256; i++)
731		if (pnres.sk[i])
732			return pnres.sk + i;
733	return NULL;
734}
735
736static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
737	__acquires(resource_mutex)
738{
739	mutex_lock(&resource_mutex);
740	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
741}
742
743static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
744{
745	struct sock **sk;
746
747	if (v == SEQ_START_TOKEN)
748		sk = pn_res_get_idx(seq, 0);
749	else
750		sk = pn_res_get_next(seq, v);
751	(*pos)++;
752	return sk;
753}
754
755static void pn_res_seq_stop(struct seq_file *seq, void *v)
756	__releases(resource_mutex)
757{
758	mutex_unlock(&resource_mutex);
759}
760
761static int pn_res_seq_show(struct seq_file *seq, void *v)
762{
763	seq_setwidth(seq, 63);
764	if (v == SEQ_START_TOKEN)
765		seq_puts(seq, "rs   uid inode");
766	else {
767		struct sock **psk = v;
768		struct sock *sk = *psk;
769
770		seq_printf(seq, "%02X %5u %lu",
771			   (int) (psk - pnres.sk),
772			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
773			   sock_i_ino(sk));
774	}
775	seq_pad(seq, '\n');
776	return 0;
777}
778
779const struct seq_operations pn_res_seq_ops = {
780	.start = pn_res_seq_start,
781	.next = pn_res_seq_next,
782	.stop = pn_res_seq_stop,
783	.show = pn_res_seq_show,
784};
785#endif
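For orientation, here is a minimal userspace sketch of the bind and SIOCPNGETOBJECT paths implemented by pn_socket_bind() and pn_socket_ioctl() above. It is illustrative only and not taken from the kernel tree; it assumes the <linux/phonet.h> UAPI header, a C library that defines AF_PHONET, and sufficient privileges (Phonet socket creation is normally restricted to CAP_SYS_ADMIN).

/* Hypothetical sketch: create a Phonet datagram socket, let the kernel
 * pick a free port (object 0), then read back the local object with
 * SIOCPNGETOBJECT.  Error handling is kept minimal on purpose. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/phonet.h>

int main(void)
{
	struct sockaddr_pn spn;
	__u16 handle = 0;
	int fd = socket(AF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&spn, 0, sizeof(spn));
	spn.spn_family = AF_PHONET;	/* object left at 0: pn_sock_get_port() picks a port */
	if (bind(fd, (struct sockaddr *)&spn, sizeof(spn)) < 0) {
		perror("bind");
		return 1;
	}

	/* Handled by pn_socket_ioctl(): returns pn_object(local addr, local port) */
	if (ioctl(fd, SIOCPNGETOBJECT, &handle) == 0)
		printf("bound to object 0x%04x\n", handle);

	close(fd);
	return 0;
}

Binding a zeroed sockaddr_pn mirrors what pn_socket_autobind() does in-kernel before connect(), listen() or sendmsg().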
net/phonet/socket.c (Linux v3.15)
  1/*
  2 * File: socket.c
  3 *
  4 * Phonet sockets
  5 *
  6 * Copyright (C) 2008 Nokia Corporation.
  7 *
  8 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
  9 *          Rémi Denis-Courmont
 10 *
 11 * This program is free software; you can redistribute it and/or
 12 * modify it under the terms of the GNU General Public License
 13 * version 2 as published by the Free Software Foundation.
 14 *
 15 * This program is distributed in the hope that it will be useful, but
 16 * WITHOUT ANY WARRANTY; without even the implied warranty of
 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 18 * General Public License for more details.
 19 *
 20 * You should have received a copy of the GNU General Public License
 21 * along with this program; if not, write to the Free Software
 22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 23 * 02110-1301 USA
 24 */
 25
 26#include <linux/gfp.h>
 27#include <linux/kernel.h>
 28#include <linux/net.h>
 29#include <linux/poll.h>
 30#include <net/sock.h>
 31#include <net/tcp_states.h>
 32
 33#include <linux/phonet.h>
 34#include <linux/export.h>
 35#include <net/phonet/phonet.h>
 36#include <net/phonet/pep.h>
 37#include <net/phonet/pn_dev.h>
 38
 39static int pn_socket_release(struct socket *sock)
 40{
 41	struct sock *sk = sock->sk;
 42
 43	if (sk) {
 44		sock->sk = NULL;
 45		sk->sk_prot->close(sk, 0);
 46	}
 47	return 0;
 48}
 49
 50#define PN_HASHSIZE	16
 51#define PN_HASHMASK	(PN_HASHSIZE-1)
 52
 53
 54static struct  {
 55	struct hlist_head hlist[PN_HASHSIZE];
 56	struct mutex lock;
 57} pnsocks;
 58
 59void __init pn_sock_init(void)
 60{
 61	unsigned int i;
 62
 63	for (i = 0; i < PN_HASHSIZE; i++)
 64		INIT_HLIST_HEAD(pnsocks.hlist + i);
 65	mutex_init(&pnsocks.lock);
 66}
 67
 68static struct hlist_head *pn_hash_list(u16 obj)
 69{
 70	return pnsocks.hlist + (obj & PN_HASHMASK);
 71}
 72
 73/*
 74 * Find address based on socket address, match only certain fields.
 75 * Also grab sock if it was found. Remember to sock_put it later.
 76 */
 77struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 78{
 79	struct sock *sknode;
 80	struct sock *rval = NULL;
 81	u16 obj = pn_sockaddr_get_object(spn);
 82	u8 res = spn->spn_resource;
 83	struct hlist_head *hlist = pn_hash_list(obj);
 84
 85	rcu_read_lock();
 86	sk_for_each_rcu(sknode, hlist) {
 87		struct pn_sock *pn = pn_sk(sknode);
 88		BUG_ON(!pn->sobject); /* unbound socket */
 89
 90		if (!net_eq(sock_net(sknode), net))
 91			continue;
 92		if (pn_port(obj)) {
 93			/* Look up socket by port */
 94			if (pn_port(pn->sobject) != pn_port(obj))
 95				continue;
 96		} else {
 97			/* If port is zero, look up by resource */
 98			if (pn->resource != res)
 99				continue;
100		}
101		if (pn_addr(pn->sobject) &&
102		    pn_addr(pn->sobject) != pn_addr(obj))
103			continue;
104
105		rval = sknode;
106		sock_hold(sknode);
107		break;
108	}
109	rcu_read_unlock();
110
111	return rval;
112}
113
114/* Deliver a broadcast packet (only in bottom-half) */
115void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
116{
117	struct hlist_head *hlist = pnsocks.hlist;
118	unsigned int h;
119
120	rcu_read_lock();
121	for (h = 0; h < PN_HASHSIZE; h++) {
122		struct sock *sknode;
123
124		sk_for_each(sknode, hlist) {
125			struct sk_buff *clone;
126
127			if (!net_eq(sock_net(sknode), net))
128				continue;
129			if (!sock_flag(sknode, SOCK_BROADCAST))
130				continue;
131
132			clone = skb_clone(skb, GFP_ATOMIC);
133			if (clone) {
134				sock_hold(sknode);
135				sk_receive_skb(sknode, clone, 0);
136			}
137		}
138		hlist++;
139	}
140	rcu_read_unlock();
141}
142
143void pn_sock_hash(struct sock *sk)
144{
145	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
146
147	mutex_lock(&pnsocks.lock);
148	sk_add_node_rcu(sk, hlist);
149	mutex_unlock(&pnsocks.lock);
150}
151EXPORT_SYMBOL(pn_sock_hash);
152
153void pn_sock_unhash(struct sock *sk)
154{
155	mutex_lock(&pnsocks.lock);
156	sk_del_node_init_rcu(sk);
157	mutex_unlock(&pnsocks.lock);
158	pn_sock_unbind_all_res(sk);
159	synchronize_rcu();
160}
161EXPORT_SYMBOL(pn_sock_unhash);
162
163static DEFINE_MUTEX(port_mutex);
164
165static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
166{
167	struct sock *sk = sock->sk;
168	struct pn_sock *pn = pn_sk(sk);
169	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
170	int err;
171	u16 handle;
172	u8 saddr;
173
174	if (sk->sk_prot->bind)
175		return sk->sk_prot->bind(sk, addr, len);
176
177	if (len < sizeof(struct sockaddr_pn))
178		return -EINVAL;
179	if (spn->spn_family != AF_PHONET)
180		return -EAFNOSUPPORT;
181
182	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
183	saddr = pn_addr(handle);
184	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
185		return -EADDRNOTAVAIL;
186
187	lock_sock(sk);
188	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
189		err = -EINVAL; /* attempt to rebind */
190		goto out;
191	}
192	WARN_ON(sk_hashed(sk));
193	mutex_lock(&port_mutex);
194	err = sk->sk_prot->get_port(sk, pn_port(handle));
195	if (err)
196		goto out_port;
197
198	/* get_port() sets the port, bind() sets the address if applicable */
199	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
200	pn->resource = spn->spn_resource;
201
202	/* Enable RX on the socket */
203	sk->sk_prot->hash(sk);
204out_port:
205	mutex_unlock(&port_mutex);
206out:
207	release_sock(sk);
208	return err;
209}
210
211static int pn_socket_autobind(struct socket *sock)
212{
213	struct sockaddr_pn sa;
214	int err;
215
216	memset(&sa, 0, sizeof(sa));
217	sa.spn_family = AF_PHONET;
218	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
219				sizeof(struct sockaddr_pn));
220	if (err != -EINVAL)
221		return err;
222	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
223	return 0; /* socket was already bound */
224}
225
226static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
227		int len, int flags)
228{
229	struct sock *sk = sock->sk;
230	struct pn_sock *pn = pn_sk(sk);
231	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
232	struct task_struct *tsk = current;
233	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
234	int err;
235
236	if (pn_socket_autobind(sock))
237		return -ENOBUFS;
238	if (len < sizeof(struct sockaddr_pn))
239		return -EINVAL;
240	if (spn->spn_family != AF_PHONET)
241		return -EAFNOSUPPORT;
242
243	lock_sock(sk);
244
245	switch (sock->state) {
246	case SS_UNCONNECTED:
247		if (sk->sk_state != TCP_CLOSE) {
248			err = -EISCONN;
249			goto out;
250		}
251		break;
252	case SS_CONNECTING:
253		err = -EALREADY;
254		goto out;
255	default:
256		err = -EISCONN;
257		goto out;
258	}
259
260	pn->dobject = pn_sockaddr_get_object(spn);
261	pn->resource = pn_sockaddr_get_resource(spn);
262	sock->state = SS_CONNECTING;
263
264	err = sk->sk_prot->connect(sk, addr, len);
265	if (err) {
266		sock->state = SS_UNCONNECTED;
267		pn->dobject = 0;
268		goto out;
269	}
270
271	while (sk->sk_state == TCP_SYN_SENT) {
272		DEFINE_WAIT(wait);
273
274		if (!timeo) {
275			err = -EINPROGRESS;
276			goto out;
277		}
278		if (signal_pending(tsk)) {
279			err = sock_intr_errno(timeo);
280			goto out;
281		}
282
283		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
284						TASK_INTERRUPTIBLE);
285		release_sock(sk);
286		timeo = schedule_timeout(timeo);
287		lock_sock(sk);
288		finish_wait(sk_sleep(sk), &wait);
289	}
290
291	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
292		err = 0;
293	else if (sk->sk_state == TCP_CLOSE_WAIT)
294		err = -ECONNRESET;
295	else
296		err = -ECONNREFUSED;
297	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
298out:
299	release_sock(sk);
300	return err;
301}
302
303static int pn_socket_accept(struct socket *sock, struct socket *newsock,
304				int flags)
305{
306	struct sock *sk = sock->sk;
307	struct sock *newsk;
308	int err;
309
310	if (unlikely(sk->sk_state != TCP_LISTEN))
311		return -EINVAL;
312
313	newsk = sk->sk_prot->accept(sk, flags, &err);
314	if (!newsk)
315		return err;
316
317	lock_sock(newsk);
318	sock_graft(newsk, newsock);
319	newsock->state = SS_CONNECTED;
320	release_sock(newsk);
321	return 0;
322}
323
324static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
325				int *sockaddr_len, int peer)
326{
327	struct sock *sk = sock->sk;
328	struct pn_sock *pn = pn_sk(sk);
329
330	memset(addr, 0, sizeof(struct sockaddr_pn));
331	addr->sa_family = AF_PHONET;
332	if (!peer) /* Race with bind() here is userland's problem. */
333		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
334					pn->sobject);
335
336	*sockaddr_len = sizeof(struct sockaddr_pn);
337	return 0;
338}
339
340static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
341					poll_table *wait)
342{
343	struct sock *sk = sock->sk;
344	struct pep_sock *pn = pep_sk(sk);
345	unsigned int mask = 0;
346
347	poll_wait(file, sk_sleep(sk), wait);
348
349	if (sk->sk_state == TCP_CLOSE)
350		return POLLERR;
351	if (!skb_queue_empty(&sk->sk_receive_queue))
352		mask |= POLLIN | POLLRDNORM;
353	if (!skb_queue_empty(&pn->ctrlreq_queue))
354		mask |= POLLPRI;
355	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
356		return POLLHUP;
357
358	if (sk->sk_state == TCP_ESTABLISHED &&
359		atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
360		atomic_read(&pn->tx_credits))
361		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
362
363	return mask;
364}
365
366static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
367				unsigned long arg)
368{
369	struct sock *sk = sock->sk;
370	struct pn_sock *pn = pn_sk(sk);
371
372	if (cmd == SIOCPNGETOBJECT) {
373		struct net_device *dev;
374		u16 handle;
375		u8 saddr;
376
377		if (get_user(handle, (__u16 __user *)arg))
378			return -EFAULT;
379
380		lock_sock(sk);
381		if (sk->sk_bound_dev_if)
382			dev = dev_get_by_index(sock_net(sk),
383						sk->sk_bound_dev_if);
384		else
385			dev = phonet_device_get(sock_net(sk));
386		if (dev && (dev->flags & IFF_UP))
387			saddr = phonet_address_get(dev, pn_addr(handle));
388		else
389			saddr = PN_NO_ADDR;
390		release_sock(sk);
391
392		if (dev)
393			dev_put(dev);
394		if (saddr == PN_NO_ADDR)
395			return -EHOSTUNREACH;
396
397		handle = pn_object(saddr, pn_port(pn->sobject));
398		return put_user(handle, (__u16 __user *)arg);
399	}
400
401	return sk->sk_prot->ioctl(sk, cmd, arg);
402}
403
404static int pn_socket_listen(struct socket *sock, int backlog)
405{
406	struct sock *sk = sock->sk;
407	int err = 0;
408
409	if (pn_socket_autobind(sock))
410		return -ENOBUFS;
411
412	lock_sock(sk);
413	if (sock->state != SS_UNCONNECTED) {
414		err = -EINVAL;
415		goto out;
416	}
417
418	if (sk->sk_state != TCP_LISTEN) {
419		sk->sk_state = TCP_LISTEN;
420		sk->sk_ack_backlog = 0;
421	}
422	sk->sk_max_ack_backlog = backlog;
423out:
424	release_sock(sk);
425	return err;
426}
427
428static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
429				struct msghdr *m, size_t total_len)
430{
431	struct sock *sk = sock->sk;
432
433	if (pn_socket_autobind(sock))
434		return -EAGAIN;
435
436	return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
437}
438
439const struct proto_ops phonet_dgram_ops = {
440	.family		= AF_PHONET,
441	.owner		= THIS_MODULE,
442	.release	= pn_socket_release,
443	.bind		= pn_socket_bind,
444	.connect	= sock_no_connect,
445	.socketpair	= sock_no_socketpair,
446	.accept		= sock_no_accept,
447	.getname	= pn_socket_getname,
448	.poll		= datagram_poll,
449	.ioctl		= pn_socket_ioctl,
450	.listen		= sock_no_listen,
451	.shutdown	= sock_no_shutdown,
452	.setsockopt	= sock_no_setsockopt,
453	.getsockopt	= sock_no_getsockopt,
454#ifdef CONFIG_COMPAT
455	.compat_setsockopt = sock_no_setsockopt,
456	.compat_getsockopt = sock_no_getsockopt,
457#endif
458	.sendmsg	= pn_socket_sendmsg,
459	.recvmsg	= sock_common_recvmsg,
460	.mmap		= sock_no_mmap,
461	.sendpage	= sock_no_sendpage,
462};
463
464const struct proto_ops phonet_stream_ops = {
465	.family		= AF_PHONET,
466	.owner		= THIS_MODULE,
467	.release	= pn_socket_release,
468	.bind		= pn_socket_bind,
469	.connect	= pn_socket_connect,
470	.socketpair	= sock_no_socketpair,
471	.accept		= pn_socket_accept,
472	.getname	= pn_socket_getname,
473	.poll		= pn_socket_poll,
474	.ioctl		= pn_socket_ioctl,
475	.listen		= pn_socket_listen,
476	.shutdown	= sock_no_shutdown,
477	.setsockopt	= sock_common_setsockopt,
478	.getsockopt	= sock_common_getsockopt,
479#ifdef CONFIG_COMPAT
480	.compat_setsockopt = compat_sock_common_setsockopt,
481	.compat_getsockopt = compat_sock_common_getsockopt,
482#endif
483	.sendmsg	= pn_socket_sendmsg,
484	.recvmsg	= sock_common_recvmsg,
485	.mmap		= sock_no_mmap,
486	.sendpage	= sock_no_sendpage,
487};
488EXPORT_SYMBOL(phonet_stream_ops);
489
490/* allocate port for a socket */
491int pn_sock_get_port(struct sock *sk, unsigned short sport)
492{
493	static int port_cur;
494	struct net *net = sock_net(sk);
495	struct pn_sock *pn = pn_sk(sk);
496	struct sockaddr_pn try_sa;
497	struct sock *tmpsk;
498
499	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
500	try_sa.spn_family = AF_PHONET;
501	WARN_ON(!mutex_is_locked(&port_mutex));
502	if (!sport) {
503		/* search free port */
504		int port, pmin, pmax;
505
506		phonet_get_local_port_range(&pmin, &pmax);
507		for (port = pmin; port <= pmax; port++) {
508			port_cur++;
509			if (port_cur < pmin || port_cur > pmax)
510				port_cur = pmin;
511
512			pn_sockaddr_set_port(&try_sa, port_cur);
513			tmpsk = pn_find_sock_by_sa(net, &try_sa);
514			if (tmpsk == NULL) {
515				sport = port_cur;
516				goto found;
517			} else
518				sock_put(tmpsk);
519		}
520	} else {
521		/* try to find specific port */
522		pn_sockaddr_set_port(&try_sa, sport);
523		tmpsk = pn_find_sock_by_sa(net, &try_sa);
524		if (tmpsk == NULL)
525			/* No sock there! We can use that port... */
526			goto found;
527		else
528			sock_put(tmpsk);
529	}
530	/* the port must be in use already */
531	return -EADDRINUSE;
532
533found:
534	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
535	return 0;
536}
537EXPORT_SYMBOL(pn_sock_get_port);
538
539#ifdef CONFIG_PROC_FS
540static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
541{
542	struct net *net = seq_file_net(seq);
543	struct hlist_head *hlist = pnsocks.hlist;
544	struct sock *sknode;
545	unsigned int h;
546
547	for (h = 0; h < PN_HASHSIZE; h++) {
548		sk_for_each_rcu(sknode, hlist) {
549			if (!net_eq(net, sock_net(sknode)))
550				continue;
551			if (!pos)
552				return sknode;
553			pos--;
554		}
555		hlist++;
556	}
557	return NULL;
558}
559
560static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
561{
562	struct net *net = seq_file_net(seq);
563
564	do
565		sk = sk_next(sk);
566	while (sk && !net_eq(net, sock_net(sk)));
567
568	return sk;
569}
570
571static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
572	__acquires(rcu)
573{
574	rcu_read_lock();
575	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
576}
577
578static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
579{
580	struct sock *sk;
581
582	if (v == SEQ_START_TOKEN)
583		sk = pn_sock_get_idx(seq, 0);
584	else
585		sk = pn_sock_get_next(seq, v);
586	(*pos)++;
587	return sk;
588}
589
590static void pn_sock_seq_stop(struct seq_file *seq, void *v)
591	__releases(rcu)
592{
593	rcu_read_unlock();
594}
595
596static int pn_sock_seq_show(struct seq_file *seq, void *v)
597{
598	seq_setwidth(seq, 127);
599	if (v == SEQ_START_TOKEN)
600		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
601			"  uid inode ref pointer drops");
602	else {
603		struct sock *sk = v;
604		struct pn_sock *pn = pn_sk(sk);
605
606		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
607			"%d %pK %d",
608			sk->sk_protocol, pn->sobject, pn->dobject,
609			pn->resource, sk->sk_state,
610			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
611			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
612			sock_i_ino(sk),
613			atomic_read(&sk->sk_refcnt), sk,
614			atomic_read(&sk->sk_drops));
615	}
616	seq_pad(seq, '\n');
617	return 0;
618}
619
620static const struct seq_operations pn_sock_seq_ops = {
621	.start = pn_sock_seq_start,
622	.next = pn_sock_seq_next,
623	.stop = pn_sock_seq_stop,
624	.show = pn_sock_seq_show,
625};
626
627static int pn_sock_open(struct inode *inode, struct file *file)
628{
629	return seq_open_net(inode, file, &pn_sock_seq_ops,
630				sizeof(struct seq_net_private));
631}
632
633const struct file_operations pn_sock_seq_fops = {
634	.owner = THIS_MODULE,
635	.open = pn_sock_open,
636	.read = seq_read,
637	.llseek = seq_lseek,
638	.release = seq_release_net,
639};
640#endif
641
642static struct  {
643	struct sock *sk[256];
644} pnres;
645
646/*
647 * Find and hold socket based on resource.
648 */
649struct sock *pn_find_sock_by_res(struct net *net, u8 res)
650{
651	struct sock *sk;
652
653	if (!net_eq(net, &init_net))
654		return NULL;
655
656	rcu_read_lock();
657	sk = rcu_dereference(pnres.sk[res]);
658	if (sk)
659		sock_hold(sk);
660	rcu_read_unlock();
661	return sk;
662}
663
664static DEFINE_MUTEX(resource_mutex);
665
666int pn_sock_bind_res(struct sock *sk, u8 res)
667{
668	int ret = -EADDRINUSE;
669
670	if (!net_eq(sock_net(sk), &init_net))
671		return -ENOIOCTLCMD;
672	if (!capable(CAP_SYS_ADMIN))
673		return -EPERM;
674	if (pn_socket_autobind(sk->sk_socket))
675		return -EAGAIN;
676
677	mutex_lock(&resource_mutex);
678	if (pnres.sk[res] == NULL) {
679		sock_hold(sk);
680		rcu_assign_pointer(pnres.sk[res], sk);
681		ret = 0;
682	}
683	mutex_unlock(&resource_mutex);
684	return ret;
685}
686
687int pn_sock_unbind_res(struct sock *sk, u8 res)
688{
689	int ret = -ENOENT;
690
691	if (!capable(CAP_SYS_ADMIN))
692		return -EPERM;
693
694	mutex_lock(&resource_mutex);
695	if (pnres.sk[res] == sk) {
696		RCU_INIT_POINTER(pnres.sk[res], NULL);
697		ret = 0;
698	}
699	mutex_unlock(&resource_mutex);
700
701	if (ret == 0) {
702		synchronize_rcu();
703		sock_put(sk);
704	}
705	return ret;
706}
707
708void pn_sock_unbind_all_res(struct sock *sk)
709{
710	unsigned int res, match = 0;
711
712	mutex_lock(&resource_mutex);
713	for (res = 0; res < 256; res++) {
714		if (pnres.sk[res] == sk) {
715			RCU_INIT_POINTER(pnres.sk[res], NULL);
716			match++;
717		}
718	}
719	mutex_unlock(&resource_mutex);
720
721	while (match > 0) {
722		__sock_put(sk);
723		match--;
724	}
725	/* Caller is responsible for RCU sync before final sock_put() */
726}
727
728#ifdef CONFIG_PROC_FS
729static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
730{
731	struct net *net = seq_file_net(seq);
732	unsigned int i;
733
734	if (!net_eq(net, &init_net))
735		return NULL;
736
737	for (i = 0; i < 256; i++) {
738		if (pnres.sk[i] == NULL)
739			continue;
740		if (!pos)
741			return pnres.sk + i;
742		pos--;
743	}
744	return NULL;
745}
746
747static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
748{
749	struct net *net = seq_file_net(seq);
750	unsigned int i;
751
752	BUG_ON(!net_eq(net, &init_net));
753
754	for (i = (sk - pnres.sk) + 1; i < 256; i++)
755		if (pnres.sk[i])
756			return pnres.sk + i;
757	return NULL;
758}
759
760static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
761	__acquires(resource_mutex)
762{
763	mutex_lock(&resource_mutex);
764	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
765}
766
767static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
768{
769	struct sock **sk;
770
771	if (v == SEQ_START_TOKEN)
772		sk = pn_res_get_idx(seq, 0);
773	else
774		sk = pn_res_get_next(seq, v);
775	(*pos)++;
776	return sk;
777}
778
779static void pn_res_seq_stop(struct seq_file *seq, void *v)
780	__releases(resource_mutex)
781{
782	mutex_unlock(&resource_mutex);
783}
784
785static int pn_res_seq_show(struct seq_file *seq, void *v)
786{
787	seq_setwidth(seq, 63);
788	if (v == SEQ_START_TOKEN)
789		seq_puts(seq, "rs   uid inode");
790	else {
791		struct sock **psk = v;
792		struct sock *sk = *psk;
793
794		seq_printf(seq, "%02X %5u %lu",
795			   (int) (psk - pnres.sk),
796			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
797			   sock_i_ino(sk));
798	}
799	seq_pad(seq, '\n');
800	return 0;
801}
802
803static const struct seq_operations pn_res_seq_ops = {
804	.start = pn_res_seq_start,
805	.next = pn_res_seq_next,
806	.stop = pn_res_seq_stop,
807	.show = pn_res_seq_show,
808};
809
810static int pn_res_open(struct inode *inode, struct file *file)
811{
812	return seq_open_net(inode, file, &pn_res_seq_ops,
813				sizeof(struct seq_net_private));
814}
815
816const struct file_operations pn_res_seq_fops = {
817	.owner = THIS_MODULE,
818	.open = pn_res_open,
819	.read = seq_read,
820	.llseek = seq_lseek,
821	.release = seq_release_net,
822};
823#endif
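The connection-oriented side (phonet_stream_ops) can be exercised in a similar way. The sketch below is hypothetical: the device, object and resource values are placeholders, and it assumes PN_PROTO_PIPE from the <linux/phonet.h> UAPI header; it only illustrates the connect path handled by pn_socket_connect() above.

/* Hypothetical sketch: open a Phonet pipe (SOCK_SEQPACKET) socket and
 * connect it to a remote object/resource; blocks in the TCP_SYN_SENT
 * wait loop of pn_socket_connect() unless O_NONBLOCK is set. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/phonet.h>

int main(void)
{
	struct sockaddr_pn spn;
	int fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&spn, 0, sizeof(spn));
	spn.spn_family = AF_PHONET;
	spn.spn_dev = 0x10;		/* placeholder: remote device address */
	spn.spn_obj = 0x00;		/* placeholder: remote object */
	spn.spn_resource = 0xD9;	/* placeholder: requested resource */

	if (connect(fd, (struct sockaddr *)&spn, sizeof(spn)) < 0) {
		perror("connect");
		return 1;
	}

	close(fd);
	return 0;
}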