net/phonet/socket.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * File: socket.c
  4 *
  5 * Phonet sockets
  6 *
  7 * Copyright (C) 2008 Nokia Corporation.
  8 *
  9 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
 10 *          Rémi Denis-Courmont
 11 */
 12
 13#include <linux/gfp.h>
 14#include <linux/kernel.h>
 15#include <linux/net.h>
 16#include <linux/poll.h>
 17#include <linux/sched/signal.h>
 18
 19#include <net/sock.h>
 20#include <net/tcp_states.h>
 21
 22#include <linux/phonet.h>
 23#include <linux/export.h>
 24#include <net/phonet/phonet.h>
 25#include <net/phonet/pep.h>
 26#include <net/phonet/pn_dev.h>
 27
 28static int pn_socket_release(struct socket *sock)
 29{
 30	struct sock *sk = sock->sk;
 31
 32	if (sk) {
 33		sock->sk = NULL;
 34		sk->sk_prot->close(sk, 0);
 35	}
 36	return 0;
 37}
 38
 39#define PN_HASHSIZE	16
 40#define PN_HASHMASK	(PN_HASHSIZE-1)
 41
 42
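/*
 * Table of all bound Phonet sockets: updates take pnsocks.lock, lookups
 * (pn_find_sock_by_sa, broadcast delivery) run under RCU.
 */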
 43static struct  {
 44	struct hlist_head hlist[PN_HASHSIZE];
 45	struct mutex lock;
 46} pnsocks;
 47
 48void __init pn_sock_init(void)
 49{
 50	unsigned int i;
 51
 52	for (i = 0; i < PN_HASHSIZE; i++)
 53		INIT_HLIST_HEAD(pnsocks.hlist + i);
 54	mutex_init(&pnsocks.lock);
 55}
 56
 57static struct hlist_head *pn_hash_list(u16 obj)
 58{
 59	return pnsocks.hlist + (obj & PN_HASHMASK);
 60}
 61
 62/*
 63 * Find address based on socket address, match only certain fields.
 64 * Also grab sock if it was found. Remember to sock_put it later.
 65 */
 66struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 67{
 68	struct sock *sknode;
 69	struct sock *rval = NULL;
 70	u16 obj = pn_sockaddr_get_object(spn);
 71	u8 res = spn->spn_resource;
 72	struct hlist_head *hlist = pn_hash_list(obj);
 73
 74	rcu_read_lock();
 75	sk_for_each_rcu(sknode, hlist) {
 76		struct pn_sock *pn = pn_sk(sknode);
 77		BUG_ON(!pn->sobject); /* unbound socket */
 78
 79		if (!net_eq(sock_net(sknode), net))
 80			continue;
 81		if (pn_port(obj)) {
 82			/* Look up socket by port */
 83			if (pn_port(pn->sobject) != pn_port(obj))
 84				continue;
 85		} else {
 86			/* If port is zero, look up by resource */
 87			if (pn->resource != res)
 88				continue;
 89		}
 90		if (pn_addr(pn->sobject) &&
 91		    pn_addr(pn->sobject) != pn_addr(obj))
 92			continue;
 93
 94		rval = sknode;
 95		sock_hold(sknode);
 96		break;
 97	}
 98	rcu_read_unlock();
 99
100	return rval;
101}
102
103/* Deliver a broadcast packet (only in bottom-half) */
104void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
105{
106	struct hlist_head *hlist = pnsocks.hlist;
107	unsigned int h;
108
109	rcu_read_lock();
110	for (h = 0; h < PN_HASHSIZE; h++) {
111		struct sock *sknode;
112
113		sk_for_each(sknode, hlist) {
114			struct sk_buff *clone;
115
116			if (!net_eq(sock_net(sknode), net))
117				continue;
118			if (!sock_flag(sknode, SOCK_BROADCAST))
119				continue;
120
121			clone = skb_clone(skb, GFP_ATOMIC);
122			if (clone) {
123				sock_hold(sknode);
124				sk_receive_skb(sknode, clone, 0);
125			}
126		}
127		hlist++;
128	}
129	rcu_read_unlock();
130}
131
132int pn_sock_hash(struct sock *sk)
133{
134	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
135
136	mutex_lock(&pnsocks.lock);
137	sk_add_node_rcu(sk, hlist);
138	mutex_unlock(&pnsocks.lock);
139
140	return 0;
141}
142EXPORT_SYMBOL(pn_sock_hash);
143
144void pn_sock_unhash(struct sock *sk)
145{
146	mutex_lock(&pnsocks.lock);
147	sk_del_node_init_rcu(sk);
148	mutex_unlock(&pnsocks.lock);
149	pn_sock_unbind_all_res(sk);
150	synchronize_rcu();
151}
152EXPORT_SYMBOL(pn_sock_unhash);
153
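/* Serializes local port allocation; held around get_port() in pn_socket_bind(). */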
154static DEFINE_MUTEX(port_mutex);
155
156static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
157{
158	struct sock *sk = sock->sk;
159	struct pn_sock *pn = pn_sk(sk);
160	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
161	int err;
162	u16 handle;
163	u8 saddr;
164
165	if (sk->sk_prot->bind)
166		return sk->sk_prot->bind(sk, addr, len);
167
168	if (len < sizeof(struct sockaddr_pn))
169		return -EINVAL;
170	if (spn->spn_family != AF_PHONET)
171		return -EAFNOSUPPORT;
172
173	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
174	saddr = pn_addr(handle);
175	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
176		return -EADDRNOTAVAIL;
177
178	lock_sock(sk);
179	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
180		err = -EINVAL; /* attempt to rebind */
181		goto out;
182	}
183	WARN_ON(sk_hashed(sk));
184	mutex_lock(&port_mutex);
185	err = sk->sk_prot->get_port(sk, pn_port(handle));
186	if (err)
187		goto out_port;
188
189	/* get_port() sets the port, bind() sets the address if applicable */
190	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
191	pn->resource = spn->spn_resource;
192
193	/* Enable RX on the socket */
194	err = sk->sk_prot->hash(sk);
195out_port:
196	mutex_unlock(&port_mutex);
197out:
198	release_sock(sk);
199	return err;
200}
201
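/*
 * Bind to a zeroed Phonet address so that get_port() picks a dynamic port.
 * A -EINVAL return from pn_socket_bind() means the socket was already bound.
 */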
202static int pn_socket_autobind(struct socket *sock)
203{
204	struct sockaddr_pn sa;
205	int err;
206
207	memset(&sa, 0, sizeof(sa));
208	sa.spn_family = AF_PHONET;
209	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
210				sizeof(struct sockaddr_pn));
211	if (err != -EINVAL)
212		return err;
213	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
214	return 0; /* socket was already bound */
215}
216
217static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
218		int len, int flags)
219{
220	struct sock *sk = sock->sk;
221	struct pn_sock *pn = pn_sk(sk);
222	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
223	struct task_struct *tsk = current;
224	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
225	int err;
226
227	if (pn_socket_autobind(sock))
228		return -ENOBUFS;
229	if (len < sizeof(struct sockaddr_pn))
230		return -EINVAL;
231	if (spn->spn_family != AF_PHONET)
232		return -EAFNOSUPPORT;
233
234	lock_sock(sk);
235
236	switch (sock->state) {
237	case SS_UNCONNECTED:
238		if (sk->sk_state != TCP_CLOSE) {
239			err = -EISCONN;
240			goto out;
241		}
242		break;
243	case SS_CONNECTING:
244		err = -EALREADY;
245		goto out;
246	default:
247		err = -EISCONN;
248		goto out;
249	}
250
251	pn->dobject = pn_sockaddr_get_object(spn);
252	pn->resource = pn_sockaddr_get_resource(spn);
253	sock->state = SS_CONNECTING;
254
255	err = sk->sk_prot->connect(sk, addr, len);
256	if (err) {
257		sock->state = SS_UNCONNECTED;
258		pn->dobject = 0;
259		goto out;
260	}
261
262	while (sk->sk_state == TCP_SYN_SENT) {
263		DEFINE_WAIT(wait);
264
265		if (!timeo) {
266			err = -EINPROGRESS;
267			goto out;
268		}
269		if (signal_pending(tsk)) {
270			err = sock_intr_errno(timeo);
271			goto out;
272		}
273
274		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
275						TASK_INTERRUPTIBLE);
276		release_sock(sk);
277		timeo = schedule_timeout(timeo);
278		lock_sock(sk);
279		finish_wait(sk_sleep(sk), &wait);
280	}
281
282	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
283		err = 0;
284	else if (sk->sk_state == TCP_CLOSE_WAIT)
285		err = -ECONNRESET;
286	else
287		err = -ECONNREFUSED;
288	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
289out:
290	release_sock(sk);
291	return err;
292}
293
294static int pn_socket_accept(struct socket *sock, struct socket *newsock,
295			    int flags, bool kern)
296{
297	struct sock *sk = sock->sk;
298	struct sock *newsk;
299	int err;
300
301	if (unlikely(sk->sk_state != TCP_LISTEN))
302		return -EINVAL;
303
304	newsk = sk->sk_prot->accept(sk, flags, &err, kern);
305	if (!newsk)
306		return err;
307
308	lock_sock(newsk);
309	sock_graft(newsk, newsock);
310	newsock->state = SS_CONNECTED;
311	release_sock(newsk);
312	return 0;
313}
314
315static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
316				int peer)
317{
318	struct sock *sk = sock->sk;
319	struct pn_sock *pn = pn_sk(sk);
320
321	memset(addr, 0, sizeof(struct sockaddr_pn));
322	addr->sa_family = AF_PHONET;
323	if (!peer) /* Race with bind() here is userland's problem. */
324		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
325					pn->sobject);
326
327	return sizeof(struct sockaddr_pn);
328}
329
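/* Only used by phonet_stream_ops: pep_sk() and tx_credits are pipe (PEP) socket state. */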
330static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
331					poll_table *wait)
332{
333	struct sock *sk = sock->sk;
334	struct pep_sock *pn = pep_sk(sk);
335	__poll_t mask = 0;
336
337	poll_wait(file, sk_sleep(sk), wait);
338
339	if (sk->sk_state == TCP_CLOSE)
340		return EPOLLERR;
341	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
342		mask |= EPOLLIN | EPOLLRDNORM;
343	if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
344		mask |= EPOLLPRI;
345	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
346		return EPOLLHUP;
347
348	if (sk->sk_state == TCP_ESTABLISHED &&
349		refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
350		atomic_read(&pn->tx_credits))
351		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
352
353	return mask;
354}
355
356static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
357				unsigned long arg)
358{
359	struct sock *sk = sock->sk;
360	struct pn_sock *pn = pn_sk(sk);
361
362	if (cmd == SIOCPNGETOBJECT) {
363		struct net_device *dev;
364		u16 handle;
365		u8 saddr;
366
367		if (get_user(handle, (__u16 __user *)arg))
368			return -EFAULT;
369
370		lock_sock(sk);
371		if (sk->sk_bound_dev_if)
372			dev = dev_get_by_index(sock_net(sk),
373						sk->sk_bound_dev_if);
374		else
375			dev = phonet_device_get(sock_net(sk));
376		if (dev && (dev->flags & IFF_UP))
377			saddr = phonet_address_get(dev, pn_addr(handle));
378		else
379			saddr = PN_NO_ADDR;
380		release_sock(sk);
381
382		if (dev)
383			dev_put(dev);
384		if (saddr == PN_NO_ADDR)
385			return -EHOSTUNREACH;
386
387		handle = pn_object(saddr, pn_port(pn->sobject));
388		return put_user(handle, (__u16 __user *)arg);
389	}
390
391	return sk->sk_prot->ioctl(sk, cmd, arg);
392}
393
394static int pn_socket_listen(struct socket *sock, int backlog)
395{
396	struct sock *sk = sock->sk;
397	int err = 0;
398
399	if (pn_socket_autobind(sock))
400		return -ENOBUFS;
401
402	lock_sock(sk);
403	if (sock->state != SS_UNCONNECTED) {
404		err = -EINVAL;
405		goto out;
406	}
407
408	if (sk->sk_state != TCP_LISTEN) {
409		sk->sk_state = TCP_LISTEN;
410		sk->sk_ack_backlog = 0;
411	}
412	sk->sk_max_ack_backlog = backlog;
413out:
414	release_sock(sk);
415	return err;
416}
417
418static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
419			     size_t total_len)
420{
421	struct sock *sk = sock->sk;
422
423	if (pn_socket_autobind(sock))
424		return -EAGAIN;
425
426	return sk->sk_prot->sendmsg(sk, m, total_len);
427}
428
429const struct proto_ops phonet_dgram_ops = {
430	.family		= AF_PHONET,
431	.owner		= THIS_MODULE,
432	.release	= pn_socket_release,
433	.bind		= pn_socket_bind,
434	.connect	= sock_no_connect,
435	.socketpair	= sock_no_socketpair,
436	.accept		= sock_no_accept,
437	.getname	= pn_socket_getname,
438	.poll		= datagram_poll,
439	.ioctl		= pn_socket_ioctl,
440	.listen		= sock_no_listen,
441	.shutdown	= sock_no_shutdown,
442	.sendmsg	= pn_socket_sendmsg,
443	.recvmsg	= sock_common_recvmsg,
444	.mmap		= sock_no_mmap,
445	.sendpage	= sock_no_sendpage,
446};
447
448const struct proto_ops phonet_stream_ops = {
449	.family		= AF_PHONET,
450	.owner		= THIS_MODULE,
451	.release	= pn_socket_release,
452	.bind		= pn_socket_bind,
453	.connect	= pn_socket_connect,
454	.socketpair	= sock_no_socketpair,
455	.accept		= pn_socket_accept,
456	.getname	= pn_socket_getname,
457	.poll		= pn_socket_poll,
458	.ioctl		= pn_socket_ioctl,
459	.listen		= pn_socket_listen,
460	.shutdown	= sock_no_shutdown,
461	.setsockopt	= sock_common_setsockopt,
462	.getsockopt	= sock_common_getsockopt,
463	.sendmsg	= pn_socket_sendmsg,
464	.recvmsg	= sock_common_recvmsg,
465	.mmap		= sock_no_mmap,
466	.sendpage	= sock_no_sendpage,
467};
468EXPORT_SYMBOL(phonet_stream_ops);
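/*
 * Illustrative userspace sketch (not part of this file; see
 * Documentation/networking/phonet.rst): a Phonet datagram endpoint is an
 * AF_PHONET/SOCK_DGRAM socket, and bind() on it lands in pn_socket_bind()
 * above, allocating a dynamic port when the address is zeroed.
 *
 *	struct sockaddr_pn addr = { .spn_family = AF_PHONET };
 *	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);
 *
 *	if (fd >= 0)
 *		bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */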
469
470/* allocate port for a socket */
471int pn_sock_get_port(struct sock *sk, unsigned short sport)
472{
473	static int port_cur;
474	struct net *net = sock_net(sk);
475	struct pn_sock *pn = pn_sk(sk);
476	struct sockaddr_pn try_sa;
477	struct sock *tmpsk;
478
479	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
480	try_sa.spn_family = AF_PHONET;
481	WARN_ON(!mutex_is_locked(&port_mutex));
482	if (!sport) {
483		/* search free port */
484		int port, pmin, pmax;
485
486		phonet_get_local_port_range(&pmin, &pmax);
487		for (port = pmin; port <= pmax; port++) {
488			port_cur++;
489			if (port_cur < pmin || port_cur > pmax)
490				port_cur = pmin;
491
492			pn_sockaddr_set_port(&try_sa, port_cur);
493			tmpsk = pn_find_sock_by_sa(net, &try_sa);
494			if (tmpsk == NULL) {
495				sport = port_cur;
496				goto found;
497			} else
498				sock_put(tmpsk);
499		}
500	} else {
501		/* try to find specific port */
502		pn_sockaddr_set_port(&try_sa, sport);
503		tmpsk = pn_find_sock_by_sa(net, &try_sa);
504		if (tmpsk == NULL)
505			/* No sock there! We can use that port... */
506			goto found;
507		else
508			sock_put(tmpsk);
509	}
510	/* the port must be in use already */
511	return -EADDRINUSE;
512
513found:
514	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
515	return 0;
516}
517EXPORT_SYMBOL(pn_sock_get_port);
518
519#ifdef CONFIG_PROC_FS
520static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
521{
522	struct net *net = seq_file_net(seq);
523	struct hlist_head *hlist = pnsocks.hlist;
524	struct sock *sknode;
525	unsigned int h;
526
527	for (h = 0; h < PN_HASHSIZE; h++) {
528		sk_for_each_rcu(sknode, hlist) {
529			if (!net_eq(net, sock_net(sknode)))
530				continue;
531			if (!pos)
532				return sknode;
533			pos--;
534		}
535		hlist++;
536	}
537	return NULL;
538}
539
540static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
541{
542	struct net *net = seq_file_net(seq);
543
544	do
545		sk = sk_next(sk);
546	while (sk && !net_eq(net, sock_net(sk)));
547
548	return sk;
549}
550
551static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
552	__acquires(rcu)
553{
554	rcu_read_lock();
555	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
556}
557
558static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
559{
560	struct sock *sk;
561
562	if (v == SEQ_START_TOKEN)
563		sk = pn_sock_get_idx(seq, 0);
564	else
565		sk = pn_sock_get_next(seq, v);
566	(*pos)++;
567	return sk;
568}
569
570static void pn_sock_seq_stop(struct seq_file *seq, void *v)
571	__releases(rcu)
572{
573	rcu_read_unlock();
574}
575
576static int pn_sock_seq_show(struct seq_file *seq, void *v)
577{
578	seq_setwidth(seq, 127);
579	if (v == SEQ_START_TOKEN)
580		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
581			"  uid inode ref pointer drops");
582	else {
583		struct sock *sk = v;
584		struct pn_sock *pn = pn_sk(sk);
585
586		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
587			"%d %pK %u",
588			sk->sk_protocol, pn->sobject, pn->dobject,
589			pn->resource, sk->sk_state,
590			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
591			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
592			sock_i_ino(sk),
593			refcount_read(&sk->sk_refcnt), sk,
594			atomic_read(&sk->sk_drops));
595	}
596	seq_pad(seq, '\n');
597	return 0;
598}
599
600const struct seq_operations pn_sock_seq_ops = {
601	.start = pn_sock_seq_start,
602	.next = pn_sock_seq_next,
603	.stop = pn_sock_seq_stop,
604	.show = pn_sock_seq_show,
605};
606#endif
607
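/* At most one socket may be bound to each of the 256 Phonet resources. */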
608static struct  {
609	struct sock *sk[256];
610} pnres;
611
612/*
613 * Find and hold socket based on resource.
614 */
615struct sock *pn_find_sock_by_res(struct net *net, u8 res)
616{
617	struct sock *sk;
618
619	if (!net_eq(net, &init_net))
620		return NULL;
621
622	rcu_read_lock();
623	sk = rcu_dereference(pnres.sk[res]);
624	if (sk)
625		sock_hold(sk);
626	rcu_read_unlock();
627	return sk;
628}
629
630static DEFINE_MUTEX(resource_mutex);
631
632int pn_sock_bind_res(struct sock *sk, u8 res)
633{
634	int ret = -EADDRINUSE;
635
636	if (!net_eq(sock_net(sk), &init_net))
637		return -ENOIOCTLCMD;
638	if (!capable(CAP_SYS_ADMIN))
639		return -EPERM;
640	if (pn_socket_autobind(sk->sk_socket))
641		return -EAGAIN;
642
643	mutex_lock(&resource_mutex);
644	if (pnres.sk[res] == NULL) {
645		sock_hold(sk);
646		rcu_assign_pointer(pnres.sk[res], sk);
647		ret = 0;
648	}
649	mutex_unlock(&resource_mutex);
650	return ret;
651}
652
653int pn_sock_unbind_res(struct sock *sk, u8 res)
654{
655	int ret = -ENOENT;
656
657	if (!capable(CAP_SYS_ADMIN))
658		return -EPERM;
659
660	mutex_lock(&resource_mutex);
661	if (pnres.sk[res] == sk) {
662		RCU_INIT_POINTER(pnres.sk[res], NULL);
663		ret = 0;
664	}
665	mutex_unlock(&resource_mutex);
666
667	if (ret == 0) {
668		synchronize_rcu();
669		sock_put(sk);
670	}
671	return ret;
672}
673
674void pn_sock_unbind_all_res(struct sock *sk)
675{
676	unsigned int res, match = 0;
677
678	mutex_lock(&resource_mutex);
679	for (res = 0; res < 256; res++) {
680		if (pnres.sk[res] == sk) {
681			RCU_INIT_POINTER(pnres.sk[res], NULL);
682			match++;
683		}
684	}
685	mutex_unlock(&resource_mutex);
686
687	while (match > 0) {
688		__sock_put(sk);
689		match--;
690	}
691	/* Caller is responsible for RCU sync before final sock_put() */
692}
693
694#ifdef CONFIG_PROC_FS
695static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
696{
697	struct net *net = seq_file_net(seq);
698	unsigned int i;
699
700	if (!net_eq(net, &init_net))
701		return NULL;
702
703	for (i = 0; i < 256; i++) {
704		if (pnres.sk[i] == NULL)
705			continue;
706		if (!pos)
707			return pnres.sk + i;
708		pos--;
709	}
710	return NULL;
711}
712
713static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
714{
715	struct net *net = seq_file_net(seq);
716	unsigned int i;
717
718	BUG_ON(!net_eq(net, &init_net));
719
720	for (i = (sk - pnres.sk) + 1; i < 256; i++)
721		if (pnres.sk[i])
722			return pnres.sk + i;
723	return NULL;
724}
725
726static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
727	__acquires(resource_mutex)
728{
729	mutex_lock(&resource_mutex);
730	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
731}
732
733static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
734{
735	struct sock **sk;
736
737	if (v == SEQ_START_TOKEN)
738		sk = pn_res_get_idx(seq, 0);
739	else
740		sk = pn_res_get_next(seq, v);
741	(*pos)++;
742	return sk;
743}
744
745static void pn_res_seq_stop(struct seq_file *seq, void *v)
746	__releases(resource_mutex)
747{
748	mutex_unlock(&resource_mutex);
749}
750
751static int pn_res_seq_show(struct seq_file *seq, void *v)
752{
753	seq_setwidth(seq, 63);
754	if (v == SEQ_START_TOKEN)
755		seq_puts(seq, "rs   uid inode");
756	else {
757		struct sock **psk = v;
758		struct sock *sk = *psk;
759
760		seq_printf(seq, "%02X %5u %lu",
761			   (int) (psk - pnres.sk),
762			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
763			   sock_i_ino(sk));
764	}
765	seq_pad(seq, '\n');
766	return 0;
767}
768
769const struct seq_operations pn_res_seq_ops = {
770	.start = pn_res_seq_start,
771	.next = pn_res_seq_next,
772	.stop = pn_res_seq_stop,
773	.show = pn_res_seq_show,
774};
775#endif
net/phonet/socket.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * File: socket.c
  4 *
  5 * Phonet sockets
  6 *
  7 * Copyright (C) 2008 Nokia Corporation.
  8 *
  9 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
 10 *          Rémi Denis-Courmont
 11 */
 12
 13#include <linux/gfp.h>
 14#include <linux/kernel.h>
 15#include <linux/net.h>
 16#include <linux/poll.h>
 17#include <linux/sched/signal.h>
 18
 19#include <net/sock.h>
 20#include <net/tcp_states.h>
 21
 22#include <linux/phonet.h>
 23#include <linux/export.h>
 24#include <net/phonet/phonet.h>
 25#include <net/phonet/pep.h>
 26#include <net/phonet/pn_dev.h>
 27
 28static int pn_socket_release(struct socket *sock)
 29{
 30	struct sock *sk = sock->sk;
 31
 32	if (sk) {
 33		sock->sk = NULL;
 34		sk->sk_prot->close(sk, 0);
 35	}
 36	return 0;
 37}
 38
 39#define PN_HASHSIZE	16
 40#define PN_HASHMASK	(PN_HASHSIZE-1)
 41
 42
 43static struct  {
 44	struct hlist_head hlist[PN_HASHSIZE];
 45	struct mutex lock;
 46} pnsocks;
 47
 48void __init pn_sock_init(void)
 49{
 50	unsigned int i;
 51
 52	for (i = 0; i < PN_HASHSIZE; i++)
 53		INIT_HLIST_HEAD(pnsocks.hlist + i);
 54	mutex_init(&pnsocks.lock);
 55}
 56
 57static struct hlist_head *pn_hash_list(u16 obj)
 58{
 59	return pnsocks.hlist + (obj & PN_HASHMASK);
 60}
 61
 62/*
 63 * Find address based on socket address, match only certain fields.
 64 * Also grab sock if it was found. Remember to sock_put it later.
 65 */
 66struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 67{
 68	struct sock *sknode;
 69	struct sock *rval = NULL;
 70	u16 obj = pn_sockaddr_get_object(spn);
 71	u8 res = spn->spn_resource;
 72	struct hlist_head *hlist = pn_hash_list(obj);
 73
 74	rcu_read_lock();
 75	sk_for_each_rcu(sknode, hlist) {
 76		struct pn_sock *pn = pn_sk(sknode);
 77		BUG_ON(!pn->sobject); /* unbound socket */
 78
 79		if (!net_eq(sock_net(sknode), net))
 80			continue;
 81		if (pn_port(obj)) {
 82			/* Look up socket by port */
 83			if (pn_port(pn->sobject) != pn_port(obj))
 84				continue;
 85		} else {
 86			/* If port is zero, look up by resource */
 87			if (pn->resource != res)
 88				continue;
 89		}
 90		if (pn_addr(pn->sobject) &&
 91		    pn_addr(pn->sobject) != pn_addr(obj))
 92			continue;
 93
 94		rval = sknode;
 95		sock_hold(sknode);
 96		break;
 97	}
 98	rcu_read_unlock();
 99
100	return rval;
101}
102
103/* Deliver a broadcast packet (only in bottom-half) */
104void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
105{
106	struct hlist_head *hlist = pnsocks.hlist;
107	unsigned int h;
108
109	rcu_read_lock();
110	for (h = 0; h < PN_HASHSIZE; h++) {
111		struct sock *sknode;
112
113		sk_for_each(sknode, hlist) {
114			struct sk_buff *clone;
115
116			if (!net_eq(sock_net(sknode), net))
117				continue;
118			if (!sock_flag(sknode, SOCK_BROADCAST))
119				continue;
120
121			clone = skb_clone(skb, GFP_ATOMIC);
122			if (clone) {
123				sock_hold(sknode);
124				sk_receive_skb(sknode, clone, 0);
125			}
126		}
127		hlist++;
128	}
129	rcu_read_unlock();
130}
131
132int pn_sock_hash(struct sock *sk)
133{
134	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
135
136	mutex_lock(&pnsocks.lock);
137	sk_add_node_rcu(sk, hlist);
138	mutex_unlock(&pnsocks.lock);
139
140	return 0;
141}
142EXPORT_SYMBOL(pn_sock_hash);
143
144void pn_sock_unhash(struct sock *sk)
145{
146	mutex_lock(&pnsocks.lock);
147	sk_del_node_init_rcu(sk);
148	mutex_unlock(&pnsocks.lock);
149	pn_sock_unbind_all_res(sk);
150	synchronize_rcu();
151}
152EXPORT_SYMBOL(pn_sock_unhash);
153
154static DEFINE_MUTEX(port_mutex);
155
156static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
157{
158	struct sock *sk = sock->sk;
159	struct pn_sock *pn = pn_sk(sk);
160	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
161	int err;
162	u16 handle;
163	u8 saddr;
164
165	if (sk->sk_prot->bind)
166		return sk->sk_prot->bind(sk, addr, len);
167
168	if (len < sizeof(struct sockaddr_pn))
169		return -EINVAL;
170	if (spn->spn_family != AF_PHONET)
171		return -EAFNOSUPPORT;
172
173	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
174	saddr = pn_addr(handle);
175	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
176		return -EADDRNOTAVAIL;
177
178	lock_sock(sk);
179	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
180		err = -EINVAL; /* attempt to rebind */
181		goto out;
182	}
183	WARN_ON(sk_hashed(sk));
184	mutex_lock(&port_mutex);
185	err = sk->sk_prot->get_port(sk, pn_port(handle));
186	if (err)
187		goto out_port;
188
189	/* get_port() sets the port, bind() sets the address if applicable */
190	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
191	pn->resource = spn->spn_resource;
192
193	/* Enable RX on the socket */
194	err = sk->sk_prot->hash(sk);
195out_port:
196	mutex_unlock(&port_mutex);
197out:
198	release_sock(sk);
199	return err;
200}
201
202static int pn_socket_autobind(struct socket *sock)
203{
204	struct sockaddr_pn sa;
205	int err;
206
207	memset(&sa, 0, sizeof(sa));
208	sa.spn_family = AF_PHONET;
209	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
210				sizeof(struct sockaddr_pn));
211	if (err != -EINVAL)
212		return err;
213	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
214	return 0; /* socket was already bound */
215}
216
217static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
218		int len, int flags)
219{
220	struct sock *sk = sock->sk;
221	struct pn_sock *pn = pn_sk(sk);
222	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
223	struct task_struct *tsk = current;
224	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
225	int err;
226
227	if (pn_socket_autobind(sock))
228		return -ENOBUFS;
229	if (len < sizeof(struct sockaddr_pn))
230		return -EINVAL;
231	if (spn->spn_family != AF_PHONET)
232		return -EAFNOSUPPORT;
233
234	lock_sock(sk);
235
236	switch (sock->state) {
237	case SS_UNCONNECTED:
238		if (sk->sk_state != TCP_CLOSE) {
239			err = -EISCONN;
240			goto out;
241		}
242		break;
243	case SS_CONNECTING:
244		err = -EALREADY;
245		goto out;
246	default:
247		err = -EISCONN;
248		goto out;
249	}
250
251	pn->dobject = pn_sockaddr_get_object(spn);
252	pn->resource = pn_sockaddr_get_resource(spn);
253	sock->state = SS_CONNECTING;
254
255	err = sk->sk_prot->connect(sk, addr, len);
256	if (err) {
257		sock->state = SS_UNCONNECTED;
258		pn->dobject = 0;
259		goto out;
260	}
261
262	while (sk->sk_state == TCP_SYN_SENT) {
263		DEFINE_WAIT(wait);
264
265		if (!timeo) {
266			err = -EINPROGRESS;
267			goto out;
268		}
269		if (signal_pending(tsk)) {
270			err = sock_intr_errno(timeo);
271			goto out;
272		}
273
274		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
275						TASK_INTERRUPTIBLE);
276		release_sock(sk);
277		timeo = schedule_timeout(timeo);
278		lock_sock(sk);
279		finish_wait(sk_sleep(sk), &wait);
280	}
281
282	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
283		err = 0;
284	else if (sk->sk_state == TCP_CLOSE_WAIT)
285		err = -ECONNRESET;
286	else
287		err = -ECONNREFUSED;
288	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
289out:
290	release_sock(sk);
291	return err;
292}
293
294static int pn_socket_accept(struct socket *sock, struct socket *newsock,
295			    struct proto_accept_arg *arg)
296{
297	struct sock *sk = sock->sk;
298	struct sock *newsk;
299
300	if (unlikely(sk->sk_state != TCP_LISTEN))
301		return -EINVAL;
302
303	newsk = sk->sk_prot->accept(sk, arg);
304	if (!newsk)
305		return arg->err;
306
307	lock_sock(newsk);
308	sock_graft(newsk, newsock);
309	newsock->state = SS_CONNECTED;
310	release_sock(newsk);
311	return 0;
312}
313
314static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
315				int peer)
316{
317	struct sock *sk = sock->sk;
318	struct pn_sock *pn = pn_sk(sk);
319
320	memset(addr, 0, sizeof(struct sockaddr_pn));
321	addr->sa_family = AF_PHONET;
322	if (!peer) /* Race with bind() here is userland's problem. */
323		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
324					pn->sobject);
325
326	return sizeof(struct sockaddr_pn);
327}
328
329static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
330					poll_table *wait)
331{
332	struct sock *sk = sock->sk;
333	struct pep_sock *pn = pep_sk(sk);
334	__poll_t mask = 0;
335
336	poll_wait(file, sk_sleep(sk), wait);
337
338	if (sk->sk_state == TCP_CLOSE)
339		return EPOLLERR;
340	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
341		mask |= EPOLLIN | EPOLLRDNORM;
342	if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
343		mask |= EPOLLPRI;
344	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
345		return EPOLLHUP;
346
347	if (sk->sk_state == TCP_ESTABLISHED &&
348		refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
349		atomic_read(&pn->tx_credits))
350		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
351
352	return mask;
353}
354
355static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
356				unsigned long arg)
357{
358	struct sock *sk = sock->sk;
359	struct pn_sock *pn = pn_sk(sk);
360
361	if (cmd == SIOCPNGETOBJECT) {
362		struct net_device *dev;
363		u16 handle;
364		u8 saddr;
365
366		if (get_user(handle, (__u16 __user *)arg))
367			return -EFAULT;
368
369		lock_sock(sk);
370		if (sk->sk_bound_dev_if)
371			dev = dev_get_by_index(sock_net(sk),
372						sk->sk_bound_dev_if);
373		else
374			dev = phonet_device_get(sock_net(sk));
375		if (dev && (dev->flags & IFF_UP))
376			saddr = phonet_address_get(dev, pn_addr(handle));
377		else
378			saddr = PN_NO_ADDR;
379		release_sock(sk);
380
381		dev_put(dev);
382		if (saddr == PN_NO_ADDR)
383			return -EHOSTUNREACH;
384
385		handle = pn_object(saddr, pn_port(pn->sobject));
386		return put_user(handle, (__u16 __user *)arg);
387	}
388
389	return sk_ioctl(sk, cmd, (void __user *)arg);
390}
391
392static int pn_socket_listen(struct socket *sock, int backlog)
393{
394	struct sock *sk = sock->sk;
395	int err = 0;
396
397	if (pn_socket_autobind(sock))
398		return -ENOBUFS;
399
400	lock_sock(sk);
401	if (sock->state != SS_UNCONNECTED) {
402		err = -EINVAL;
403		goto out;
404	}
405
406	if (sk->sk_state != TCP_LISTEN) {
407		sk->sk_state = TCP_LISTEN;
408		sk->sk_ack_backlog = 0;
409	}
410	sk->sk_max_ack_backlog = backlog;
411out:
412	release_sock(sk);
413	return err;
414}
415
416static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
417			     size_t total_len)
418{
419	struct sock *sk = sock->sk;
420
421	if (pn_socket_autobind(sock))
422		return -EAGAIN;
423
424	return sk->sk_prot->sendmsg(sk, m, total_len);
425}
426
427const struct proto_ops phonet_dgram_ops = {
428	.family		= AF_PHONET,
429	.owner		= THIS_MODULE,
430	.release	= pn_socket_release,
431	.bind		= pn_socket_bind,
432	.connect	= sock_no_connect,
433	.socketpair	= sock_no_socketpair,
434	.accept		= sock_no_accept,
435	.getname	= pn_socket_getname,
436	.poll		= datagram_poll,
437	.ioctl		= pn_socket_ioctl,
438	.listen		= sock_no_listen,
439	.shutdown	= sock_no_shutdown,
440	.sendmsg	= pn_socket_sendmsg,
441	.recvmsg	= sock_common_recvmsg,
442	.mmap		= sock_no_mmap,
443};
444
445const struct proto_ops phonet_stream_ops = {
446	.family		= AF_PHONET,
447	.owner		= THIS_MODULE,
448	.release	= pn_socket_release,
449	.bind		= pn_socket_bind,
450	.connect	= pn_socket_connect,
451	.socketpair	= sock_no_socketpair,
452	.accept		= pn_socket_accept,
453	.getname	= pn_socket_getname,
454	.poll		= pn_socket_poll,
455	.ioctl		= pn_socket_ioctl,
456	.listen		= pn_socket_listen,
457	.shutdown	= sock_no_shutdown,
458	.setsockopt	= sock_common_setsockopt,
459	.getsockopt	= sock_common_getsockopt,
460	.sendmsg	= pn_socket_sendmsg,
461	.recvmsg	= sock_common_recvmsg,
462	.mmap		= sock_no_mmap,
463};
464EXPORT_SYMBOL(phonet_stream_ops);
465
466/* allocate port for a socket */
467int pn_sock_get_port(struct sock *sk, unsigned short sport)
468{
469	static int port_cur;
470	struct net *net = sock_net(sk);
471	struct pn_sock *pn = pn_sk(sk);
472	struct sockaddr_pn try_sa;
473	struct sock *tmpsk;
474
475	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
476	try_sa.spn_family = AF_PHONET;
477	WARN_ON(!mutex_is_locked(&port_mutex));
478	if (!sport) {
479		/* search free port */
480		int port, pmin, pmax;
481
482		phonet_get_local_port_range(&pmin, &pmax);
483		for (port = pmin; port <= pmax; port++) {
484			port_cur++;
485			if (port_cur < pmin || port_cur > pmax)
486				port_cur = pmin;
487
488			pn_sockaddr_set_port(&try_sa, port_cur);
489			tmpsk = pn_find_sock_by_sa(net, &try_sa);
490			if (tmpsk == NULL) {
491				sport = port_cur;
492				goto found;
493			} else
494				sock_put(tmpsk);
495		}
496	} else {
497		/* try to find specific port */
498		pn_sockaddr_set_port(&try_sa, sport);
499		tmpsk = pn_find_sock_by_sa(net, &try_sa);
500		if (tmpsk == NULL)
501			/* No sock there! We can use that port... */
502			goto found;
503		else
504			sock_put(tmpsk);
505	}
506	/* the port must be in use already */
507	return -EADDRINUSE;
508
509found:
510	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
511	return 0;
512}
513EXPORT_SYMBOL(pn_sock_get_port);
514
515#ifdef CONFIG_PROC_FS
516static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
517{
518	struct net *net = seq_file_net(seq);
519	struct hlist_head *hlist = pnsocks.hlist;
520	struct sock *sknode;
521	unsigned int h;
522
523	for (h = 0; h < PN_HASHSIZE; h++) {
524		sk_for_each_rcu(sknode, hlist) {
525			if (!net_eq(net, sock_net(sknode)))
526				continue;
527			if (!pos)
528				return sknode;
529			pos--;
530		}
531		hlist++;
532	}
533	return NULL;
534}
535
536static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
537{
538	struct net *net = seq_file_net(seq);
539
540	do
541		sk = sk_next(sk);
542	while (sk && !net_eq(net, sock_net(sk)));
543
544	return sk;
545}
546
547static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
548	__acquires(rcu)
549{
550	rcu_read_lock();
551	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
552}
553
554static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
555{
556	struct sock *sk;
557
558	if (v == SEQ_START_TOKEN)
559		sk = pn_sock_get_idx(seq, 0);
560	else
561		sk = pn_sock_get_next(seq, v);
562	(*pos)++;
563	return sk;
564}
565
566static void pn_sock_seq_stop(struct seq_file *seq, void *v)
567	__releases(rcu)
568{
569	rcu_read_unlock();
570}
571
572static int pn_sock_seq_show(struct seq_file *seq, void *v)
573{
574	seq_setwidth(seq, 127);
575	if (v == SEQ_START_TOKEN)
576		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
577			"  uid inode ref pointer drops");
578	else {
579		struct sock *sk = v;
580		struct pn_sock *pn = pn_sk(sk);
581
582		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
583			"%d %pK %u",
584			sk->sk_protocol, pn->sobject, pn->dobject,
585			pn->resource, sk->sk_state,
586			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
587			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
588			sock_i_ino(sk),
589			refcount_read(&sk->sk_refcnt), sk,
590			atomic_read(&sk->sk_drops));
591	}
592	seq_pad(seq, '\n');
593	return 0;
594}
595
596const struct seq_operations pn_sock_seq_ops = {
597	.start = pn_sock_seq_start,
598	.next = pn_sock_seq_next,
599	.stop = pn_sock_seq_stop,
600	.show = pn_sock_seq_show,
601};
602#endif
603
604static struct  {
605	struct sock *sk[256];
606} pnres;
607
608/*
609 * Find and hold socket based on resource.
610 */
611struct sock *pn_find_sock_by_res(struct net *net, u8 res)
612{
613	struct sock *sk;
614
615	if (!net_eq(net, &init_net))
616		return NULL;
617
618	rcu_read_lock();
619	sk = rcu_dereference(pnres.sk[res]);
620	if (sk)
621		sock_hold(sk);
622	rcu_read_unlock();
623	return sk;
624}
625
626static DEFINE_MUTEX(resource_mutex);
627
628int pn_sock_bind_res(struct sock *sk, u8 res)
629{
630	int ret = -EADDRINUSE;
631
632	if (!net_eq(sock_net(sk), &init_net))
633		return -ENOIOCTLCMD;
634	if (!capable(CAP_SYS_ADMIN))
635		return -EPERM;
636	if (pn_socket_autobind(sk->sk_socket))
637		return -EAGAIN;
638
639	mutex_lock(&resource_mutex);
640	if (pnres.sk[res] == NULL) {
641		sock_hold(sk);
642		rcu_assign_pointer(pnres.sk[res], sk);
643		ret = 0;
644	}
645	mutex_unlock(&resource_mutex);
646	return ret;
647}
648
649int pn_sock_unbind_res(struct sock *sk, u8 res)
650{
651	int ret = -ENOENT;
652
653	if (!capable(CAP_SYS_ADMIN))
654		return -EPERM;
655
656	mutex_lock(&resource_mutex);
657	if (pnres.sk[res] == sk) {
658		RCU_INIT_POINTER(pnres.sk[res], NULL);
659		ret = 0;
660	}
661	mutex_unlock(&resource_mutex);
662
663	if (ret == 0) {
664		synchronize_rcu();
665		sock_put(sk);
666	}
667	return ret;
668}
669
670void pn_sock_unbind_all_res(struct sock *sk)
671{
672	unsigned int res, match = 0;
673
674	mutex_lock(&resource_mutex);
675	for (res = 0; res < 256; res++) {
676		if (pnres.sk[res] == sk) {
677			RCU_INIT_POINTER(pnres.sk[res], NULL);
678			match++;
679		}
680	}
681	mutex_unlock(&resource_mutex);
682
683	while (match > 0) {
684		__sock_put(sk);
685		match--;
686	}
687	/* Caller is responsible for RCU sync before final sock_put() */
688}
689
690#ifdef CONFIG_PROC_FS
691static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
692{
693	struct net *net = seq_file_net(seq);
694	unsigned int i;
695
696	if (!net_eq(net, &init_net))
697		return NULL;
698
699	for (i = 0; i < 256; i++) {
700		if (pnres.sk[i] == NULL)
701			continue;
702		if (!pos)
703			return pnres.sk + i;
704		pos--;
705	}
706	return NULL;
707}
708
709static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
710{
711	struct net *net = seq_file_net(seq);
712	unsigned int i;
713
714	BUG_ON(!net_eq(net, &init_net));
715
716	for (i = (sk - pnres.sk) + 1; i < 256; i++)
717		if (pnres.sk[i])
718			return pnres.sk + i;
719	return NULL;
720}
721
722static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
723	__acquires(resource_mutex)
724{
725	mutex_lock(&resource_mutex);
726	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
727}
728
729static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
730{
731	struct sock **sk;
732
733	if (v == SEQ_START_TOKEN)
734		sk = pn_res_get_idx(seq, 0);
735	else
736		sk = pn_res_get_next(seq, v);
737	(*pos)++;
738	return sk;
739}
740
741static void pn_res_seq_stop(struct seq_file *seq, void *v)
742	__releases(resource_mutex)
743{
744	mutex_unlock(&resource_mutex);
745}
746
747static int pn_res_seq_show(struct seq_file *seq, void *v)
748{
749	seq_setwidth(seq, 63);
750	if (v == SEQ_START_TOKEN)
751		seq_puts(seq, "rs   uid inode");
752	else {
753		struct sock **psk = v;
754		struct sock *sk = *psk;
755
756		seq_printf(seq, "%02X %5u %lu",
757			   (int) (psk - pnres.sk),
758			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
759			   sock_i_ino(sk));
760	}
761	seq_pad(seq, '\n');
762	return 0;
763}
764
765const struct seq_operations pn_res_seq_ops = {
766	.start = pn_res_seq_start,
767	.next = pn_res_seq_next,
768	.stop = pn_res_seq_stop,
769	.show = pn_res_seq_show,
770};
771#endif