net/phonet/pep.c, Linux v4.6
   1/*
   2 * File: pep.c
   3 *
   4 * Phonet pipe protocol end point socket
   5 *
   6 * Copyright (C) 2008 Nokia Corporation.
   7 *
   8 * Author: Rémi Denis-Courmont
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public License
  12 * version 2 as published by the Free Software Foundation.
  13 *
  14 * This program is distributed in the hope that it will be useful, but
  15 * WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  17 * General Public License for more details.
  18 *
  19 * You should have received a copy of the GNU General Public License
  20 * along with this program; if not, write to the Free Software
  21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  22 * 02110-1301 USA
  23 */
  24
  25#include <linux/kernel.h>
  26#include <linux/slab.h>
  27#include <linux/socket.h>
  28#include <net/sock.h>
  29#include <net/tcp_states.h>
  30#include <asm/ioctls.h>
  31
  32#include <linux/phonet.h>
  33#include <linux/module.h>
  34#include <net/phonet/phonet.h>
  35#include <net/phonet/pep.h>
  36#include <net/phonet/gprs.h>
  37
  38/* sk_state values:
  39 * TCP_CLOSE		sock not in use yet
  40 * TCP_CLOSE_WAIT	disconnected pipe
  41 * TCP_LISTEN		listening pipe endpoint
  42 * TCP_SYN_RECV		connected pipe in disabled state
  43 * TCP_ESTABLISHED	connected pipe in enabled state
  44 *
  45 * pep_sock locking:
  46 *  - sk_state, hlist: sock lock needed
  47 *  - listener: read only
  48 *  - pipe_handle: read only
  49 */
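/* Annotation (not part of the kernel source): TCP_SYN_SENT is also used below
 * for an actively connecting pipe that is waiting for PNS_PEP_CONNECT_RESP or
 * PNS_PEP_ENABLE_RESP; see pep_sock_connect(), pep_sock_enable() and
 * pipe_handler_do_rcv(). */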
  50
  51#define CREDITS_MAX	10
  52#define CREDITS_THR	7
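/* Annotation (not part of the kernel source): rx_credits are topped back up to
 * CREDITS_MAX by pipe_grant_credits() below, but only once no more than
 * CREDITS_MAX - CREDITS_THR = 3 credits remain, so credit grants are batched
 * rather than sent for every received data packet. */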
  53
  54#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
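/* Annotation (not part of the kernel source): pep_sb_size() adds the 2-byte
 * sub-block header and rounds the result up to a 32-bit boundary, e.g.
 * pep_sb_size(2) == 4 and pep_sb_size(5) == 8. */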
  55
  56/* Get the next TLV sub-block. */
  57static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
  58					void *buf)
  59{
  60	void *data = NULL;
  61	struct {
  62		u8 sb_type;
  63		u8 sb_len;
  64	} *ph, h;
  65	int buflen = *plen;
  66
  67	ph = skb_header_pointer(skb, 0, 2, &h);
  68	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
  69		return NULL;
  70	ph->sb_len -= 2;
  71	*ptype = ph->sb_type;
  72	*plen = ph->sb_len;
  73
  74	if (buflen > ph->sb_len)
  75		buflen = ph->sb_len;
  76	data = skb_header_pointer(skb, 2, buflen, buf);
  77	__skb_pull(skb, 2 + ph->sb_len);
  78	return data;
  79}
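/* Annotation (not part of the kernel source): each TLV sub-block on the wire
 * is <1-byte type><1-byte total length><length - 2 bytes of payload>. On
 * return, *plen holds only the payload length and the whole sub-block has
 * already been pulled off the skb. */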
  80
  81static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
  82					int len, gfp_t priority)
  83{
  84	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
  85	if (!skb)
  86		return NULL;
  87	skb_set_owner_w(skb, sk);
  88
  89	skb_reserve(skb, MAX_PNPIPE_HEADER);
  90	__skb_put(skb, len);
  91	skb_copy_to_linear_data(skb, payload, len);
  92	__skb_push(skb, sizeof(struct pnpipehdr));
  93	skb_reset_transport_header(skb);
  94	return skb;
  95}
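/* Annotation (not part of the kernel source): the skb returned above has its
 * data pointer on the (still uninitialised) pipe header, the caller's payload
 * copied right behind it, and MAX_PNPIPE_HEADER - sizeof(struct pnpipehdr)
 * bytes of headroom left for the lower Phonet layers. */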
  96
  97static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
  98			const void *data, int len, gfp_t priority)
  99{
 100	const struct pnpipehdr *oph = pnp_hdr(oskb);
 101	struct pnpipehdr *ph;
 102	struct sk_buff *skb;
 103	struct sockaddr_pn peer;
 104
 105	skb = pep_alloc_skb(sk, data, len, priority);
 106	if (!skb)
 107		return -ENOMEM;
 108
 109	ph = pnp_hdr(skb);
 110	ph->utid = oph->utid;
 111	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
 112	ph->pipe_handle = oph->pipe_handle;
 113	ph->error_code = code;
 114
 115	pn_skb_get_src_sockaddr(oskb, &peer);
 116	return pn_skb_send(sk, skb, &peer);
 117}
 118
 119static int pep_indicate(struct sock *sk, u8 id, u8 code,
 120			const void *data, int len, gfp_t priority)
 121{
 122	struct pep_sock *pn = pep_sk(sk);
 123	struct pnpipehdr *ph;
 124	struct sk_buff *skb;
 125
 126	skb = pep_alloc_skb(sk, data, len, priority);
 127	if (!skb)
 128		return -ENOMEM;
 129
 130	ph = pnp_hdr(skb);
 131	ph->utid = 0;
 132	ph->message_id = id;
 133	ph->pipe_handle = pn->pipe_handle;
 134	ph->data[0] = code;
 135	return pn_skb_send(sk, skb, NULL);
 136}
 137
 138#define PAD 0x00
 139
 140static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
 141				const void *data, int len)
 142{
 143	struct pep_sock *pn = pep_sk(sk);
 144	struct pnpipehdr *ph;
 145	struct sk_buff *skb;
 146
 147	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
 148	if (!skb)
 149		return -ENOMEM;
 150
 151	ph = pnp_hdr(skb);
 152	ph->utid = id; /* whatever */
 153	ph->message_id = id;
 154	ph->pipe_handle = pn->pipe_handle;
 155	ph->data[0] = code;
 156	return pn_skb_send(sk, skb, NULL);
 157}
 158
 159static int pipe_handler_send_created_ind(struct sock *sk)
 160{
 161	struct pep_sock *pn = pep_sk(sk);
 162	u8 data[4] = {
 163		PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
 164		pn->tx_fc, pn->rx_fc,
 165	};
 166
 167	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
 168				data, 4, GFP_ATOMIC);
 169}
 170
 171static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
 172{
 173	static const u8 data[20] = {
 174		PAD, PAD, PAD, 2 /* sub-blocks */,
 175		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
 176			PN_MULTI_CREDIT_FLOW_CONTROL,
 177			PN_ONE_CREDIT_FLOW_CONTROL,
 178			PN_LEGACY_FLOW_CONTROL,
 179			PAD,
 180		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
 181			PN_MULTI_CREDIT_FLOW_CONTROL,
 182			PN_ONE_CREDIT_FLOW_CONTROL,
 183			PN_LEGACY_FLOW_CONTROL,
 184			PAD,
 185	};
 186
 187	might_sleep();
 188	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
 189				GFP_KERNEL);
 190}
 191
 192static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
 193				gfp_t priority)
 194{
 195	static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
 196	WARN_ON(code == PN_PIPE_NO_ERROR);
 197	return pep_reply(sk, skb, code, data, sizeof(data), priority);
 198}
 199
 200/* Control requests are not sent by the pipe service and have a specific
 201 * message format. */
 202static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 203				gfp_t priority)
 204{
 205	const struct pnpipehdr *oph = pnp_hdr(oskb);
 206	struct sk_buff *skb;
 207	struct pnpipehdr *ph;
 208	struct sockaddr_pn dst;
 209	u8 data[4] = {
 210		oph->data[0], /* PEP type */
 211		code, /* error code, at an unusual offset */
 212		PAD, PAD,
 213	};
 214
 215	skb = pep_alloc_skb(sk, data, 4, priority);
 216	if (!skb)
 217		return -ENOMEM;
 218
 219	ph = pnp_hdr(skb);
 220	ph->utid = oph->utid;
 221	ph->message_id = PNS_PEP_CTRL_RESP;
 222	ph->pipe_handle = oph->pipe_handle;
 223	ph->data[0] = oph->data[1]; /* CTRL id */
 224
 225	pn_skb_get_src_sockaddr(oskb, &dst);
 226	return pn_skb_send(sk, skb, &dst);
 227}
 228
 229static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
 230{
 231	u8 data[4] = { type, PAD, PAD, status };
 232
 233	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
 234				data, 4, priority);
 235}
 236
 237/* Send our RX flow control information to the sender.
 238 * Socket must be locked. */
 239static void pipe_grant_credits(struct sock *sk, gfp_t priority)
 240{
 241	struct pep_sock *pn = pep_sk(sk);
 242
 243	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
 244
 245	switch (pn->rx_fc) {
 246	case PN_LEGACY_FLOW_CONTROL: /* TODO */
 247		break;
 248	case PN_ONE_CREDIT_FLOW_CONTROL:
 249		if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
 250					PEP_IND_READY, priority) == 0)
 251			pn->rx_credits = 1;
 252		break;
 253	case PN_MULTI_CREDIT_FLOW_CONTROL:
 254		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
 255			break;
 256		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
 257					CREDITS_MAX - pn->rx_credits,
 258					priority) == 0)
 259			pn->rx_credits = CREDITS_MAX;
 260		break;
 261	}
 262}
 263
 264static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 265{
 266	struct pep_sock *pn = pep_sk(sk);
 267	struct pnpipehdr *hdr;
 268	int wake = 0;
 269
 270	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 271		return -EINVAL;
 272
 273	hdr = pnp_hdr(skb);
 274	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
 275		net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
 276				    (unsigned int)hdr->data[0]);
 277		return -EOPNOTSUPP;
 278	}
 279
 280	switch (hdr->data[1]) {
 281	case PN_PEP_IND_FLOW_CONTROL:
 282		switch (pn->tx_fc) {
 283		case PN_LEGACY_FLOW_CONTROL:
 284			switch (hdr->data[4]) {
 285			case PEP_IND_BUSY:
 286				atomic_set(&pn->tx_credits, 0);
 287				break;
 288			case PEP_IND_READY:
 289				atomic_set(&pn->tx_credits, wake = 1);
 290				break;
 291			}
 292			break;
 293		case PN_ONE_CREDIT_FLOW_CONTROL:
 294			if (hdr->data[4] == PEP_IND_READY)
 295				atomic_set(&pn->tx_credits, wake = 1);
 296			break;
 297		}
 298		break;
 299
 300	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
 301		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
 302			break;
 303		atomic_add(wake = hdr->data[4], &pn->tx_credits);
 304		break;
 305
 306	default:
 307		net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
 308				    (unsigned int)hdr->data[1]);
 309		return -EOPNOTSUPP;
 310	}
 311	if (wake)
 312		sk->sk_write_space(sk);
 313	return 0;
 314}
 315
 316static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
 317{
 318	struct pep_sock *pn = pep_sk(sk);
 319	struct pnpipehdr *hdr = pnp_hdr(skb);
 320	u8 n_sb = hdr->data[0];
 321
 322	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 323	__skb_pull(skb, sizeof(*hdr));
 324	while (n_sb > 0) {
 325		u8 type, buf[2], len = sizeof(buf);
 326		u8 *data = pep_get_sb(skb, &type, &len, buf);
 327
 328		if (data == NULL)
 329			return -EINVAL;
 330		switch (type) {
 331		case PN_PIPE_SB_NEGOTIATED_FC:
 332			if (len < 2 || (data[0] | data[1]) > 3)
 333				break;
 334			pn->tx_fc = data[0] & 3;
 335			pn->rx_fc = data[1] & 3;
 336			break;
 337		}
 338		n_sb--;
 339	}
 340	return 0;
 341}
 342
 343/* Queue an skb to a connected sock.
 344 * Socket lock must be held. */
 345static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 346{
 347	struct pep_sock *pn = pep_sk(sk);
 348	struct pnpipehdr *hdr = pnp_hdr(skb);
 349	struct sk_buff_head *queue;
 350	int err = 0;
 351
 352	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
 353
 354	switch (hdr->message_id) {
 355	case PNS_PEP_CONNECT_REQ:
 356		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
 357		break;
 358
 359	case PNS_PEP_DISCONNECT_REQ:
 360		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 361		sk->sk_state = TCP_CLOSE_WAIT;
 362		if (!sock_flag(sk, SOCK_DEAD))
 363			sk->sk_state_change(sk);
 364		break;
 365
 366	case PNS_PEP_ENABLE_REQ:
 367		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
 368		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 369		break;
 370
 371	case PNS_PEP_RESET_REQ:
 372		switch (hdr->state_after_reset) {
 373		case PN_PIPE_DISABLE:
 374			pn->init_enable = 0;
 375			break;
 376		case PN_PIPE_ENABLE:
 377			pn->init_enable = 1;
 378			break;
 379		default: /* not allowed to send an error here!? */
 380			err = -EINVAL;
 381			goto out;
 382		}
 383		/* fall through */
 384	case PNS_PEP_DISABLE_REQ:
 385		atomic_set(&pn->tx_credits, 0);
 386		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 387		break;
 388
 389	case PNS_PEP_CTRL_REQ:
 390		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
 391			atomic_inc(&sk->sk_drops);
 392			break;
 393		}
 394		__skb_pull(skb, 4);
 395		queue = &pn->ctrlreq_queue;
 396		goto queue;
 397
 398	case PNS_PIPE_ALIGNED_DATA:
 399		__skb_pull(skb, 1);
 400		/* fall through */
 401	case PNS_PIPE_DATA:
 402		__skb_pull(skb, 3); /* Pipe data header */
 403		if (!pn_flow_safe(pn->rx_fc)) {
 404			err = sock_queue_rcv_skb(sk, skb);
 405			if (!err)
 406				return NET_RX_SUCCESS;
 407			err = -ENOBUFS;
 408			break;
 409		}
 410
 411		if (pn->rx_credits == 0) {
 412			atomic_inc(&sk->sk_drops);
 413			err = -ENOBUFS;
 414			break;
 415		}
 416		pn->rx_credits--;
 417		queue = &sk->sk_receive_queue;
 418		goto queue;
 419
 420	case PNS_PEP_STATUS_IND:
 421		pipe_rcv_status(sk, skb);
 422		break;
 423
 424	case PNS_PIPE_REDIRECTED_IND:
 425		err = pipe_rcv_created(sk, skb);
 426		break;
 427
 428	case PNS_PIPE_CREATED_IND:
 429		err = pipe_rcv_created(sk, skb);
 430		if (err)
 431			break;
 432		/* fall through */
 433	case PNS_PIPE_RESET_IND:
 434		if (!pn->init_enable)
 435			break;
 436		/* fall through */
 437	case PNS_PIPE_ENABLED_IND:
 438		if (!pn_flow_safe(pn->tx_fc)) {
 439			atomic_set(&pn->tx_credits, 1);
 440			sk->sk_write_space(sk);
 441		}
 442		if (sk->sk_state == TCP_ESTABLISHED)
 443			break; /* Nothing to do */
 444		sk->sk_state = TCP_ESTABLISHED;
 445		pipe_grant_credits(sk, GFP_ATOMIC);
 446		break;
 447
 448	case PNS_PIPE_DISABLED_IND:
 449		sk->sk_state = TCP_SYN_RECV;
 450		pn->rx_credits = 0;
 451		break;
 452
 453	default:
 454		net_dbg_ratelimited("Phonet unknown PEP message: %u\n",
 455				    hdr->message_id);
 456		err = -EINVAL;
 457	}
 458out:
 459	kfree_skb(skb);
 460	return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
 461
 462queue:
 463	skb->dev = NULL;
  464	skb_set_owner_r(skb, sk);
  465	skb_queue_tail(queue, skb);
 466	if (!sock_flag(sk, SOCK_DEAD))
 467		sk->sk_data_ready(sk);
 468	return NET_RX_SUCCESS;
 469}
 470
 471/* Destroy connected sock. */
 472static void pipe_destruct(struct sock *sk)
 473{
 474	struct pep_sock *pn = pep_sk(sk);
 475
 476	skb_queue_purge(&sk->sk_receive_queue);
 477	skb_queue_purge(&pn->ctrlreq_queue);
 478}
 479
 480static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
 481{
 482	unsigned int i;
 483	u8 final_fc = PN_NO_FLOW_CONTROL;
 484
 485	for (i = 0; i < n; i++) {
 486		u8 fc = fcs[i];
 487
 488		if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
 489			final_fc = fc;
 490	}
 491	return final_fc;
 492}
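/* Annotation (not part of the kernel source): pipe_negotiate_fc() returns the
 * numerically highest valid flow-control method offered by the peer; assuming
 * the usual PN_*_FLOW_CONTROL enum ordering (none < legacy < one-credit <
 * multi-credit), multi-credit is preferred, then one-credit, then legacy. */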
 493
 494static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
 495{
 496	struct pep_sock *pn = pep_sk(sk);
 497	struct pnpipehdr *hdr;
 498	u8 n_sb;
 499
 500	if (!pskb_pull(skb, sizeof(*hdr) + 4))
 501		return -EINVAL;
 502
 503	hdr = pnp_hdr(skb);
 504	if (hdr->error_code != PN_PIPE_NO_ERROR)
 505		return -ECONNREFUSED;
 506
 507	/* Parse sub-blocks */
 508	n_sb = hdr->data[4];
 509	while (n_sb > 0) {
 510		u8 type, buf[6], len = sizeof(buf);
 511		const u8 *data = pep_get_sb(skb, &type, &len, buf);
 512
 513		if (data == NULL)
 514			return -EINVAL;
 515
 516		switch (type) {
 517		case PN_PIPE_SB_REQUIRED_FC_TX:
 518			if (len < 2 || len < data[0])
 519				break;
 520			pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
 521			break;
 522
 523		case PN_PIPE_SB_PREFERRED_FC_RX:
 524			if (len < 2 || len < data[0])
 525				break;
 526			pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
 527			break;
 528
 529		}
 530		n_sb--;
 531	}
 532
 533	return pipe_handler_send_created_ind(sk);
 534}
 535
 536static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
 537{
 538	struct pnpipehdr *hdr = pnp_hdr(skb);
 539
 540	if (hdr->error_code != PN_PIPE_NO_ERROR)
 541		return -ECONNREFUSED;
 542
 543	return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
 544		NULL, 0, GFP_ATOMIC);
 545
 546}
 547
 548static void pipe_start_flow_control(struct sock *sk)
 549{
 550	struct pep_sock *pn = pep_sk(sk);
 551
 552	if (!pn_flow_safe(pn->tx_fc)) {
 553		atomic_set(&pn->tx_credits, 1);
 554		sk->sk_write_space(sk);
 555	}
 556	pipe_grant_credits(sk, GFP_ATOMIC);
 557}
 558
 559/* Queue an skb to an actively connected sock.
 560 * Socket lock must be held. */
 561static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 562{
 563	struct pep_sock *pn = pep_sk(sk);
 564	struct pnpipehdr *hdr = pnp_hdr(skb);
 565	int err = NET_RX_SUCCESS;
 566
 567	switch (hdr->message_id) {
 568	case PNS_PIPE_ALIGNED_DATA:
 569		__skb_pull(skb, 1);
 570		/* fall through */
 571	case PNS_PIPE_DATA:
 572		__skb_pull(skb, 3); /* Pipe data header */
 573		if (!pn_flow_safe(pn->rx_fc)) {
 574			err = sock_queue_rcv_skb(sk, skb);
 575			if (!err)
 576				return NET_RX_SUCCESS;
 577			err = NET_RX_DROP;
 578			break;
 579		}
 580
 581		if (pn->rx_credits == 0) {
 582			atomic_inc(&sk->sk_drops);
 583			err = NET_RX_DROP;
 584			break;
 585		}
 586		pn->rx_credits--;
 587		skb->dev = NULL;
  588		skb_set_owner_r(skb, sk);
  589		skb_queue_tail(&sk->sk_receive_queue, skb);
 590		if (!sock_flag(sk, SOCK_DEAD))
 591			sk->sk_data_ready(sk);
 592		return NET_RX_SUCCESS;
 593
 594	case PNS_PEP_CONNECT_RESP:
 595		if (sk->sk_state != TCP_SYN_SENT)
 596			break;
 597		if (!sock_flag(sk, SOCK_DEAD))
 598			sk->sk_state_change(sk);
 599		if (pep_connresp_rcv(sk, skb)) {
 600			sk->sk_state = TCP_CLOSE_WAIT;
 601			break;
 602		}
 603		if (pn->init_enable == PN_PIPE_DISABLE)
 604			sk->sk_state = TCP_SYN_RECV;
 605		else {
 606			sk->sk_state = TCP_ESTABLISHED;
 607			pipe_start_flow_control(sk);
 608		}
 609		break;
 610
 611	case PNS_PEP_ENABLE_RESP:
 612		if (sk->sk_state != TCP_SYN_SENT)
 613			break;
 614
 615		if (pep_enableresp_rcv(sk, skb)) {
 616			sk->sk_state = TCP_CLOSE_WAIT;
 617			break;
 618		}
 619
 620		sk->sk_state = TCP_ESTABLISHED;
  621		pipe_start_flow_control(sk);
  622		break;
 623
 624	case PNS_PEP_DISCONNECT_RESP:
 625		/* sock should already be dead, nothing to do */
 626		break;
 627
 628	case PNS_PEP_STATUS_IND:
 629		pipe_rcv_status(sk, skb);
 630		break;
 631	}
 632	kfree_skb(skb);
 633	return err;
 634}
 635
 636/* Listening sock must be locked */
 637static struct sock *pep_find_pipe(const struct hlist_head *hlist,
 638					const struct sockaddr_pn *dst,
 639					u8 pipe_handle)
  640{
  641	struct sock *sknode;
 642	u16 dobj = pn_sockaddr_get_object(dst);
 643
 644	sk_for_each(sknode, hlist) {
 645		struct pep_sock *pnnode = pep_sk(sknode);
 646
 647		/* Ports match, but addresses might not: */
 648		if (pnnode->pn_sk.sobject != dobj)
 649			continue;
 650		if (pnnode->pipe_handle != pipe_handle)
 651			continue;
 652		if (sknode->sk_state == TCP_CLOSE_WAIT)
 653			continue;
 654
 655		sock_hold(sknode);
 656		return sknode;
 657	}
 658	return NULL;
 659}
 660
 661/*
 662 * Deliver an skb to a listening sock.
 663 * Socket lock must be held.
 664 * We then queue the skb to the right connected sock (if any).
 665 */
 666static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 667{
 668	struct pep_sock *pn = pep_sk(sk);
 669	struct sock *sknode;
 670	struct pnpipehdr *hdr;
 671	struct sockaddr_pn dst;
 672	u8 pipe_handle;
 673
 674	if (!pskb_may_pull(skb, sizeof(*hdr)))
 675		goto drop;
 676
 677	hdr = pnp_hdr(skb);
 678	pipe_handle = hdr->pipe_handle;
 679	if (pipe_handle == PN_PIPE_INVALID_HANDLE)
 680		goto drop;
 681
 682	pn_skb_get_dst_sockaddr(skb, &dst);
 683
 684	/* Look for an existing pipe handle */
 685	sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
 686	if (sknode)
 687		return sk_receive_skb(sknode, skb, 1);
 688
 689	switch (hdr->message_id) {
 690	case PNS_PEP_CONNECT_REQ:
 691		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
 692			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
 693					GFP_ATOMIC);
 694			break;
 695		}
 696		skb_queue_head(&sk->sk_receive_queue, skb);
 697		sk_acceptq_added(sk);
 698		if (!sock_flag(sk, SOCK_DEAD))
 699			sk->sk_data_ready(sk);
 700		return NET_RX_SUCCESS;
 701
 702	case PNS_PEP_DISCONNECT_REQ:
 703		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 704		break;
 705
 706	case PNS_PEP_CTRL_REQ:
 707		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
 708		break;
 709
 710	case PNS_PEP_RESET_REQ:
 711	case PNS_PEP_ENABLE_REQ:
 712	case PNS_PEP_DISABLE_REQ:
 713		/* invalid handle is not even allowed here! */
 714		break;
 715
 716	default:
 717		if ((1 << sk->sk_state)
 718				& ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
 719			/* actively connected socket */
 720			return pipe_handler_do_rcv(sk, skb);
 721	}
 722drop:
 723	kfree_skb(skb);
 724	return NET_RX_SUCCESS;
 725}
 726
 727static int pipe_do_remove(struct sock *sk)
 728{
 729	struct pep_sock *pn = pep_sk(sk);
 730	struct pnpipehdr *ph;
 731	struct sk_buff *skb;
 732
 733	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
 734	if (!skb)
 735		return -ENOMEM;
 736
 737	ph = pnp_hdr(skb);
 738	ph->utid = 0;
 739	ph->message_id = PNS_PIPE_REMOVE_REQ;
 740	ph->pipe_handle = pn->pipe_handle;
 741	ph->data[0] = PAD;
 742	return pn_skb_send(sk, skb, NULL);
 743}
 744
 745/* associated socket ceases to exist */
 746static void pep_sock_close(struct sock *sk, long timeout)
 747{
 748	struct pep_sock *pn = pep_sk(sk);
 749	int ifindex = 0;
 750
 751	sock_hold(sk); /* keep a reference after sk_common_release() */
 752	sk_common_release(sk);
 753
 754	lock_sock(sk);
 755	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
 756		if (sk->sk_backlog_rcv == pipe_do_rcv)
 757			/* Forcefully remove dangling Phonet pipe */
 758			pipe_do_remove(sk);
 759		else
 760			pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
 761						NULL, 0);
 762	}
 763	sk->sk_state = TCP_CLOSE;
 764
 765	ifindex = pn->ifindex;
 766	pn->ifindex = 0;
 767	release_sock(sk);
 768
 769	if (ifindex)
 770		gprs_detach(sk);
 771	sock_put(sk);
 772}
 773
 774static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
 775{
 776	struct pep_sock *pn = pep_sk(sk), *newpn;
 777	struct sock *newsk = NULL;
 778	struct sk_buff *skb;
 779	struct pnpipehdr *hdr;
 780	struct sockaddr_pn dst, src;
 781	int err;
 782	u16 peer_type;
 783	u8 pipe_handle, enabled, n_sb;
 784	u8 aligned = 0;
 785
 786	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
 787	if (!skb)
 788		return NULL;
 789
 790	lock_sock(sk);
 791	if (sk->sk_state != TCP_LISTEN) {
 792		err = -EINVAL;
 793		goto drop;
 794	}
 795	sk_acceptq_removed(sk);
 796
 797	err = -EPROTO;
 798	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 799		goto drop;
 800
 801	hdr = pnp_hdr(skb);
 802	pipe_handle = hdr->pipe_handle;
 803	switch (hdr->state_after_connect) {
 804	case PN_PIPE_DISABLE:
 805		enabled = 0;
 806		break;
 807	case PN_PIPE_ENABLE:
 808		enabled = 1;
 809		break;
 810	default:
 811		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
 812				GFP_KERNEL);
 813		goto drop;
 814	}
 815	peer_type = hdr->other_pep_type << 8;
 816
 817	/* Parse sub-blocks (options) */
 818	n_sb = hdr->data[4];
 819	while (n_sb > 0) {
 820		u8 type, buf[1], len = sizeof(buf);
 821		const u8 *data = pep_get_sb(skb, &type, &len, buf);
 822
 823		if (data == NULL)
 824			goto drop;
 825		switch (type) {
 826		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
 827			if (len < 1)
 828				goto drop;
 829			peer_type = (peer_type & 0xff00) | data[0];
 830			break;
 831		case PN_PIPE_SB_ALIGNED_DATA:
 832			aligned = data[0] != 0;
 833			break;
 834		}
 835		n_sb--;
 836	}
 837
 838	/* Check for duplicate pipe handle */
 839	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
 840	if (unlikely(newsk)) {
 841		__sock_put(newsk);
 842		newsk = NULL;
 843		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
 844		goto drop;
 845	}
 846
 847	/* Create a new to-be-accepted sock */
 848	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0);
 849	if (!newsk) {
 850		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
 851		err = -ENOBUFS;
 852		goto drop;
 853	}
 854
 855	sock_init_data(NULL, newsk);
 856	newsk->sk_state = TCP_SYN_RECV;
 857	newsk->sk_backlog_rcv = pipe_do_rcv;
 858	newsk->sk_protocol = sk->sk_protocol;
 859	newsk->sk_destruct = pipe_destruct;
 860
 861	newpn = pep_sk(newsk);
 862	pn_skb_get_dst_sockaddr(skb, &dst);
 863	pn_skb_get_src_sockaddr(skb, &src);
 864	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
 865	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
 866	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
 867	sock_hold(sk);
 868	newpn->listener = sk;
 869	skb_queue_head_init(&newpn->ctrlreq_queue);
 870	newpn->pipe_handle = pipe_handle;
 871	atomic_set(&newpn->tx_credits, 0);
 872	newpn->ifindex = 0;
 873	newpn->peer_type = peer_type;
 874	newpn->rx_credits = 0;
 875	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 876	newpn->init_enable = enabled;
 877	newpn->aligned = aligned;
 878
 879	err = pep_accept_conn(newsk, skb);
 880	if (err) {
 881		sock_put(newsk);
 882		newsk = NULL;
 883		goto drop;
 884	}
 885	sk_add_node(newsk, &pn->hlist);
 886drop:
 887	release_sock(sk);
 888	kfree_skb(skb);
 889	*errp = err;
 890	return newsk;
 891}
 892
 893static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
 894{
 895	struct pep_sock *pn = pep_sk(sk);
 896	int err;
 897	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 898
 899	if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
 900		pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
 901
 902	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
 903				pn->init_enable, data, 4);
 904	if (err) {
 905		pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 906		return err;
 907	}
 908
 909	sk->sk_state = TCP_SYN_SENT;
 910
 911	return 0;
 912}
 913
 914static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
 915{
 916	int err;
 917
 918	err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
 919				NULL, 0);
 920	if (err)
 921		return err;
 922
 923	sk->sk_state = TCP_SYN_SENT;
 924
 925	return 0;
 926}
 927
 928static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 929{
 930	struct pep_sock *pn = pep_sk(sk);
 931	int answ;
 932	int ret = -ENOIOCTLCMD;
 933
 934	switch (cmd) {
 935	case SIOCINQ:
 936		if (sk->sk_state == TCP_LISTEN) {
 937			ret = -EINVAL;
 938			break;
 939		}
 940
 941		lock_sock(sk);
 942		if (sock_flag(sk, SOCK_URGINLINE) &&
 943		    !skb_queue_empty(&pn->ctrlreq_queue))
 944			answ = skb_peek(&pn->ctrlreq_queue)->len;
 945		else if (!skb_queue_empty(&sk->sk_receive_queue))
 946			answ = skb_peek(&sk->sk_receive_queue)->len;
 947		else
 948			answ = 0;
 949		release_sock(sk);
 950		ret = put_user(answ, (int __user *)arg);
 951		break;
 952
 953	case SIOCPNENABLEPIPE:
 954		lock_sock(sk);
 955		if (sk->sk_state == TCP_SYN_SENT)
 956			ret =  -EBUSY;
 957		else if (sk->sk_state == TCP_ESTABLISHED)
 958			ret = -EISCONN;
 959		else
 960			ret = pep_sock_enable(sk, NULL, 0);
 961		release_sock(sk);
 962		break;
 963	}
 964
 965	return ret;
 966}
 967
 968static int pep_init(struct sock *sk)
 969{
 970	struct pep_sock *pn = pep_sk(sk);
 971
 972	sk->sk_destruct = pipe_destruct;
 973	INIT_HLIST_HEAD(&pn->hlist);
 974	pn->listener = NULL;
 975	skb_queue_head_init(&pn->ctrlreq_queue);
 976	atomic_set(&pn->tx_credits, 0);
 977	pn->ifindex = 0;
 978	pn->peer_type = 0;
 979	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 980	pn->rx_credits = 0;
 981	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 982	pn->init_enable = 1;
 983	pn->aligned = 0;
 984	return 0;
 985}
 986
 987static int pep_setsockopt(struct sock *sk, int level, int optname,
 988				char __user *optval, unsigned int optlen)
 989{
 990	struct pep_sock *pn = pep_sk(sk);
 991	int val = 0, err = 0;
 992
 993	if (level != SOL_PNPIPE)
 994		return -ENOPROTOOPT;
 995	if (optlen >= sizeof(int)) {
 996		if (get_user(val, (int __user *) optval))
 997			return -EFAULT;
 998	}
 999
1000	lock_sock(sk);
1001	switch (optname) {
1002	case PNPIPE_ENCAP:
1003		if (val && val != PNPIPE_ENCAP_IP) {
1004			err = -EINVAL;
1005			break;
1006		}
1007		if (!pn->ifindex == !val)
1008			break; /* Nothing to do! */
1009		if (!capable(CAP_NET_ADMIN)) {
1010			err = -EPERM;
1011			break;
1012		}
1013		if (val) {
1014			release_sock(sk);
1015			err = gprs_attach(sk);
1016			if (err > 0) {
1017				pn->ifindex = err;
1018				err = 0;
1019			}
1020		} else {
1021			pn->ifindex = 0;
1022			release_sock(sk);
1023			gprs_detach(sk);
1024			err = 0;
1025		}
1026		goto out_norel;
1027
1028	case PNPIPE_HANDLE:
1029		if ((sk->sk_state == TCP_CLOSE) &&
1030			(val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
1031			pn->pipe_handle = val;
1032		else
1033			err = -EINVAL;
1034		break;
1035
1036	case PNPIPE_INITSTATE:
1037		pn->init_enable = !!val;
1038		break;
1039
1040	default:
1041		err = -ENOPROTOOPT;
1042	}
1043	release_sock(sk);
1044
1045out_norel:
1046	return err;
1047}
1048
1049static int pep_getsockopt(struct sock *sk, int level, int optname,
1050				char __user *optval, int __user *optlen)
1051{
1052	struct pep_sock *pn = pep_sk(sk);
1053	int len, val;
1054
1055	if (level != SOL_PNPIPE)
1056		return -ENOPROTOOPT;
1057	if (get_user(len, optlen))
1058		return -EFAULT;
1059
1060	switch (optname) {
1061	case PNPIPE_ENCAP:
1062		val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
1063		break;
1064
1065	case PNPIPE_IFINDEX:
1066		val = pn->ifindex;
1067		break;
1068
1069	case PNPIPE_HANDLE:
1070		val = pn->pipe_handle;
1071		if (val == PN_PIPE_INVALID_HANDLE)
1072			return -EINVAL;
1073		break;
1074
1075	case PNPIPE_INITSTATE:
1076		val = pn->init_enable;
1077		break;
1078
1079	default:
1080		return -ENOPROTOOPT;
1081	}
1082
1083	len = min_t(unsigned int, sizeof(int), len);
1084	if (put_user(len, optlen))
1085		return -EFAULT;
1086	if (put_user(val, (int __user *) optval))
1087		return -EFAULT;
1088	return 0;
1089}
1090
1091static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
1092{
1093	struct pep_sock *pn = pep_sk(sk);
1094	struct pnpipehdr *ph;
1095	int err;
1096
1097	if (pn_flow_safe(pn->tx_fc) &&
1098	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
1099		kfree_skb(skb);
1100		return -ENOBUFS;
1101	}
1102
1103	skb_push(skb, 3 + pn->aligned);
1104	skb_reset_transport_header(skb);
1105	ph = pnp_hdr(skb);
1106	ph->utid = 0;
1107	if (pn->aligned) {
1108		ph->message_id = PNS_PIPE_ALIGNED_DATA;
1109		ph->data[0] = 0; /* padding */
1110	} else
1111		ph->message_id = PNS_PIPE_DATA;
1112	ph->pipe_handle = pn->pipe_handle;
1113	err = pn_skb_send(sk, skb, NULL);
1114
1115	if (err && pn_flow_safe(pn->tx_fc))
1116		atomic_inc(&pn->tx_credits);
1117	return err;
1118
1119}
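/* Annotation (not part of the kernel source): when credit-based flow control
 * is active, atomic_add_unless(&pn->tx_credits, -1, 0) consumes one TX credit
 * but refuses to go below zero, so a credit-less pipe fails fast with -ENOBUFS
 * instead of transmitting; the credit is handed back if pn_skb_send() fails. */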
1120
 1121static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 1122{
1123	struct pep_sock *pn = pep_sk(sk);
1124	struct sk_buff *skb;
1125	long timeo;
1126	int flags = msg->msg_flags;
1127	int err, done;
1128
1129	if (len > USHRT_MAX)
1130		return -EMSGSIZE;
1131
1132	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
1133				MSG_CMSG_COMPAT)) ||
1134			!(msg->msg_flags & MSG_EOR))
1135		return -EOPNOTSUPP;
1136
1137	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
1138					flags & MSG_DONTWAIT, &err);
1139	if (!skb)
1140		return err;
1141
1142	skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
1143	err = memcpy_from_msg(skb_put(skb, len), msg, len);
1144	if (err < 0)
1145		goto outfree;
1146
1147	lock_sock(sk);
1148	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1149	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
1150		err = -ENOTCONN;
1151		goto out;
1152	}
1153	if (sk->sk_state != TCP_ESTABLISHED) {
1154		/* Wait until the pipe gets to enabled state */
1155disabled:
1156		err = sk_stream_wait_connect(sk, &timeo);
1157		if (err)
1158			goto out;
1159
1160		if (sk->sk_state == TCP_CLOSE_WAIT) {
1161			err = -ECONNRESET;
1162			goto out;
1163		}
1164	}
1165	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
1166
1167	/* Wait until flow control allows TX */
1168	done = atomic_read(&pn->tx_credits);
1169	while (!done) {
1170		DEFINE_WAIT(wait);
1171
1172		if (!timeo) {
1173			err = -EAGAIN;
1174			goto out;
1175		}
1176		if (signal_pending(current)) {
1177			err = sock_intr_errno(timeo);
1178			goto out;
1179		}
1180
1181		prepare_to_wait(sk_sleep(sk), &wait,
1182				TASK_INTERRUPTIBLE);
1183		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
1184		finish_wait(sk_sleep(sk), &wait);
1185
1186		if (sk->sk_state != TCP_ESTABLISHED)
1187			goto disabled;
1188	}
1189
1190	err = pipe_skb_send(sk, skb);
1191	if (err >= 0)
1192		err = len; /* success! */
1193	skb = NULL;
1194out:
1195	release_sock(sk);
1196outfree:
1197	kfree_skb(skb);
1198	return err;
1199}
1200
1201int pep_writeable(struct sock *sk)
1202{
1203	struct pep_sock *pn = pep_sk(sk);
1204
1205	return atomic_read(&pn->tx_credits);
1206}
1207
1208int pep_write(struct sock *sk, struct sk_buff *skb)
1209{
1210	struct sk_buff *rskb, *fs;
1211	int flen = 0;
1212
1213	if (pep_sk(sk)->aligned)
1214		return pipe_skb_send(sk, skb);
1215
1216	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
1217	if (!rskb) {
1218		kfree_skb(skb);
1219		return -ENOMEM;
1220	}
1221	skb_shinfo(rskb)->frag_list = skb;
1222	rskb->len += skb->len;
1223	rskb->data_len += rskb->len;
1224	rskb->truesize += rskb->len;
1225
1226	/* Avoid nested fragments */
1227	skb_walk_frags(skb, fs)
1228		flen += fs->len;
1229	skb->next = skb_shinfo(skb)->frag_list;
1230	skb_frag_list_init(skb);
1231	skb->len -= flen;
1232	skb->data_len -= flen;
1233	skb->truesize -= flen;
1234
1235	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
1236	return pipe_skb_send(sk, rskb);
1237}
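/* Annotation (not part of the kernel source): for unaligned pipes, pep_write()
 * avoids copying the GPRS payload; it allocates a small header-only skb,
 * attaches the original skb as its fragment list (flattening any nested
 * fragments), and pushes the pipe/Phonet headers into the new headroom. */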
1238
1239struct sk_buff *pep_read(struct sock *sk)
1240{
1241	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
1242
1243	if (sk->sk_state == TCP_ESTABLISHED)
1244		pipe_grant_credits(sk, GFP_ATOMIC);
1245	return skb;
1246}
1247
1248static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 1249		       int noblock, int flags, int *addr_len)
 1250{
1251	struct sk_buff *skb;
1252	int err;
1253
1254	if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
1255			MSG_NOSIGNAL|MSG_CMSG_COMPAT))
1256		return -EOPNOTSUPP;
1257
1258	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
1259		return -ENOTCONN;
1260
1261	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
1262		/* Dequeue and acknowledge control request */
1263		struct pep_sock *pn = pep_sk(sk);
1264
1265		if (flags & MSG_PEEK)
1266			return -EOPNOTSUPP;
1267		skb = skb_dequeue(&pn->ctrlreq_queue);
1268		if (skb) {
1269			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
1270						GFP_KERNEL);
1271			msg->msg_flags |= MSG_OOB;
1272			goto copy;
1273		}
1274		if (flags & MSG_OOB)
1275			return -EINVAL;
1276	}
1277
1278	skb = skb_recv_datagram(sk, flags, noblock, &err);
1279	lock_sock(sk);
1280	if (skb == NULL) {
1281		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
1282			err = -ECONNRESET;
1283		release_sock(sk);
1284		return err;
1285	}
1286
1287	if (sk->sk_state == TCP_ESTABLISHED)
1288		pipe_grant_credits(sk, GFP_KERNEL);
1289	release_sock(sk);
1290copy:
1291	msg->msg_flags |= MSG_EOR;
1292	if (skb->len > len)
1293		msg->msg_flags |= MSG_TRUNC;
1294	else
1295		len = skb->len;
1296
1297	err = skb_copy_datagram_msg(skb, 0, msg, len);
1298	if (!err)
1299		err = (flags & MSG_TRUNC) ? skb->len : len;
1300
1301	skb_free_datagram(sk, skb);
1302	return err;
1303}
1304
1305static void pep_sock_unhash(struct sock *sk)
1306{
1307	struct pep_sock *pn = pep_sk(sk);
1308	struct sock *skparent = NULL;
1309
1310	lock_sock(sk);
1311
1312	if (pn->listener != NULL) {
1313		skparent = pn->listener;
1314		pn->listener = NULL;
1315		release_sock(sk);
1316
1317		pn = pep_sk(skparent);
1318		lock_sock(skparent);
1319		sk_del_node_init(sk);
1320		sk = skparent;
1321	}
1322
1323	/* Unhash a listening sock only when it is closed
1324	 * and all of its active connected pipes are closed. */
1325	if (hlist_empty(&pn->hlist))
1326		pn_sock_unhash(&pn->pn_sk.sk);
1327	release_sock(sk);
1328
1329	if (skparent)
1330		sock_put(skparent);
1331}
1332
1333static struct proto pep_proto = {
1334	.close		= pep_sock_close,
1335	.accept		= pep_sock_accept,
1336	.connect	= pep_sock_connect,
1337	.ioctl		= pep_ioctl,
1338	.init		= pep_init,
1339	.setsockopt	= pep_setsockopt,
1340	.getsockopt	= pep_getsockopt,
1341	.sendmsg	= pep_sendmsg,
1342	.recvmsg	= pep_recvmsg,
1343	.backlog_rcv	= pep_do_rcv,
1344	.hash		= pn_sock_hash,
1345	.unhash		= pep_sock_unhash,
1346	.get_port	= pn_sock_get_port,
1347	.obj_size	= sizeof(struct pep_sock),
1348	.owner		= THIS_MODULE,
1349	.name		= "PNPIPE",
1350};
1351
1352static struct phonet_protocol pep_pn_proto = {
1353	.ops		= &phonet_stream_ops,
1354	.prot		= &pep_proto,
1355	.sock_type	= SOCK_SEQPACKET,
1356};
1357
1358static int __init pep_register(void)
1359{
1360	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
1361}
1362
1363static void __exit pep_unregister(void)
1364{
1365	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
1366}
1367
1368module_init(pep_register);
1369module_exit(pep_unregister);
1370MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
1371MODULE_DESCRIPTION("Phonet pipe protocol");
1372MODULE_LICENSE("GPL");
1373MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);
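A minimal, hypothetical user-space sketch of the socket API this file implements
(the socket type, the SOL_PNPIPE options and the SIOCPNENABLEPIPE ioctl all
appear above); constants come from <linux/phonet.h>, and error handling and the
actual connect() addressing are omitted:

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/phonet.h>

int pep_example(void)
{
	int fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
	int handle = 42;	/* any value below PN_PIPE_INVALID_HANDLE */

	/* Pre-assign a pipe handle; only allowed while the sock is still
	 * closed (see PNPIPE_HANDLE in pep_setsockopt() above). */
	setsockopt(fd, SOL_PNPIPE, PNPIPE_HANDLE, &handle, sizeof(handle));

	/* ... connect(fd, ...) to the remote PEP goes here ... */

	/* Enable the pipe once connected (handled by pep_ioctl() above). */
	ioctl(fd, SIOCPNENABLEPIPE, 0);
	return fd;
}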
net/phonet/pep.c, Linux v3.1 (older revision of the same file)
   1/*
   2 * File: pep.c
   3 *
   4 * Phonet pipe protocol end point socket
   5 *
   6 * Copyright (C) 2008 Nokia Corporation.
   7 *
   8 * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public License
  12 * version 2 as published by the Free Software Foundation.
  13 *
  14 * This program is distributed in the hope that it will be useful, but
  15 * WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  17 * General Public License for more details.
  18 *
  19 * You should have received a copy of the GNU General Public License
  20 * along with this program; if not, write to the Free Software
  21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  22 * 02110-1301 USA
  23 */
  24
  25#include <linux/kernel.h>
  26#include <linux/slab.h>
  27#include <linux/socket.h>
  28#include <net/sock.h>
  29#include <net/tcp_states.h>
  30#include <asm/ioctls.h>
  31
   32#include <linux/phonet.h>
   33#include <net/phonet/phonet.h>
  34#include <net/phonet/pep.h>
  35#include <net/phonet/gprs.h>
  36
  37/* sk_state values:
  38 * TCP_CLOSE		sock not in use yet
  39 * TCP_CLOSE_WAIT	disconnected pipe
  40 * TCP_LISTEN		listening pipe endpoint
  41 * TCP_SYN_RECV		connected pipe in disabled state
  42 * TCP_ESTABLISHED	connected pipe in enabled state
  43 *
  44 * pep_sock locking:
  45 *  - sk_state, hlist: sock lock needed
  46 *  - listener: read only
  47 *  - pipe_handle: read only
  48 */
  49
  50#define CREDITS_MAX	10
  51#define CREDITS_THR	7
  52
  53#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
  54
  55/* Get the next TLV sub-block. */
  56static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
  57					void *buf)
  58{
  59	void *data = NULL;
  60	struct {
  61		u8 sb_type;
  62		u8 sb_len;
  63	} *ph, h;
  64	int buflen = *plen;
  65
  66	ph = skb_header_pointer(skb, 0, 2, &h);
  67	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
  68		return NULL;
  69	ph->sb_len -= 2;
  70	*ptype = ph->sb_type;
  71	*plen = ph->sb_len;
  72
  73	if (buflen > ph->sb_len)
  74		buflen = ph->sb_len;
  75	data = skb_header_pointer(skb, 2, buflen, buf);
  76	__skb_pull(skb, 2 + ph->sb_len);
  77	return data;
  78}
  79
  80static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
  81					int len, gfp_t priority)
  82{
  83	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
  84	if (!skb)
  85		return NULL;
  86	skb_set_owner_w(skb, sk);
  87
  88	skb_reserve(skb, MAX_PNPIPE_HEADER);
  89	__skb_put(skb, len);
  90	skb_copy_to_linear_data(skb, payload, len);
  91	__skb_push(skb, sizeof(struct pnpipehdr));
  92	skb_reset_transport_header(skb);
  93	return skb;
  94}
  95
  96static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
  97			const void *data, int len, gfp_t priority)
  98{
  99	const struct pnpipehdr *oph = pnp_hdr(oskb);
 100	struct pnpipehdr *ph;
 101	struct sk_buff *skb;
 102	struct sockaddr_pn peer;
 103
 104	skb = pep_alloc_skb(sk, data, len, priority);
 105	if (!skb)
 106		return -ENOMEM;
 107
 108	ph = pnp_hdr(skb);
 109	ph->utid = oph->utid;
 110	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
 111	ph->pipe_handle = oph->pipe_handle;
 112	ph->error_code = code;
 113
 114	pn_skb_get_src_sockaddr(oskb, &peer);
 115	return pn_skb_send(sk, skb, &peer);
 116}
 117
 118static int pep_indicate(struct sock *sk, u8 id, u8 code,
 119			const void *data, int len, gfp_t priority)
 120{
 121	struct pep_sock *pn = pep_sk(sk);
 122	struct pnpipehdr *ph;
 123	struct sk_buff *skb;
 124
 125	skb = pep_alloc_skb(sk, data, len, priority);
 126	if (!skb)
 127		return -ENOMEM;
 128
 129	ph = pnp_hdr(skb);
 130	ph->utid = 0;
 131	ph->message_id = id;
 132	ph->pipe_handle = pn->pipe_handle;
 133	ph->data[0] = code;
 134	return pn_skb_send(sk, skb, NULL);
 135}
 136
 137#define PAD 0x00
 138
 139static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
 140				const void *data, int len)
 141{
 142	struct pep_sock *pn = pep_sk(sk);
 143	struct pnpipehdr *ph;
 144	struct sk_buff *skb;
 145
 146	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
 147	if (!skb)
 148		return -ENOMEM;
 149
 150	ph = pnp_hdr(skb);
 151	ph->utid = id; /* whatever */
 152	ph->message_id = id;
 153	ph->pipe_handle = pn->pipe_handle;
 154	ph->data[0] = code;
 155	return pn_skb_send(sk, skb, NULL);
 156}
 157
 158static int pipe_handler_send_created_ind(struct sock *sk)
 159{
 160	struct pep_sock *pn = pep_sk(sk);
 161	u8 data[4] = {
 162		PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
 163		pn->tx_fc, pn->rx_fc,
 164	};
 165
 166	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
 167				data, 4, GFP_ATOMIC);
 168}
 169
 170static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
 171{
 172	static const u8 data[20] = {
 173		PAD, PAD, PAD, 2 /* sub-blocks */,
 174		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
 175			PN_MULTI_CREDIT_FLOW_CONTROL,
 176			PN_ONE_CREDIT_FLOW_CONTROL,
 177			PN_LEGACY_FLOW_CONTROL,
 178			PAD,
 179		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
 180			PN_MULTI_CREDIT_FLOW_CONTROL,
 181			PN_ONE_CREDIT_FLOW_CONTROL,
 182			PN_LEGACY_FLOW_CONTROL,
 183			PAD,
 184	};
 185
 186	might_sleep();
 187	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
 188				GFP_KERNEL);
 189}
 190
 191static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
 192				gfp_t priority)
 193{
 194	static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
 195	WARN_ON(code == PN_PIPE_NO_ERROR);
 196	return pep_reply(sk, skb, code, data, sizeof(data), priority);
 197}
 198
 199/* Control requests are not sent by the pipe service and have a specific
 200 * message format. */
 201static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 202				gfp_t priority)
 203{
 204	const struct pnpipehdr *oph = pnp_hdr(oskb);
 205	struct sk_buff *skb;
 206	struct pnpipehdr *ph;
 207	struct sockaddr_pn dst;
 208	u8 data[4] = {
 209		oph->data[0], /* PEP type */
 210		code, /* error code, at an unusual offset */
 211		PAD, PAD,
 212	};
 213
 214	skb = pep_alloc_skb(sk, data, 4, priority);
 215	if (!skb)
 216		return -ENOMEM;
 217
 218	ph = pnp_hdr(skb);
 219	ph->utid = oph->utid;
 220	ph->message_id = PNS_PEP_CTRL_RESP;
 221	ph->pipe_handle = oph->pipe_handle;
 222	ph->data[0] = oph->data[1]; /* CTRL id */
 223
 224	pn_skb_get_src_sockaddr(oskb, &dst);
 225	return pn_skb_send(sk, skb, &dst);
 226}
 227
 228static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
 229{
 230	u8 data[4] = { type, PAD, PAD, status };
 231
 232	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
 233				data, 4, priority);
 234}
 235
 236/* Send our RX flow control information to the sender.
 237 * Socket must be locked. */
 238static void pipe_grant_credits(struct sock *sk, gfp_t priority)
 239{
 240	struct pep_sock *pn = pep_sk(sk);
 241
 242	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
 243
 244	switch (pn->rx_fc) {
 245	case PN_LEGACY_FLOW_CONTROL: /* TODO */
 246		break;
 247	case PN_ONE_CREDIT_FLOW_CONTROL:
 248		if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
 249					PEP_IND_READY, priority) == 0)
 250			pn->rx_credits = 1;
 251		break;
 252	case PN_MULTI_CREDIT_FLOW_CONTROL:
 253		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
 254			break;
 255		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
 256					CREDITS_MAX - pn->rx_credits,
 257					priority) == 0)
 258			pn->rx_credits = CREDITS_MAX;
 259		break;
 260	}
 261}
 262
 263static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 264{
 265	struct pep_sock *pn = pep_sk(sk);
 266	struct pnpipehdr *hdr;
 267	int wake = 0;
 268
 269	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 270		return -EINVAL;
 271
 272	hdr = pnp_hdr(skb);
 273	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
 274		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
 275				(unsigned)hdr->data[0]);
 276		return -EOPNOTSUPP;
 277	}
 278
 279	switch (hdr->data[1]) {
 280	case PN_PEP_IND_FLOW_CONTROL:
 281		switch (pn->tx_fc) {
 282		case PN_LEGACY_FLOW_CONTROL:
 283			switch (hdr->data[4]) {
 284			case PEP_IND_BUSY:
 285				atomic_set(&pn->tx_credits, 0);
 286				break;
 287			case PEP_IND_READY:
 288				atomic_set(&pn->tx_credits, wake = 1);
 289				break;
 290			}
 291			break;
 292		case PN_ONE_CREDIT_FLOW_CONTROL:
 293			if (hdr->data[4] == PEP_IND_READY)
 294				atomic_set(&pn->tx_credits, wake = 1);
 295			break;
 296		}
 297		break;
 298
 299	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
 300		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
 301			break;
 302		atomic_add(wake = hdr->data[4], &pn->tx_credits);
 303		break;
 304
 305	default:
 306		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
 307				(unsigned)hdr->data[1]);
 308		return -EOPNOTSUPP;
 309	}
 310	if (wake)
 311		sk->sk_write_space(sk);
 312	return 0;
 313}
 314
 315static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
 316{
 317	struct pep_sock *pn = pep_sk(sk);
 318	struct pnpipehdr *hdr = pnp_hdr(skb);
 319	u8 n_sb = hdr->data[0];
 320
 321	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 322	__skb_pull(skb, sizeof(*hdr));
 323	while (n_sb > 0) {
 324		u8 type, buf[2], len = sizeof(buf);
 325		u8 *data = pep_get_sb(skb, &type, &len, buf);
 326
 327		if (data == NULL)
 328			return -EINVAL;
 329		switch (type) {
 330		case PN_PIPE_SB_NEGOTIATED_FC:
 331			if (len < 2 || (data[0] | data[1]) > 3)
 332				break;
 333			pn->tx_fc = data[0] & 3;
 334			pn->rx_fc = data[1] & 3;
 335			break;
 336		}
 337		n_sb--;
 338	}
 339	return 0;
 340}
 341
 342/* Queue an skb to a connected sock.
 343 * Socket lock must be held. */
 344static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 345{
 346	struct pep_sock *pn = pep_sk(sk);
 347	struct pnpipehdr *hdr = pnp_hdr(skb);
 348	struct sk_buff_head *queue;
 349	int err = 0;
 350
 351	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
 352
 353	switch (hdr->message_id) {
 354	case PNS_PEP_CONNECT_REQ:
 355		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
 356		break;
 357
 358	case PNS_PEP_DISCONNECT_REQ:
 359		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 360		sk->sk_state = TCP_CLOSE_WAIT;
 361		if (!sock_flag(sk, SOCK_DEAD))
 362			sk->sk_state_change(sk);
 363		break;
 364
 365	case PNS_PEP_ENABLE_REQ:
 366		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
 367		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 368		break;
 369
 370	case PNS_PEP_RESET_REQ:
 371		switch (hdr->state_after_reset) {
 372		case PN_PIPE_DISABLE:
 373			pn->init_enable = 0;
 374			break;
 375		case PN_PIPE_ENABLE:
 376			pn->init_enable = 1;
 377			break;
 378		default: /* not allowed to send an error here!? */
 379			err = -EINVAL;
 380			goto out;
 381		}
 382		/* fall through */
 383	case PNS_PEP_DISABLE_REQ:
 384		atomic_set(&pn->tx_credits, 0);
 385		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 386		break;
 387
 388	case PNS_PEP_CTRL_REQ:
 389		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
 390			atomic_inc(&sk->sk_drops);
 391			break;
 392		}
 393		__skb_pull(skb, 4);
 394		queue = &pn->ctrlreq_queue;
 395		goto queue;
 396
 397	case PNS_PIPE_ALIGNED_DATA:
 398		__skb_pull(skb, 1);
 399		/* fall through */
 400	case PNS_PIPE_DATA:
 401		__skb_pull(skb, 3); /* Pipe data header */
 402		if (!pn_flow_safe(pn->rx_fc)) {
 403			err = sock_queue_rcv_skb(sk, skb);
 404			if (!err)
 405				return NET_RX_SUCCESS;
 406			err = -ENOBUFS;
 407			break;
 408		}
 409
 410		if (pn->rx_credits == 0) {
 411			atomic_inc(&sk->sk_drops);
 412			err = -ENOBUFS;
 413			break;
 414		}
 415		pn->rx_credits--;
 416		queue = &sk->sk_receive_queue;
 417		goto queue;
 418
 419	case PNS_PEP_STATUS_IND:
 420		pipe_rcv_status(sk, skb);
 421		break;
 422
 423	case PNS_PIPE_REDIRECTED_IND:
 424		err = pipe_rcv_created(sk, skb);
 425		break;
 426
 427	case PNS_PIPE_CREATED_IND:
 428		err = pipe_rcv_created(sk, skb);
 429		if (err)
 430			break;
 431		/* fall through */
 432	case PNS_PIPE_RESET_IND:
 433		if (!pn->init_enable)
 434			break;
 435		/* fall through */
 436	case PNS_PIPE_ENABLED_IND:
 437		if (!pn_flow_safe(pn->tx_fc)) {
 438			atomic_set(&pn->tx_credits, 1);
 439			sk->sk_write_space(sk);
 440		}
 441		if (sk->sk_state == TCP_ESTABLISHED)
 442			break; /* Nothing to do */
 443		sk->sk_state = TCP_ESTABLISHED;
 444		pipe_grant_credits(sk, GFP_ATOMIC);
 445		break;
 446
 447	case PNS_PIPE_DISABLED_IND:
 448		sk->sk_state = TCP_SYN_RECV;
 449		pn->rx_credits = 0;
 450		break;
 451
 452	default:
 453		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n",
 454				hdr->message_id);
 455		err = -EINVAL;
 456	}
 457out:
 458	kfree_skb(skb);
 459	return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
 460
 461queue:
 462	skb->dev = NULL;
 463	skb_set_owner_r(skb, sk);
 464	err = skb->len;
 465	skb_queue_tail(queue, skb);
 466	if (!sock_flag(sk, SOCK_DEAD))
 467		sk->sk_data_ready(sk, err);
 468	return NET_RX_SUCCESS;
 469}
 470
 471/* Destroy connected sock. */
 472static void pipe_destruct(struct sock *sk)
 473{
 474	struct pep_sock *pn = pep_sk(sk);
 475
 476	skb_queue_purge(&sk->sk_receive_queue);
 477	skb_queue_purge(&pn->ctrlreq_queue);
 478}
 479
 480static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n)
 481{
 482	unsigned i;
 483	u8 final_fc = PN_NO_FLOW_CONTROL;
 484
 485	for (i = 0; i < n; i++) {
 486		u8 fc = fcs[i];
 487
 488		if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
 489			final_fc = fc;
 490	}
 491	return final_fc;
 492}
 493
 494static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
 495{
 496	struct pep_sock *pn = pep_sk(sk);
 497	struct pnpipehdr *hdr;
 498	u8 n_sb;
 499
 500	if (!pskb_pull(skb, sizeof(*hdr) + 4))
 501		return -EINVAL;
 502
 503	hdr = pnp_hdr(skb);
 504	if (hdr->error_code != PN_PIPE_NO_ERROR)
 505		return -ECONNREFUSED;
 506
 507	/* Parse sub-blocks */
 508	n_sb = hdr->data[4];
 509	while (n_sb > 0) {
 510		u8 type, buf[6], len = sizeof(buf);
 511		const u8 *data = pep_get_sb(skb, &type, &len, buf);
 512
 513		if (data == NULL)
 514			return -EINVAL;
 515
 516		switch (type) {
 517		case PN_PIPE_SB_REQUIRED_FC_TX:
 518			if (len < 2 || len < data[0])
 519				break;
 520			pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
 521			break;
 522
 523		case PN_PIPE_SB_PREFERRED_FC_RX:
 524			if (len < 2 || len < data[0])
 525				break;
 526			pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
 527			break;
 528
 529		}
 530		n_sb--;
 531	}
 532
 533	return pipe_handler_send_created_ind(sk);
 534}
  535
  536/* Queue an skb to an actively connected sock.
 537 * Socket lock must be held. */
 538static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 539{
 540	struct pep_sock *pn = pep_sk(sk);
 541	struct pnpipehdr *hdr = pnp_hdr(skb);
 542	int err = NET_RX_SUCCESS;
 543
 544	switch (hdr->message_id) {
 545	case PNS_PIPE_ALIGNED_DATA:
 546		__skb_pull(skb, 1);
 547		/* fall through */
 548	case PNS_PIPE_DATA:
 549		__skb_pull(skb, 3); /* Pipe data header */
 550		if (!pn_flow_safe(pn->rx_fc)) {
 551			err = sock_queue_rcv_skb(sk, skb);
 552			if (!err)
 553				return NET_RX_SUCCESS;
 554			err = NET_RX_DROP;
 555			break;
 556		}
 557
 558		if (pn->rx_credits == 0) {
 559			atomic_inc(&sk->sk_drops);
 560			err = NET_RX_DROP;
 561			break;
 562		}
 563		pn->rx_credits--;
 564		skb->dev = NULL;
 565		skb_set_owner_r(skb, sk);
 566		err = skb->len;
 567		skb_queue_tail(&sk->sk_receive_queue, skb);
 568		if (!sock_flag(sk, SOCK_DEAD))
 569			sk->sk_data_ready(sk, err);
 570		return NET_RX_SUCCESS;
 571
 572	case PNS_PEP_CONNECT_RESP:
 573		if (sk->sk_state != TCP_SYN_SENT)
 574			break;
 575		if (!sock_flag(sk, SOCK_DEAD))
 576			sk->sk_state_change(sk);
 577		if (pep_connresp_rcv(sk, skb)) {
 578			sk->sk_state = TCP_CLOSE_WAIT;
 579			break;
  580		}
  581
 582		sk->sk_state = TCP_ESTABLISHED;
 583		if (!pn_flow_safe(pn->tx_fc)) {
 584			atomic_set(&pn->tx_credits, 1);
 585			sk->sk_write_space(sk);
 586		}
 587		pipe_grant_credits(sk, GFP_ATOMIC);
 588		break;
 589
 590	case PNS_PEP_DISCONNECT_RESP:
 591		/* sock should already be dead, nothing to do */
 592		break;
 593
 594	case PNS_PEP_STATUS_IND:
 595		pipe_rcv_status(sk, skb);
 596		break;
 597	}
 598	kfree_skb(skb);
 599	return err;
 600}
 601
 602/* Listening sock must be locked */
 603static struct sock *pep_find_pipe(const struct hlist_head *hlist,
 604					const struct sockaddr_pn *dst,
 605					u8 pipe_handle)
 606{
 607	struct hlist_node *node;
 608	struct sock *sknode;
 609	u16 dobj = pn_sockaddr_get_object(dst);
 610
 611	sk_for_each(sknode, node, hlist) {
 612		struct pep_sock *pnnode = pep_sk(sknode);
 613
 614		/* Ports match, but addresses might not: */
 615		if (pnnode->pn_sk.sobject != dobj)
 616			continue;
 617		if (pnnode->pipe_handle != pipe_handle)
 618			continue;
 619		if (sknode->sk_state == TCP_CLOSE_WAIT)
 620			continue;
 621
 622		sock_hold(sknode);
 623		return sknode;
 624	}
 625	return NULL;
 626}
 627
 628/*
 629 * Deliver an skb to a listening sock.
 630 * Socket lock must be held.
 631 * We then queue the skb to the right connected sock (if any).
 632 */
 633static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 634{
 635	struct pep_sock *pn = pep_sk(sk);
 636	struct sock *sknode;
 637	struct pnpipehdr *hdr;
 638	struct sockaddr_pn dst;
 639	u8 pipe_handle;
 640
 641	if (!pskb_may_pull(skb, sizeof(*hdr)))
 642		goto drop;
 643
 644	hdr = pnp_hdr(skb);
 645	pipe_handle = hdr->pipe_handle;
 646	if (pipe_handle == PN_PIPE_INVALID_HANDLE)
 647		goto drop;
 648
 649	pn_skb_get_dst_sockaddr(skb, &dst);
 650
 651	/* Look for an existing pipe handle */
 652	sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
 653	if (sknode)
 654		return sk_receive_skb(sknode, skb, 1);
 655
 656	switch (hdr->message_id) {
 657	case PNS_PEP_CONNECT_REQ:
 658		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
 659			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
 660					GFP_ATOMIC);
 661			break;
 662		}
 663		skb_queue_head(&sk->sk_receive_queue, skb);
 664		sk_acceptq_added(sk);
 665		if (!sock_flag(sk, SOCK_DEAD))
 666			sk->sk_data_ready(sk, 0);
 667		return NET_RX_SUCCESS;
 668
 669	case PNS_PEP_DISCONNECT_REQ:
 670		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 671		break;
 672
 673	case PNS_PEP_CTRL_REQ:
 674		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
 675		break;
 676
 677	case PNS_PEP_RESET_REQ:
 678	case PNS_PEP_ENABLE_REQ:
 679	case PNS_PEP_DISABLE_REQ:
 680		/* invalid handle is not even allowed here! */
 681		break;
 682
 683	default:
 684		if ((1 << sk->sk_state)
 685				& ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
 686			/* actively connected socket */
 687			return pipe_handler_do_rcv(sk, skb);
 688	}
 689drop:
 690	kfree_skb(skb);
 691	return NET_RX_SUCCESS;
 692}
 693
 694static int pipe_do_remove(struct sock *sk)
 695{
 696	struct pep_sock *pn = pep_sk(sk);
 697	struct pnpipehdr *ph;
 698	struct sk_buff *skb;
 699
 700	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
 701	if (!skb)
 702		return -ENOMEM;
 703
 704	ph = pnp_hdr(skb);
 705	ph->utid = 0;
 706	ph->message_id = PNS_PIPE_REMOVE_REQ;
 707	ph->pipe_handle = pn->pipe_handle;
 708	ph->data[0] = PAD;
 709	return pn_skb_send(sk, skb, NULL);
 710}
 711
 712/* associated socket ceases to exist */
 713static void pep_sock_close(struct sock *sk, long timeout)
 714{
 715	struct pep_sock *pn = pep_sk(sk);
 716	int ifindex = 0;
 717
 718	sock_hold(sk); /* keep a reference after sk_common_release() */
 719	sk_common_release(sk);
 720
 721	lock_sock(sk);
 722	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
 723		if (sk->sk_backlog_rcv == pipe_do_rcv)
 724			/* Forcefully remove dangling Phonet pipe */
 725			pipe_do_remove(sk);
 726		else
 727			pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
 728						NULL, 0);
 729	}
 730	sk->sk_state = TCP_CLOSE;
 731
 732	ifindex = pn->ifindex;
 733	pn->ifindex = 0;
 734	release_sock(sk);
 735
 736	if (ifindex)
 737		gprs_detach(sk);
 738	sock_put(sk);
 739}
 740
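    /* Dequeue a PNS_PEP_CONNECT_REQ queued on the listening sock, validate it
     * and parse its sub-blocks, then create and hash a connected child sock
     * (TCP_SYN_RECV) for the requested pipe handle.
     */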
 741static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
 742{
 743	struct pep_sock *pn = pep_sk(sk), *newpn;
 744	struct sock *newsk = NULL;
 745	struct sk_buff *skb;
 746	struct pnpipehdr *hdr;
 747	struct sockaddr_pn dst, src;
 748	int err;
 749	u16 peer_type;
 750	u8 pipe_handle, enabled, n_sb;
 751	u8 aligned = 0;
 752
 753	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
 754	if (!skb)
 755		return NULL;
 756
 757	lock_sock(sk);
 758	if (sk->sk_state != TCP_LISTEN) {
 759		err = -EINVAL;
 760		goto drop;
 761	}
 762	sk_acceptq_removed(sk);
 763
 764	err = -EPROTO;
 765	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 766		goto drop;
 767
 768	hdr = pnp_hdr(skb);
 769	pipe_handle = hdr->pipe_handle;
 770	switch (hdr->state_after_connect) {
 771	case PN_PIPE_DISABLE:
 772		enabled = 0;
 773		break;
 774	case PN_PIPE_ENABLE:
 775		enabled = 1;
 776		break;
 777	default:
 778		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
 779				GFP_KERNEL);
 780		goto drop;
 781	}
 782	peer_type = hdr->other_pep_type << 8;
 783
 784	/* Parse sub-blocks (options) */
 785	n_sb = hdr->data[4];
 786	while (n_sb > 0) {
 787		u8 type, buf[1], len = sizeof(buf);
 788		const u8 *data = pep_get_sb(skb, &type, &len, buf);
 789
 790		if (data == NULL)
 791			goto drop;
 792		switch (type) {
 793		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
 794			if (len < 1)
 795				goto drop;
 796			peer_type = (peer_type & 0xff00) | data[0];
 797			break;
 798		case PN_PIPE_SB_ALIGNED_DATA:
 799			aligned = data[0] != 0;
 800			break;
 801		}
 802		n_sb--;
 803	}
 804
  805	/* Check for duplicate pipe handle */
    	pn_skb_get_dst_sockaddr(skb, &dst); /* fill dst before the lookup below */
  806	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
 807	if (unlikely(newsk)) {
 808		__sock_put(newsk);
 809		newsk = NULL;
 810		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
 811		goto drop;
 812	}
 813
 814	/* Create a new to-be-accepted sock */
 815	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
 816	if (!newsk) {
 817		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
 818		err = -ENOBUFS;
 819		goto drop;
 820	}
 821
 822	sock_init_data(NULL, newsk);
 823	newsk->sk_state = TCP_SYN_RECV;
 824	newsk->sk_backlog_rcv = pipe_do_rcv;
 825	newsk->sk_protocol = sk->sk_protocol;
 826	newsk->sk_destruct = pipe_destruct;
 827
 828	newpn = pep_sk(newsk);
 829	pn_skb_get_dst_sockaddr(skb, &dst);
 830	pn_skb_get_src_sockaddr(skb, &src);
 831	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
 832	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
 833	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
 834	sock_hold(sk);
 835	newpn->listener = sk;
 836	skb_queue_head_init(&newpn->ctrlreq_queue);
 837	newpn->pipe_handle = pipe_handle;
 838	atomic_set(&newpn->tx_credits, 0);
 839	newpn->ifindex = 0;
 840	newpn->peer_type = peer_type;
 841	newpn->rx_credits = 0;
 842	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 843	newpn->init_enable = enabled;
 844	newpn->aligned = aligned;
 845
 846	err = pep_accept_conn(newsk, skb);
 847	if (err) {
 848		sock_put(newsk);
 849		newsk = NULL;
 850		goto drop;
 851	}
 852	sk_add_node(newsk, &pn->hlist);
 853drop:
 854	release_sock(sk);
 855	kfree_skb(skb);
 856	*errp = err;
 857	return newsk;
 858}
 859
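    /* Active connect: pick a provisional pipe handle and send a
     * PNS_PEP_CONNECT_REQ; on success the sock moves to TCP_SYN_SENT.
     */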
 860static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
 861{
 862	struct pep_sock *pn = pep_sk(sk);
 863	int err;
 864	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 865
  866	pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
 867	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
 868					PN_PIPE_ENABLE, data, 4);
 869	if (err) {
 870		pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 871		return err;
  872	}
  873	sk->sk_state = TCP_SYN_SENT;
 874	return 0;
 875}
 876
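    /* SIOCINQ reports the size of the next readable message, looking at the
     * control-request queue first when urgent data is received inline.
     */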
 877static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 878{
 879	struct pep_sock *pn = pep_sk(sk);
  880	int answ;
 881
 882	switch (cmd) {
 883	case SIOCINQ:
 884		if (sk->sk_state == TCP_LISTEN)
  885			return -EINVAL;
 886
 887		lock_sock(sk);
 888		if (sock_flag(sk, SOCK_URGINLINE) &&
 889		    !skb_queue_empty(&pn->ctrlreq_queue))
 890			answ = skb_peek(&pn->ctrlreq_queue)->len;
 891		else if (!skb_queue_empty(&sk->sk_receive_queue))
 892			answ = skb_peek(&sk->sk_receive_queue)->len;
 893		else
 894			answ = 0;
 895		release_sock(sk);
  896		return put_user(answ, (int __user *)arg);
 897	}
 898
 899	return -ENOIOCTLCMD;
 900}
 901
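    /* Reset a newly created pipe sock to its disconnected defaults:
     * no pipe handle, no credits, legacy flow control.
     */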
 902static int pep_init(struct sock *sk)
 903{
 904	struct pep_sock *pn = pep_sk(sk);
 905
 906	sk->sk_destruct = pipe_destruct;
 907	INIT_HLIST_HEAD(&pn->hlist);
 908	pn->listener = NULL;
 909	skb_queue_head_init(&pn->ctrlreq_queue);
 910	atomic_set(&pn->tx_credits, 0);
 911	pn->ifindex = 0;
 912	pn->peer_type = 0;
 913	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 914	pn->rx_credits = 0;
 915	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 916	pn->init_enable = 1;
 917	pn->aligned = 0;
 918	return 0;
 919}
 920
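    /* PNPIPE_ENCAP attaches or detaches a GPRS network interface carrying IP
     * over this pipe; changing it requires CAP_NET_ADMIN.
     */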
 921static int pep_setsockopt(struct sock *sk, int level, int optname,
 922				char __user *optval, unsigned int optlen)
 923{
 924	struct pep_sock *pn = pep_sk(sk);
 925	int val = 0, err = 0;
 926
 927	if (level != SOL_PNPIPE)
 928		return -ENOPROTOOPT;
 929	if (optlen >= sizeof(int)) {
 930		if (get_user(val, (int __user *) optval))
 931			return -EFAULT;
 932	}
 933
 934	lock_sock(sk);
 935	switch (optname) {
 936	case PNPIPE_ENCAP:
 937		if (val && val != PNPIPE_ENCAP_IP) {
 938			err = -EINVAL;
 939			break;
 940		}
 941		if (!pn->ifindex == !val)
 942			break; /* Nothing to do! */
 943		if (!capable(CAP_NET_ADMIN)) {
 944			err = -EPERM;
 945			break;
 946		}
 947		if (val) {
 948			release_sock(sk);
 949			err = gprs_attach(sk);
 950			if (err > 0) {
 951				pn->ifindex = err;
 952				err = 0;
 953			}
 954		} else {
 955			pn->ifindex = 0;
 956			release_sock(sk);
 957			gprs_detach(sk);
 958			err = 0;
 959		}
 960		goto out_norel;
  961
 962	default:
 963		err = -ENOPROTOOPT;
 964	}
 965	release_sock(sk);
 966
 967out_norel:
 968	return err;
 969}
 970
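    /* Report the encapsulation mode, the attached interface index, or the
     * pipe handle (if one has been assigned).
     */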
 971static int pep_getsockopt(struct sock *sk, int level, int optname,
 972				char __user *optval, int __user *optlen)
 973{
 974	struct pep_sock *pn = pep_sk(sk);
 975	int len, val;
 976
 977	if (level != SOL_PNPIPE)
 978		return -ENOPROTOOPT;
 979	if (get_user(len, optlen))
 980		return -EFAULT;
 981
 982	switch (optname) {
 983	case PNPIPE_ENCAP:
 984		val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
 985		break;
 986
 987	case PNPIPE_IFINDEX:
 988		val = pn->ifindex;
 989		break;
 990
 991	case PNPIPE_HANDLE:
 992		val = pn->pipe_handle;
 993		if (val == PN_PIPE_INVALID_HANDLE)
 994			return -EINVAL;
 995		break;
  996
 997	default:
 998		return -ENOPROTOOPT;
 999	}
1000
1001	len = min_t(unsigned int, sizeof(int), len);
1002	if (put_user(len, optlen))
1003		return -EFAULT;
1004	if (put_user(val, (int __user *) optval))
1005		return -EFAULT;
1006	return 0;
1007}
1008
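    /* Prepend the pipe data header and send one message, consuming a TX
     * credit when credit-based flow control is in effect; the credit is
     * given back if the send fails.
     */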
1009static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
1010{
1011	struct pep_sock *pn = pep_sk(sk);
1012	struct pnpipehdr *ph;
1013	int err;
1014
1015	if (pn_flow_safe(pn->tx_fc) &&
1016	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
1017		kfree_skb(skb);
1018		return -ENOBUFS;
1019	}
1020
1021	skb_push(skb, 3 + pn->aligned);
1022	skb_reset_transport_header(skb);
1023	ph = pnp_hdr(skb);
1024	ph->utid = 0;
1025	if (pn->aligned) {
1026		ph->message_id = PNS_PIPE_ALIGNED_DATA;
1027		ph->data[0] = 0; /* padding */
1028	} else
1029		ph->message_id = PNS_PIPE_DATA;
1030	ph->pipe_handle = pn->pipe_handle;
1031	err = pn_skb_send(sk, skb, NULL);
1032
1033	if (err && pn_flow_safe(pn->tx_fc))
1034		atomic_inc(&pn->tx_credits);
1035	return err;
1036
1037}
1038
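    /* Send one record per call (MSG_EOR is mandatory), waiting as needed for
     * the pipe to reach the enabled state and for a TX credit.
     */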
1039static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
1040			struct msghdr *msg, size_t len)
1041{
1042	struct pep_sock *pn = pep_sk(sk);
1043	struct sk_buff *skb;
1044	long timeo;
1045	int flags = msg->msg_flags;
1046	int err, done;
 1047
1048	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
1049				MSG_CMSG_COMPAT)) ||
1050			!(msg->msg_flags & MSG_EOR))
1051		return -EOPNOTSUPP;
1052
1053	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
1054					flags & MSG_DONTWAIT, &err);
1055	if (!skb)
1056		return err;
1057
1058	skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
1059	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1060	if (err < 0)
1061		goto outfree;
1062
1063	lock_sock(sk);
1064	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1065	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
1066		err = -ENOTCONN;
1067		goto out;
1068	}
1069	if (sk->sk_state != TCP_ESTABLISHED) {
1070		/* Wait until the pipe gets to enabled state */
1071disabled:
1072		err = sk_stream_wait_connect(sk, &timeo);
1073		if (err)
1074			goto out;
1075
1076		if (sk->sk_state == TCP_CLOSE_WAIT) {
1077			err = -ECONNRESET;
1078			goto out;
1079		}
1080	}
1081	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
1082
1083	/* Wait until flow control allows TX */
1084	done = atomic_read(&pn->tx_credits);
1085	while (!done) {
1086		DEFINE_WAIT(wait);
1087
1088		if (!timeo) {
1089			err = -EAGAIN;
1090			goto out;
1091		}
1092		if (signal_pending(current)) {
1093			err = sock_intr_errno(timeo);
1094			goto out;
1095		}
1096
1097		prepare_to_wait(sk_sleep(sk), &wait,
1098				TASK_INTERRUPTIBLE);
1099		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
1100		finish_wait(sk_sleep(sk), &wait);
1101
1102		if (sk->sk_state != TCP_ESTABLISHED)
1103			goto disabled;
1104	}
1105
1106	err = pipe_skb_send(sk, skb);
1107	if (err >= 0)
1108		err = len; /* success! */
1109	skb = NULL;
1110out:
1111	release_sock(sk);
1112outfree:
1113	kfree_skb(skb);
1114	return err;
1115}
1116
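    /* The pipe is writable as long as TX credits remain. */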
1117int pep_writeable(struct sock *sk)
1118{
1119	struct pep_sock *pn = pep_sk(sk);
1120
1121	return atomic_read(&pn->tx_credits);
1122}
1123
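    /* Transmit on behalf of the GPRS interface: aligned pipes send the skb
     * as-is, otherwise it becomes the fragment list of a new header skb.
     */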
1124int pep_write(struct sock *sk, struct sk_buff *skb)
1125{
1126	struct sk_buff *rskb, *fs;
1127	int flen = 0;
1128
1129	if (pep_sk(sk)->aligned)
1130		return pipe_skb_send(sk, skb);
1131
1132	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
1133	if (!rskb) {
1134		kfree_skb(skb);
1135		return -ENOMEM;
1136	}
1137	skb_shinfo(rskb)->frag_list = skb;
1138	rskb->len += skb->len;
1139	rskb->data_len += rskb->len;
1140	rskb->truesize += rskb->len;
1141
1142	/* Avoid nested fragments */
1143	skb_walk_frags(skb, fs)
1144		flen += fs->len;
1145	skb->next = skb_shinfo(skb)->frag_list;
1146	skb_frag_list_init(skb);
1147	skb->len -= flen;
1148	skb->data_len -= flen;
1149	skb->truesize -= flen;
1150
1151	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
1152	return pipe_skb_send(sk, rskb);
1153}
1154
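    /* Dequeue one received message for the GPRS interface, granting the peer
     * fresh RX credits while the pipe is enabled.
     */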
1155struct sk_buff *pep_read(struct sock *sk)
1156{
1157	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
1158
1159	if (sk->sk_state == TCP_ESTABLISHED)
1160		pipe_grant_credits(sk, GFP_ATOMIC);
1161	return skb;
1162}
1163
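    /* Out-of-band control requests are delivered (and acknowledged) before
     * regular pipe data; each message is returned with MSG_EOR set, and
     * oversized messages are flagged with MSG_TRUNC.
     */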
1164static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
1165			struct msghdr *msg, size_t len, int noblock,
1166			int flags, int *addr_len)
1167{
1168	struct sk_buff *skb;
1169	int err;
1170
1171	if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
1172			MSG_NOSIGNAL|MSG_CMSG_COMPAT))
1173		return -EOPNOTSUPP;
1174
1175	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
1176		return -ENOTCONN;
1177
1178	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
1179		/* Dequeue and acknowledge control request */
1180		struct pep_sock *pn = pep_sk(sk);
1181
1182		if (flags & MSG_PEEK)
1183			return -EOPNOTSUPP;
1184		skb = skb_dequeue(&pn->ctrlreq_queue);
1185		if (skb) {
1186			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
1187						GFP_KERNEL);
1188			msg->msg_flags |= MSG_OOB;
1189			goto copy;
1190		}
1191		if (flags & MSG_OOB)
1192			return -EINVAL;
1193	}
1194
1195	skb = skb_recv_datagram(sk, flags, noblock, &err);
1196	lock_sock(sk);
1197	if (skb == NULL) {
1198		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
1199			err = -ECONNRESET;
1200		release_sock(sk);
1201		return err;
1202	}
1203
1204	if (sk->sk_state == TCP_ESTABLISHED)
1205		pipe_grant_credits(sk, GFP_KERNEL);
1206	release_sock(sk);
1207copy:
1208	msg->msg_flags |= MSG_EOR;
1209	if (skb->len > len)
1210		msg->msg_flags |= MSG_TRUNC;
1211	else
1212		len = skb->len;
1213
1214	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
1215	if (!err)
1216		err = (flags & MSG_TRUNC) ? skb->len : len;
1217
1218	skb_free_datagram(sk, skb);
1219	return err;
1220}
1221
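    /* Detach a child sock from its listener; the listening sock itself is
     * unhashed only once its list of connected pipes is empty.
     */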
1222static void pep_sock_unhash(struct sock *sk)
1223{
1224	struct pep_sock *pn = pep_sk(sk);
1225	struct sock *skparent = NULL;
1226
1227	lock_sock(sk);
1228
1229	if (pn->listener != NULL) {
1230		skparent = pn->listener;
1231		pn->listener = NULL;
1232		release_sock(sk);
1233
1234		pn = pep_sk(skparent);
1235		lock_sock(skparent);
1236		sk_del_node_init(sk);
1237		sk = skparent;
1238	}
1239
1240	/* Unhash a listening sock only when it is closed
1241	 * and all of its active connected pipes are closed. */
1242	if (hlist_empty(&pn->hlist))
1243		pn_sock_unhash(&pn->pn_sk.sk);
1244	release_sock(sk);
1245
1246	if (skparent)
1247		sock_put(skparent);
1248}
1249
1250static struct proto pep_proto = {
1251	.close		= pep_sock_close,
1252	.accept		= pep_sock_accept,
1253	.connect	= pep_sock_connect,
1254	.ioctl		= pep_ioctl,
1255	.init		= pep_init,
1256	.setsockopt	= pep_setsockopt,
1257	.getsockopt	= pep_getsockopt,
1258	.sendmsg	= pep_sendmsg,
1259	.recvmsg	= pep_recvmsg,
1260	.backlog_rcv	= pep_do_rcv,
1261	.hash		= pn_sock_hash,
1262	.unhash		= pep_sock_unhash,
1263	.get_port	= pn_sock_get_port,
1264	.obj_size	= sizeof(struct pep_sock),
1265	.owner		= THIS_MODULE,
1266	.name		= "PNPIPE",
1267};
1268
1269static struct phonet_protocol pep_pn_proto = {
1270	.ops		= &phonet_stream_ops,
1271	.prot		= &pep_proto,
1272	.sock_type	= SOCK_SEQPACKET,
1273};
1274
1275static int __init pep_register(void)
1276{
1277	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
1278}
1279
1280static void __exit pep_unregister(void)
1281{
1282	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
1283}
1284
1285module_init(pep_register);
1286module_exit(pep_unregister);
1287MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
1288MODULE_DESCRIPTION("Phonet pipe protocol");
1289MODULE_LICENSE("GPL");
1290MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);