v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * File: pep.c
   4 *
   5 * Phonet pipe protocol end point socket
   6 *
   7 * Copyright (C) 2008 Nokia Corporation.
   8 *
   9 * Author: Rémi Denis-Courmont
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/sched/signal.h>
  14#include <linux/slab.h>
  15#include <linux/socket.h>
  16#include <net/sock.h>
  17#include <net/tcp_states.h>
  18#include <asm/ioctls.h>
  19
  20#include <linux/phonet.h>
  21#include <linux/module.h>
  22#include <net/phonet/phonet.h>
  23#include <net/phonet/pep.h>
  24#include <net/phonet/gprs.h>
  25
  26/* sk_state values:
  27 * TCP_CLOSE		sock not in use yet
  28 * TCP_CLOSE_WAIT	disconnected pipe
  29 * TCP_LISTEN		listening pipe endpoint
  30 * TCP_SYN_RECV		connected pipe in disabled state
  31 * TCP_ESTABLISHED	connected pipe in enabled state
  32 *
  33 * pep_sock locking:
  34 *  - sk_state, hlist: sock lock needed
  35 *  - listener: read only
  36 *  - pipe_handle: read only
  37 */
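/* Typical state transitions, as implemented below:
 *  passive side:  TCP_LISTEN -> accept(): TCP_SYN_RECV ->
 *                 PNS_PIPE_ENABLED_IND: TCP_ESTABLISHED ->
 *                 PNS_PEP_DISCONNECT_REQ: TCP_CLOSE_WAIT
 *  active side:   connect(): TCP_SYN_SENT -> PNS_PEP_CONNECT_RESP:
 *                 TCP_SYN_RECV or TCP_ESTABLISHED (per PNPIPE_INITSTATE),
 *                 then SIOCPNENABLEPIPE: TCP_SYN_SENT ->
 *                 PNS_PEP_ENABLE_RESP: TCP_ESTABLISHED
 * TCP_SYN_SENT is thus used transiently by the active path, although it is
 * not listed in the table above.
 */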
  38
  39#define CREDITS_MAX	10
  40#define CREDITS_THR	7
  41
  42#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
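/* e.g. pep_sb_size(2) == 4 and pep_sb_size(5) == 8: the payload size plus
 * the two-byte sub-block header, rounded up to the next 32-bit boundary.
 */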
  43
  44/* Get the next TLV sub-block. */
  45static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
  46					void *buf)
  47{
  48	void *data = NULL;
  49	struct {
  50		u8 sb_type;
  51		u8 sb_len;
  52	} *ph, h;
  53	int buflen = *plen;
  54
  55	ph = skb_header_pointer(skb, 0, 2, &h);
  56	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
  57		return NULL;
  58	ph->sb_len -= 2;
  59	*ptype = ph->sb_type;
  60	*plen = ph->sb_len;
  61
  62	if (buflen > ph->sb_len)
  63		buflen = ph->sb_len;
  64	data = skb_header_pointer(skb, 2, buflen, buf);
  65	__skb_pull(skb, 2 + ph->sb_len);
  66	return data;
  67}
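/* Sub-blocks are TLV encoded: one type byte, one length byte counting the
 * whole sub-block (header included), then the value.  pep_get_sb() returns
 * a pointer to the value (copied into buf when needed, truncated to the
 * caller's buffer size), stores the type and value length back through
 * ptype/plen, and pulls the complete sub-block off the skb so the next
 * call starts at the following one.
 */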
  68
  69static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
  70					int len, gfp_t priority)
  71{
  72	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
  73	if (!skb)
  74		return NULL;
  75	skb_set_owner_w(skb, sk);
  76
  77	skb_reserve(skb, MAX_PNPIPE_HEADER);
  78	__skb_put(skb, len);
  79	skb_copy_to_linear_data(skb, payload, len);
  80	__skb_push(skb, sizeof(struct pnpipehdr));
  81	skb_reset_transport_header(skb);
  82	return skb;
  83}
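/* The returned skb carries the payload preceded by a blank struct
 * pnpipehdr, which is made the transport header for the caller to fill in.
 * The rest of the MAX_PNPIPE_HEADER headroom is left for the lower Phonet
 * and link-layer headers.
 */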
  84
  85static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
  86			const void *data, int len, gfp_t priority)
  87{
  88	const struct pnpipehdr *oph = pnp_hdr(oskb);
  89	struct pnpipehdr *ph;
  90	struct sk_buff *skb;
  91	struct sockaddr_pn peer;
  92
  93	skb = pep_alloc_skb(sk, data, len, priority);
  94	if (!skb)
  95		return -ENOMEM;
  96
  97	ph = pnp_hdr(skb);
  98	ph->utid = oph->utid;
  99	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
 100	ph->pipe_handle = oph->pipe_handle;
 101	ph->error_code = code;
 102
 103	pn_skb_get_src_sockaddr(oskb, &peer);
 104	return pn_skb_send(sk, skb, &peer);
 105}
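/* Replies reuse the utid and pipe handle of the request and rely on each
 * *_RESP message id being the matching *_REQ plus one; they are sent back
 * to the source address of the original skb.
 */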
 106
 107static int pep_indicate(struct sock *sk, u8 id, u8 code,
 108			const void *data, int len, gfp_t priority)
 109{
 110	struct pep_sock *pn = pep_sk(sk);
 111	struct pnpipehdr *ph;
 112	struct sk_buff *skb;
 113
 114	skb = pep_alloc_skb(sk, data, len, priority);
 115	if (!skb)
 116		return -ENOMEM;
 117
 118	ph = pnp_hdr(skb);
 119	ph->utid = 0;
 120	ph->message_id = id;
 121	ph->pipe_handle = pn->pipe_handle;
 122	ph->error_code = code;
 123	return pn_skb_send(sk, skb, NULL);
 124}
 125
 126#define PAD 0x00
 127
 128static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
 129				const void *data, int len)
 130{
 131	struct pep_sock *pn = pep_sk(sk);
 132	struct pnpipehdr *ph;
 133	struct sk_buff *skb;
 134
 135	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
 136	if (!skb)
 137		return -ENOMEM;
 138
 139	ph = pnp_hdr(skb);
 140	ph->utid = id; /* whatever */
 141	ph->message_id = id;
 142	ph->pipe_handle = pn->pipe_handle;
 143	ph->error_code = code;
 144	return pn_skb_send(sk, skb, NULL);
 145}
 146
 147static int pipe_handler_send_created_ind(struct sock *sk)
 148{
 149	struct pep_sock *pn = pep_sk(sk);
 150	u8 data[4] = {
 151		PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
 152		pn->tx_fc, pn->rx_fc,
 153	};
 154
 155	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
 156				data, 4, GFP_ATOMIC);
 157}
 158
 159static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
 160{
 161	static const u8 data[20] = {
 162		PAD, PAD, PAD, 2 /* sub-blocks */,
 163		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
 164			PN_MULTI_CREDIT_FLOW_CONTROL,
 165			PN_ONE_CREDIT_FLOW_CONTROL,
 166			PN_LEGACY_FLOW_CONTROL,
 167			PAD,
 168		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
 169			PN_MULTI_CREDIT_FLOW_CONTROL,
 170			PN_ONE_CREDIT_FLOW_CONTROL,
 171			PN_LEGACY_FLOW_CONTROL,
 172			PAD,
 173	};
 174
 175	might_sleep();
 176	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
 177				GFP_KERNEL);
 178}
 179
 180static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
 181				gfp_t priority)
 182{
 183	static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
 184	WARN_ON(code == PN_PIPE_NO_ERROR);
 185	return pep_reply(sk, skb, code, data, sizeof(data), priority);
 186}
 187
 188/* Control requests are not sent by the pipe service and have a specific
 189 * message format. */
 190static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 191				gfp_t priority)
 192{
 193	const struct pnpipehdr *oph = pnp_hdr(oskb);
 194	struct sk_buff *skb;
 195	struct pnpipehdr *ph;
 196	struct sockaddr_pn dst;
 197	u8 data[4] = {
 198		oph->pep_type, /* PEP type */
 199		code, /* error code, at an unusual offset */
 200		PAD, PAD,
 201	};
 202
 203	skb = pep_alloc_skb(sk, data, 4, priority);
 204	if (!skb)
 205		return -ENOMEM;
 206
 207	ph = pnp_hdr(skb);
 208	ph->utid = oph->utid;
 209	ph->message_id = PNS_PEP_CTRL_RESP;
 210	ph->pipe_handle = oph->pipe_handle;
 211	ph->data0 = oph->data[0]; /* CTRL id */
 212
 213	pn_skb_get_src_sockaddr(oskb, &dst);
 214	return pn_skb_send(sk, skb, &dst);
 215}
 216
 217static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
 218{
 219	u8 data[4] = { type, PAD, PAD, status };
 220
 221	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
 222				data, 4, priority);
 223}
 224
 225/* Send our RX flow control information to the sender.
 226 * Socket must be locked. */
 227static void pipe_grant_credits(struct sock *sk, gfp_t priority)
 228{
 229	struct pep_sock *pn = pep_sk(sk);
 230
 231	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
 232
 233	switch (pn->rx_fc) {
 234	case PN_LEGACY_FLOW_CONTROL: /* TODO */
 235		break;
 236	case PN_ONE_CREDIT_FLOW_CONTROL:
 237		if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
 238					PEP_IND_READY, priority) == 0)
 239			pn->rx_credits = 1;
 240		break;
 241	case PN_MULTI_CREDIT_FLOW_CONTROL:
 242		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
 243			break;
 244		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
 245					CREDITS_MAX - pn->rx_credits,
 246					priority) == 0)
 247			pn->rx_credits = CREDITS_MAX;
 248		break;
 249	}
 250}
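/* With one-credit flow control a READY status gives the peer exactly one
 * message worth of credit.  With multi-credit flow control, credits are
 * only re-granted once at most CREDITS_MAX - CREDITS_THR of them remain
 * (3 with the values above), and are then topped back up to CREDITS_MAX:
 * e.g. rx_credits == 2 grants 8 more, while rx_credits == 4 grants nothing
 * yet.
 */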
 251
 252static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 253{
 254	struct pep_sock *pn = pep_sk(sk);
 255	struct pnpipehdr *hdr;
 256	int wake = 0;
 257
 258	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 259		return -EINVAL;
 260
 261	hdr = pnp_hdr(skb);
 262	if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
 263		net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
 264				    (unsigned int)hdr->pep_type);
 265		return -EOPNOTSUPP;
 266	}
 267
 268	switch (hdr->data[0]) {
 269	case PN_PEP_IND_FLOW_CONTROL:
 270		switch (pn->tx_fc) {
 271		case PN_LEGACY_FLOW_CONTROL:
 272			switch (hdr->data[3]) {
 273			case PEP_IND_BUSY:
 274				atomic_set(&pn->tx_credits, 0);
 275				break;
 276			case PEP_IND_READY:
 277				atomic_set(&pn->tx_credits, wake = 1);
 278				break;
 279			}
 280			break;
 281		case PN_ONE_CREDIT_FLOW_CONTROL:
 282			if (hdr->data[3] == PEP_IND_READY)
 283				atomic_set(&pn->tx_credits, wake = 1);
 284			break;
 285		}
 286		break;
 287
 288	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
 289		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
 290			break;
 291		atomic_add(wake = hdr->data[3], &pn->tx_credits);
 292		break;
 293
 294	default:
 295		net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
 296				    (unsigned int)hdr->data[0]);
 297		return -EOPNOTSUPP;
 298	}
 299	if (wake)
 300		sk->sk_write_space(sk);
 301	return 0;
 302}
 303
 304static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
 305{
 306	struct pep_sock *pn = pep_sk(sk);
 307	struct pnpipehdr *hdr = pnp_hdr(skb);
 308	u8 n_sb = hdr->data0;
 309
 310	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 311	__skb_pull(skb, sizeof(*hdr));
 312	while (n_sb > 0) {
 313		u8 type, buf[2], len = sizeof(buf);
 314		u8 *data = pep_get_sb(skb, &type, &len, buf);
 315
 316		if (data == NULL)
 317			return -EINVAL;
 318		switch (type) {
 319		case PN_PIPE_SB_NEGOTIATED_FC:
 320			if (len < 2 || (data[0] | data[1]) > 3)
 321				break;
 322			pn->tx_fc = data[0] & 3;
 323			pn->rx_fc = data[1] & 3;
 324			break;
 325		}
 326		n_sb--;
 327	}
 328	return 0;
 329}
 330
 331/* Queue an skb to a connected sock.
 332 * Socket lock must be held. */
 333static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 334{
 335	struct pep_sock *pn = pep_sk(sk);
 336	struct pnpipehdr *hdr = pnp_hdr(skb);
 337	struct sk_buff_head *queue;
 338	int err = 0;
 339
 340	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
 341
 342	switch (hdr->message_id) {
 343	case PNS_PEP_CONNECT_REQ:
 344		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
 345		break;
 346
 347	case PNS_PEP_DISCONNECT_REQ:
 348		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 349		sk->sk_state = TCP_CLOSE_WAIT;
 350		if (!sock_flag(sk, SOCK_DEAD))
 351			sk->sk_state_change(sk);
 352		break;
 353
 354	case PNS_PEP_ENABLE_REQ:
 355		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
 356		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 357		break;
 358
 359	case PNS_PEP_RESET_REQ:
 360		switch (hdr->state_after_reset) {
 361		case PN_PIPE_DISABLE:
 362			pn->init_enable = 0;
 363			break;
 364		case PN_PIPE_ENABLE:
 365			pn->init_enable = 1;
 366			break;
 367		default: /* not allowed to send an error here!? */
 368			err = -EINVAL;
 369			goto out;
 370		}
 371		/* fall through */
 372	case PNS_PEP_DISABLE_REQ:
 373		atomic_set(&pn->tx_credits, 0);
 374		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 375		break;
 376
 377	case PNS_PEP_CTRL_REQ:
 378		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
 379			atomic_inc(&sk->sk_drops);
 380			break;
 381		}
 382		__skb_pull(skb, 4);
 383		queue = &pn->ctrlreq_queue;
 384		goto queue;
 385
 386	case PNS_PIPE_ALIGNED_DATA:
 387		__skb_pull(skb, 1);
 388		/* fall through */
 389	case PNS_PIPE_DATA:
 390		__skb_pull(skb, 3); /* Pipe data header */
 391		if (!pn_flow_safe(pn->rx_fc)) {
 392			err = sock_queue_rcv_skb(sk, skb);
 393			if (!err)
 394				return NET_RX_SUCCESS;
 395			err = -ENOBUFS;
 396			break;
 397		}
 398
 399		if (pn->rx_credits == 0) {
 400			atomic_inc(&sk->sk_drops);
 401			err = -ENOBUFS;
 402			break;
 403		}
 404		pn->rx_credits--;
 405		queue = &sk->sk_receive_queue;
 406		goto queue;
 407
 408	case PNS_PEP_STATUS_IND:
 409		pipe_rcv_status(sk, skb);
 410		break;
 411
 412	case PNS_PIPE_REDIRECTED_IND:
 413		err = pipe_rcv_created(sk, skb);
 414		break;
 415
 416	case PNS_PIPE_CREATED_IND:
 417		err = pipe_rcv_created(sk, skb);
 418		if (err)
 419			break;
 420		/* fall through */
 421	case PNS_PIPE_RESET_IND:
 422		if (!pn->init_enable)
 423			break;
 424		/* fall through */
 425	case PNS_PIPE_ENABLED_IND:
 426		if (!pn_flow_safe(pn->tx_fc)) {
 427			atomic_set(&pn->tx_credits, 1);
 428			sk->sk_write_space(sk);
 429		}
 430		if (sk->sk_state == TCP_ESTABLISHED)
 431			break; /* Nothing to do */
 432		sk->sk_state = TCP_ESTABLISHED;
 433		pipe_grant_credits(sk, GFP_ATOMIC);
 434		break;
 435
 436	case PNS_PIPE_DISABLED_IND:
 437		sk->sk_state = TCP_SYN_RECV;
 438		pn->rx_credits = 0;
 439		break;
 440
 441	default:
 442		net_dbg_ratelimited("Phonet unknown PEP message: %u\n",
 443				    hdr->message_id);
 444		err = -EINVAL;
 445	}
 446out:
 447	kfree_skb(skb);
 448	return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
 449
 450queue:
 451	skb->dev = NULL;
 452	skb_set_owner_r(skb, sk);
 453	skb_queue_tail(queue, skb);
 454	if (!sock_flag(sk, SOCK_DEAD))
 455		sk->sk_data_ready(sk);
 456	return NET_RX_SUCCESS;
 457}
 458
 459/* Destroy connected sock. */
 460static void pipe_destruct(struct sock *sk)
 461{
 462	struct pep_sock *pn = pep_sk(sk);
 463
 464	skb_queue_purge(&sk->sk_receive_queue);
 465	skb_queue_purge(&pn->ctrlreq_queue);
 466}
 467
 468static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
 469{
 470	unsigned int i;
 471	u8 final_fc = PN_NO_FLOW_CONTROL;
 472
 473	for (i = 0; i < n; i++) {
 474		u8 fc = fcs[i];
 475
 476		if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
 477			final_fc = fc;
 478	}
 479	return final_fc;
 480}
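/* Pick the strongest flow control algorithm the peer offers: the values
 * are ordered from PN_NO_FLOW_CONTROL up to PN_MULTI_CREDIT_FLOW_CONTROL,
 * so keeping the largest valid one prefers multi-credit, then one-credit,
 * then legacy, and falls back to no flow control if nothing usable is
 * listed.
 */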
 481
 482static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
 483{
 484	struct pep_sock *pn = pep_sk(sk);
 485	struct pnpipehdr *hdr;
 486	u8 n_sb;
 487
 488	if (!pskb_pull(skb, sizeof(*hdr) + 4))
 489		return -EINVAL;
 490
 491	hdr = pnp_hdr(skb);
 492	if (hdr->error_code != PN_PIPE_NO_ERROR)
 493		return -ECONNREFUSED;
 494
 495	/* Parse sub-blocks */
 496	n_sb = hdr->data[3];
 497	while (n_sb > 0) {
 498		u8 type, buf[6], len = sizeof(buf);
 499		const u8 *data = pep_get_sb(skb, &type, &len, buf);
 500
 501		if (data == NULL)
 502			return -EINVAL;
 503
 504		switch (type) {
 505		case PN_PIPE_SB_REQUIRED_FC_TX:
 506			if (len < 2 || len < data[0])
 507				break;
 508			pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
 509			break;
 510
 511		case PN_PIPE_SB_PREFERRED_FC_RX:
 512			if (len < 2 || len < data[0])
 513				break;
 514			pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
 515			break;
 516
 517		}
 518		n_sb--;
 519	}
 520
 521	return pipe_handler_send_created_ind(sk);
 522}
 523
 524static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
 525{
 526	struct pnpipehdr *hdr = pnp_hdr(skb);
 527
 528	if (hdr->error_code != PN_PIPE_NO_ERROR)
 529		return -ECONNREFUSED;
 530
 531	return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
 532		NULL, 0, GFP_ATOMIC);
 533
 534}
 535
 536static void pipe_start_flow_control(struct sock *sk)
 537{
 538	struct pep_sock *pn = pep_sk(sk);
 539
 540	if (!pn_flow_safe(pn->tx_fc)) {
 541		atomic_set(&pn->tx_credits, 1);
 542		sk->sk_write_space(sk);
 543	}
 544	pipe_grant_credits(sk, GFP_ATOMIC);
 545}
 546
 547/* Queue an skb to an actively connected sock.
 548 * Socket lock must be held. */
 549static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 550{
 551	struct pep_sock *pn = pep_sk(sk);
 552	struct pnpipehdr *hdr = pnp_hdr(skb);
 553	int err = NET_RX_SUCCESS;
 554
 555	switch (hdr->message_id) {
 556	case PNS_PIPE_ALIGNED_DATA:
 557		__skb_pull(skb, 1);
 558		/* fall through */
 559	case PNS_PIPE_DATA:
 560		__skb_pull(skb, 3); /* Pipe data header */
 561		if (!pn_flow_safe(pn->rx_fc)) {
 562			err = sock_queue_rcv_skb(sk, skb);
 563			if (!err)
 564				return NET_RX_SUCCESS;
 565			err = NET_RX_DROP;
 566			break;
 567		}
 568
 569		if (pn->rx_credits == 0) {
 570			atomic_inc(&sk->sk_drops);
 571			err = NET_RX_DROP;
 572			break;
 573		}
 574		pn->rx_credits--;
 575		skb->dev = NULL;
 576		skb_set_owner_r(skb, sk);
 577		skb_queue_tail(&sk->sk_receive_queue, skb);
 578		if (!sock_flag(sk, SOCK_DEAD))
 579			sk->sk_data_ready(sk);
 580		return NET_RX_SUCCESS;
 581
 582	case PNS_PEP_CONNECT_RESP:
 583		if (sk->sk_state != TCP_SYN_SENT)
 584			break;
 585		if (!sock_flag(sk, SOCK_DEAD))
 586			sk->sk_state_change(sk);
 587		if (pep_connresp_rcv(sk, skb)) {
 588			sk->sk_state = TCP_CLOSE_WAIT;
 589			break;
 590		}
 591		if (pn->init_enable == PN_PIPE_DISABLE)
 592			sk->sk_state = TCP_SYN_RECV;
 593		else {
 594			sk->sk_state = TCP_ESTABLISHED;
 595			pipe_start_flow_control(sk);
 596		}
 597		break;
 598
 599	case PNS_PEP_ENABLE_RESP:
 600		if (sk->sk_state != TCP_SYN_SENT)
 601			break;
 602
 603		if (pep_enableresp_rcv(sk, skb)) {
 604			sk->sk_state = TCP_CLOSE_WAIT;
 605			break;
 606		}
 607
 608		sk->sk_state = TCP_ESTABLISHED;
 609		pipe_start_flow_control(sk);
 610		break;
 611
 612	case PNS_PEP_DISCONNECT_RESP:
 613		/* sock should already be dead, nothing to do */
 614		break;
 615
 616	case PNS_PEP_STATUS_IND:
 617		pipe_rcv_status(sk, skb);
 618		break;
 619	}
 620	kfree_skb(skb);
 621	return err;
 622}
 623
 624/* Listening sock must be locked */
 625static struct sock *pep_find_pipe(const struct hlist_head *hlist,
 626					const struct sockaddr_pn *dst,
 627					u8 pipe_handle)
 628{
 629	struct sock *sknode;
 630	u16 dobj = pn_sockaddr_get_object(dst);
 631
 632	sk_for_each(sknode, hlist) {
 633		struct pep_sock *pnnode = pep_sk(sknode);
 634
 635		/* Ports match, but addresses might not: */
 636		if (pnnode->pn_sk.sobject != dobj)
 637			continue;
 638		if (pnnode->pipe_handle != pipe_handle)
 639			continue;
 640		if (sknode->sk_state == TCP_CLOSE_WAIT)
 641			continue;
 642
 643		sock_hold(sknode);
 644		return sknode;
 645	}
 646	return NULL;
 647}
 648
 649/*
 650 * Deliver an skb to a listening sock.
 651 * Socket lock must be held.
 652 * We then queue the skb to the right connected sock (if any).
 653 */
 654static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 655{
 656	struct pep_sock *pn = pep_sk(sk);
 657	struct sock *sknode;
 658	struct pnpipehdr *hdr;
 659	struct sockaddr_pn dst;
 660	u8 pipe_handle;
 661
 662	if (!pskb_may_pull(skb, sizeof(*hdr)))
 663		goto drop;
 664
 665	hdr = pnp_hdr(skb);
 666	pipe_handle = hdr->pipe_handle;
 667	if (pipe_handle == PN_PIPE_INVALID_HANDLE)
 668		goto drop;
 669
 670	pn_skb_get_dst_sockaddr(skb, &dst);
 671
 672	/* Look for an existing pipe handle */
 673	sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
 674	if (sknode)
 675		return sk_receive_skb(sknode, skb, 1);
 676
 677	switch (hdr->message_id) {
 678	case PNS_PEP_CONNECT_REQ:
 679		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
 680			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
 681					GFP_ATOMIC);
 682			break;
 683		}
 684		skb_queue_head(&sk->sk_receive_queue, skb);
 685		sk_acceptq_added(sk);
 686		if (!sock_flag(sk, SOCK_DEAD))
 687			sk->sk_data_ready(sk);
 688		return NET_RX_SUCCESS;
 689
 690	case PNS_PEP_DISCONNECT_REQ:
 691		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 692		break;
 693
 694	case PNS_PEP_CTRL_REQ:
 695		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
 696		break;
 697
 698	case PNS_PEP_RESET_REQ:
 699	case PNS_PEP_ENABLE_REQ:
 700	case PNS_PEP_DISABLE_REQ:
 701		/* invalid handle is not even allowed here! */
 702		break;
 703
 704	default:
 705		if ((1 << sk->sk_state)
 706				& ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
 707			/* actively connected socket */
 708			return pipe_handler_do_rcv(sk, skb);
 709	}
 710drop:
 711	kfree_skb(skb);
 712	return NET_RX_SUCCESS;
 713}
 714
 715static int pipe_do_remove(struct sock *sk)
 716{
 717	struct pep_sock *pn = pep_sk(sk);
 718	struct pnpipehdr *ph;
 719	struct sk_buff *skb;
 720
 721	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
 722	if (!skb)
 723		return -ENOMEM;
 724
 725	ph = pnp_hdr(skb);
 726	ph->utid = 0;
 727	ph->message_id = PNS_PIPE_REMOVE_REQ;
 728	ph->pipe_handle = pn->pipe_handle;
 729	ph->data0 = PAD;
 730	return pn_skb_send(sk, skb, NULL);
 731}
 732
 733/* associated socket ceases to exist */
 734static void pep_sock_close(struct sock *sk, long timeout)
 735{
 736	struct pep_sock *pn = pep_sk(sk);
 737	int ifindex = 0;
 738
 739	sock_hold(sk); /* keep a reference after sk_common_release() */
 740	sk_common_release(sk);
 741
 742	lock_sock(sk);
 743	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
 744		if (sk->sk_backlog_rcv == pipe_do_rcv)
 745			/* Forcefully remove dangling Phonet pipe */
 746			pipe_do_remove(sk);
 747		else
 748			pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
 749						NULL, 0);
 750	}
 751	sk->sk_state = TCP_CLOSE;
 752
 753	ifindex = pn->ifindex;
 754	pn->ifindex = 0;
 755	release_sock(sk);
 756
 757	if (ifindex)
 758		gprs_detach(sk);
 759	sock_put(sk);
 760}
 761
 762static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
 763				    bool kern)
 764{
 765	struct pep_sock *pn = pep_sk(sk), *newpn;
 766	struct sock *newsk = NULL;
 767	struct sk_buff *skb;
 768	struct pnpipehdr *hdr;
 769	struct sockaddr_pn dst, src;
 770	int err;
 771	u16 peer_type;
 772	u8 pipe_handle, enabled, n_sb;
 773	u8 aligned = 0;
 774
 775	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
 776	if (!skb)
 777		return NULL;
 778
 779	lock_sock(sk);
 780	if (sk->sk_state != TCP_LISTEN) {
 781		err = -EINVAL;
 782		goto drop;
 783	}
 784	sk_acceptq_removed(sk);
 785
 786	err = -EPROTO;
 787	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 788		goto drop;
 789
 790	hdr = pnp_hdr(skb);
 791	pipe_handle = hdr->pipe_handle;
 792	switch (hdr->state_after_connect) {
 793	case PN_PIPE_DISABLE:
 794		enabled = 0;
 795		break;
 796	case PN_PIPE_ENABLE:
 797		enabled = 1;
 798		break;
 799	default:
 800		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
 801				GFP_KERNEL);
 802		goto drop;
 803	}
 804	peer_type = hdr->other_pep_type << 8;
 805
 806	/* Parse sub-blocks (options) */
 807	n_sb = hdr->data[3];
 808	while (n_sb > 0) {
 809		u8 type, buf[1], len = sizeof(buf);
 810		const u8 *data = pep_get_sb(skb, &type, &len, buf);
 811
 812		if (data == NULL)
 813			goto drop;
 814		switch (type) {
 815		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
 816			if (len < 1)
 817				goto drop;
 818			peer_type = (peer_type & 0xff00) | data[0];
 819			break;
 820		case PN_PIPE_SB_ALIGNED_DATA:
 821			aligned = data[0] != 0;
 822			break;
 823		}
 824		n_sb--;
 825	}
 826
 827	/* Check for duplicate pipe handle */
 828	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
 829	if (unlikely(newsk)) {
 830		__sock_put(newsk);
 831		newsk = NULL;
 832		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
 833		goto drop;
 834	}
 835
 836	/* Create a new to-be-accepted sock */
 837	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
 838			 kern);
 839	if (!newsk) {
 840		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
 841		err = -ENOBUFS;
 842		goto drop;
 843	}
 844
 845	sock_init_data(NULL, newsk);
 846	newsk->sk_state = TCP_SYN_RECV;
 847	newsk->sk_backlog_rcv = pipe_do_rcv;
 848	newsk->sk_protocol = sk->sk_protocol;
 849	newsk->sk_destruct = pipe_destruct;
 850
 851	newpn = pep_sk(newsk);
 852	pn_skb_get_dst_sockaddr(skb, &dst);
 853	pn_skb_get_src_sockaddr(skb, &src);
 854	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
 855	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
 856	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
 857	sock_hold(sk);
 858	newpn->listener = sk;
 859	skb_queue_head_init(&newpn->ctrlreq_queue);
 860	newpn->pipe_handle = pipe_handle;
 861	atomic_set(&newpn->tx_credits, 0);
 862	newpn->ifindex = 0;
 863	newpn->peer_type = peer_type;
 864	newpn->rx_credits = 0;
 865	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 866	newpn->init_enable = enabled;
 867	newpn->aligned = aligned;
 868
 869	err = pep_accept_conn(newsk, skb);
 870	if (err) {
 871		sock_put(newsk);
 872		newsk = NULL;
 873		goto drop;
 874	}
 875	sk_add_node(newsk, &pn->hlist);
 876drop:
 877	release_sock(sk);
 878	kfree_skb(skb);
 879	*errp = err;
 880	return newsk;
 881}
 882
 883static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
 884{
 885	struct pep_sock *pn = pep_sk(sk);
 886	int err;
 887	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 888
 889	if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
 890		pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
 891
 892	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
 893				pn->init_enable, data, 4);
 894	if (err) {
 895		pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 896		return err;
 897	}
 898
 899	sk->sk_state = TCP_SYN_SENT;
 900
 901	return 0;
 902}
 903
 904static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
 905{
 906	int err;
 907
 908	err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
 909				NULL, 0);
 910	if (err)
 911		return err;
 912
 913	sk->sk_state = TCP_SYN_SENT;
 914
 915	return 0;
 916}
 917
 918static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 919{
 920	struct pep_sock *pn = pep_sk(sk);
 921	int answ;
 922	int ret = -ENOIOCTLCMD;
 923
 924	switch (cmd) {
 925	case SIOCINQ:
 926		if (sk->sk_state == TCP_LISTEN) {
 927			ret = -EINVAL;
 928			break;
 929		}
 930
 931		lock_sock(sk);
 932		if (sock_flag(sk, SOCK_URGINLINE) &&
 933		    !skb_queue_empty(&pn->ctrlreq_queue))
 934			answ = skb_peek(&pn->ctrlreq_queue)->len;
 935		else if (!skb_queue_empty(&sk->sk_receive_queue))
 936			answ = skb_peek(&sk->sk_receive_queue)->len;
 937		else
 938			answ = 0;
 939		release_sock(sk);
 940		ret = put_user(answ, (int __user *)arg);
 941		break;
 942
 943	case SIOCPNENABLEPIPE:
 944		lock_sock(sk);
 945		if (sk->sk_state == TCP_SYN_SENT)
 946			ret =  -EBUSY;
 947		else if (sk->sk_state == TCP_ESTABLISHED)
 948			ret = -EISCONN;
 949		else
 950			ret = pep_sock_enable(sk, NULL, 0);
 951		release_sock(sk);
 952		break;
 953	}
 954
 955	return ret;
 956}
 957
 958static int pep_init(struct sock *sk)
 959{
 960	struct pep_sock *pn = pep_sk(sk);
 961
 962	sk->sk_destruct = pipe_destruct;
 963	INIT_HLIST_HEAD(&pn->hlist);
 964	pn->listener = NULL;
 965	skb_queue_head_init(&pn->ctrlreq_queue);
 966	atomic_set(&pn->tx_credits, 0);
 967	pn->ifindex = 0;
 968	pn->peer_type = 0;
 969	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 970	pn->rx_credits = 0;
 971	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 972	pn->init_enable = 1;
 973	pn->aligned = 0;
 974	return 0;
 975}
 976
 977static int pep_setsockopt(struct sock *sk, int level, int optname,
 978				char __user *optval, unsigned int optlen)
 979{
 980	struct pep_sock *pn = pep_sk(sk);
 981	int val = 0, err = 0;
 982
 983	if (level != SOL_PNPIPE)
 984		return -ENOPROTOOPT;
 985	if (optlen >= sizeof(int)) {
 986		if (get_user(val, (int __user *) optval))
 987			return -EFAULT;
 988	}
 989
 990	lock_sock(sk);
 991	switch (optname) {
 992	case PNPIPE_ENCAP:
 993		if (val && val != PNPIPE_ENCAP_IP) {
 994			err = -EINVAL;
 995			break;
 996		}
 997		if (!pn->ifindex == !val)
 998			break; /* Nothing to do! */
 999		if (!capable(CAP_NET_ADMIN)) {
1000			err = -EPERM;
1001			break;
1002		}
1003		if (val) {
1004			release_sock(sk);
1005			err = gprs_attach(sk);
1006			if (err > 0) {
1007				pn->ifindex = err;
1008				err = 0;
1009			}
1010		} else {
1011			pn->ifindex = 0;
1012			release_sock(sk);
1013			gprs_detach(sk);
1014			err = 0;
1015		}
1016		goto out_norel;
1017
1018	case PNPIPE_HANDLE:
1019		if ((sk->sk_state == TCP_CLOSE) &&
1020			(val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
1021			pn->pipe_handle = val;
1022		else
1023			err = -EINVAL;
1024		break;
1025
1026	case PNPIPE_INITSTATE:
1027		pn->init_enable = !!val;
1028		break;
1029
1030	default:
1031		err = -ENOPROTOOPT;
1032	}
1033	release_sock(sk);
1034
1035out_norel:
1036	return err;
1037}
1038
1039static int pep_getsockopt(struct sock *sk, int level, int optname,
1040				char __user *optval, int __user *optlen)
1041{
1042	struct pep_sock *pn = pep_sk(sk);
1043	int len, val;
1044
1045	if (level != SOL_PNPIPE)
1046		return -ENOPROTOOPT;
1047	if (get_user(len, optlen))
1048		return -EFAULT;
1049
1050	switch (optname) {
1051	case PNPIPE_ENCAP:
1052		val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
1053		break;
1054
1055	case PNPIPE_IFINDEX:
1056		val = pn->ifindex;
1057		break;
1058
1059	case PNPIPE_HANDLE:
1060		val = pn->pipe_handle;
1061		if (val == PN_PIPE_INVALID_HANDLE)
1062			return -EINVAL;
1063		break;
1064
1065	case PNPIPE_INITSTATE:
1066		val = pn->init_enable;
1067		break;
1068
1069	default:
1070		return -ENOPROTOOPT;
1071	}
1072
1073	len = min_t(unsigned int, sizeof(int), len);
1074	if (put_user(len, optlen))
1075		return -EFAULT;
1076	if (put_user(val, (int __user *) optval))
1077		return -EFAULT;
1078	return 0;
1079}
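/* A rough userspace sketch of the active "pipe handler" setup driven by
 * these socket options and by pep_ioctl() below (illustrative values only,
 * addressing and error handling omitted; the exact sequence depends on the
 * modem stack in use):
 *
 *	int fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
 *	int handle = 42;	// anything below PN_PIPE_INVALID_HANDLE
 *	setsockopt(fd, SOL_PNPIPE, PNPIPE_HANDLE, &handle, sizeof(handle));
 *	struct sockaddr_pn spn = { .spn_family = AF_PHONET };
 *	// ... fill in the peer device/object/resource ...
 *	connect(fd, (struct sockaddr *)&spn, sizeof(spn));
 *	ioctl(fd, SIOCPNENABLEPIPE, 0);	// only if the pipe starts disabled
 */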
1080
1081static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
1082{
1083	struct pep_sock *pn = pep_sk(sk);
1084	struct pnpipehdr *ph;
1085	int err;
1086
1087	if (pn_flow_safe(pn->tx_fc) &&
1088	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
1089		kfree_skb(skb);
1090		return -ENOBUFS;
1091	}
1092
1093	skb_push(skb, 3 + pn->aligned);
1094	skb_reset_transport_header(skb);
1095	ph = pnp_hdr(skb);
1096	ph->utid = 0;
1097	if (pn->aligned) {
1098		ph->message_id = PNS_PIPE_ALIGNED_DATA;
1099		ph->data0 = 0; /* padding */
1100	} else
1101		ph->message_id = PNS_PIPE_DATA;
1102	ph->pipe_handle = pn->pipe_handle;
1103	err = pn_skb_send(sk, skb, NULL);
1104
1105	if (err && pn_flow_safe(pn->tx_fc))
1106		atomic_inc(&pn->tx_credits);
1107	return err;
1108
1109}
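/* For the credit-based (flow safe) modes, sending consumes one TX credit
 * up front via atomic_add_unless(), which refuses to go below zero; with
 * no credit left the skb is dropped with -ENOBUFS, and if the lower layer
 * fails to send, the credit is handed back.
 */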
1110
1111static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1112{
1113	struct pep_sock *pn = pep_sk(sk);
1114	struct sk_buff *skb;
1115	long timeo;
1116	int flags = msg->msg_flags;
1117	int err, done;
1118
1119	if (len > USHRT_MAX)
1120		return -EMSGSIZE;
1121
1122	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
1123				MSG_CMSG_COMPAT)) ||
1124			!(msg->msg_flags & MSG_EOR))
1125		return -EOPNOTSUPP;
1126
1127	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
1128					flags & MSG_DONTWAIT, &err);
1129	if (!skb)
1130		return err;
1131
1132	skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
1133	err = memcpy_from_msg(skb_put(skb, len), msg, len);
1134	if (err < 0)
1135		goto outfree;
1136
1137	lock_sock(sk);
1138	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1139	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
1140		err = -ENOTCONN;
1141		goto out;
1142	}
1143	if (sk->sk_state != TCP_ESTABLISHED) {
1144		/* Wait until the pipe gets to enabled state */
1145disabled:
1146		err = sk_stream_wait_connect(sk, &timeo);
1147		if (err)
1148			goto out;
1149
1150		if (sk->sk_state == TCP_CLOSE_WAIT) {
1151			err = -ECONNRESET;
1152			goto out;
1153		}
1154	}
1155	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
1156
1157	/* Wait until flow control allows TX */
1158	done = atomic_read(&pn->tx_credits);
1159	while (!done) {
1160		DEFINE_WAIT_FUNC(wait, woken_wake_function);
1161
1162		if (!timeo) {
1163			err = -EAGAIN;
1164			goto out;
1165		}
1166		if (signal_pending(current)) {
1167			err = sock_intr_errno(timeo);
1168			goto out;
1169		}
1170
1171		add_wait_queue(sk_sleep(sk), &wait);
1172		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits), &wait);
1173		remove_wait_queue(sk_sleep(sk), &wait);
1174
1175		if (sk->sk_state != TCP_ESTABLISHED)
1176			goto disabled;
1177	}
1178
1179	err = pipe_skb_send(sk, skb);
1180	if (err >= 0)
1181		err = len; /* success! */
1182	skb = NULL;
1183out:
1184	release_sock(sk);
1185outfree:
1186	kfree_skb(skb);
1187	return err;
1188}
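/* Each sendmsg() call maps to exactly one pipe message: MSG_EOR is
 * mandatory, the payload is capped at USHRT_MAX, and the caller may first
 * block until the pipe reaches the enabled state and then until a TX
 * credit is available before pipe_skb_send() is attempted.
 */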
1189
1190int pep_writeable(struct sock *sk)
1191{
1192	struct pep_sock *pn = pep_sk(sk);
1193
1194	return atomic_read(&pn->tx_credits);
1195}
1196
1197int pep_write(struct sock *sk, struct sk_buff *skb)
1198{
1199	struct sk_buff *rskb, *fs;
1200	int flen = 0;
1201
1202	if (pep_sk(sk)->aligned)
1203		return pipe_skb_send(sk, skb);
1204
1205	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
1206	if (!rskb) {
1207		kfree_skb(skb);
1208		return -ENOMEM;
1209	}
1210	skb_shinfo(rskb)->frag_list = skb;
1211	rskb->len += skb->len;
1212	rskb->data_len += rskb->len;
1213	rskb->truesize += rskb->len;
1214
1215	/* Avoid nested fragments */
1216	skb_walk_frags(skb, fs)
1217		flen += fs->len;
1218	skb->next = skb_shinfo(skb)->frag_list;
1219	skb_frag_list_init(skb);
1220	skb->len -= flen;
1221	skb->data_len -= flen;
1222	skb->truesize -= flen;
1223
1224	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
1225	return pipe_skb_send(sk, rskb);
1226}
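/* pep_writeable(), pep_write() and pep_read() are the entry points used by
 * the GPRS/IP encapsulation (gprs.c).  When the pipe does not use aligned
 * data, pep_write() wraps the packet in a fresh header-only skb through
 * its frag_list, so the three-byte pipe header can be pushed without
 * copying the payload, and splices any fragments of the original packet
 * into that list so fragment lists do not nest.
 */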
1227
1228struct sk_buff *pep_read(struct sock *sk)
1229{
1230	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
1231
1232	if (sk->sk_state == TCP_ESTABLISHED)
1233		pipe_grant_credits(sk, GFP_ATOMIC);
1234	return skb;
1235}
1236
1237static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
1238		       int noblock, int flags, int *addr_len)
1239{
1240	struct sk_buff *skb;
1241	int err;
1242
1243	if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
1244			MSG_NOSIGNAL|MSG_CMSG_COMPAT))
1245		return -EOPNOTSUPP;
1246
1247	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
1248		return -ENOTCONN;
1249
1250	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
1251		/* Dequeue and acknowledge control request */
1252		struct pep_sock *pn = pep_sk(sk);
1253
1254		if (flags & MSG_PEEK)
1255			return -EOPNOTSUPP;
1256		skb = skb_dequeue(&pn->ctrlreq_queue);
1257		if (skb) {
1258			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
1259						GFP_KERNEL);
1260			msg->msg_flags |= MSG_OOB;
1261			goto copy;
1262		}
1263		if (flags & MSG_OOB)
1264			return -EINVAL;
1265	}
1266
1267	skb = skb_recv_datagram(sk, flags, noblock, &err);
1268	lock_sock(sk);
1269	if (skb == NULL) {
1270		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
1271			err = -ECONNRESET;
1272		release_sock(sk);
1273		return err;
1274	}
1275
1276	if (sk->sk_state == TCP_ESTABLISHED)
1277		pipe_grant_credits(sk, GFP_KERNEL);
1278	release_sock(sk);
1279copy:
1280	msg->msg_flags |= MSG_EOR;
1281	if (skb->len > len)
1282		msg->msg_flags |= MSG_TRUNC;
1283	else
1284		len = skb->len;
1285
1286	err = skb_copy_datagram_msg(skb, 0, msg, len);
1287	if (!err)
1288		err = (flags & MSG_TRUNC) ? skb->len : len;
1289
1290	skb_free_datagram(sk, skb);
1291	return err;
1292}
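/* MSG_OOB (or SOCK_URGINLINE) dequeues a pending control request from
 * ctrlreq_queue and acknowledges it immediately with PNS_PEP_CTRL_RESP /
 * PN_PIPE_NO_ERROR before copying it out; ordinary reads take packets off
 * the receive queue and re-grant RX credits while the pipe is enabled.
 */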
1293
1294static void pep_sock_unhash(struct sock *sk)
1295{
1296	struct pep_sock *pn = pep_sk(sk);
1297	struct sock *skparent = NULL;
1298
1299	lock_sock(sk);
1300
1301	if (pn->listener != NULL) {
1302		skparent = pn->listener;
1303		pn->listener = NULL;
1304		release_sock(sk);
1305
1306		pn = pep_sk(skparent);
1307		lock_sock(skparent);
1308		sk_del_node_init(sk);
1309		sk = skparent;
1310	}
1311
1312	/* Unhash a listening sock only when it is closed
1313	 * and all of its active connected pipes are closed. */
1314	if (hlist_empty(&pn->hlist))
1315		pn_sock_unhash(&pn->pn_sk.sk);
1316	release_sock(sk);
1317
1318	if (skparent)
1319		sock_put(skparent);
1320}
1321
1322static struct proto pep_proto = {
1323	.close		= pep_sock_close,
1324	.accept		= pep_sock_accept,
1325	.connect	= pep_sock_connect,
1326	.ioctl		= pep_ioctl,
1327	.init		= pep_init,
1328	.setsockopt	= pep_setsockopt,
1329	.getsockopt	= pep_getsockopt,
1330	.sendmsg	= pep_sendmsg,
1331	.recvmsg	= pep_recvmsg,
1332	.backlog_rcv	= pep_do_rcv,
1333	.hash		= pn_sock_hash,
1334	.unhash		= pep_sock_unhash,
1335	.get_port	= pn_sock_get_port,
1336	.obj_size	= sizeof(struct pep_sock),
1337	.owner		= THIS_MODULE,
1338	.name		= "PNPIPE",
1339};
1340
1341static const struct phonet_protocol pep_pn_proto = {
1342	.ops		= &phonet_stream_ops,
1343	.prot		= &pep_proto,
1344	.sock_type	= SOCK_SEQPACKET,
1345};
1346
1347static int __init pep_register(void)
1348{
1349	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
1350}
1351
1352static void __exit pep_unregister(void)
1353{
1354	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
1355}
1356
1357module_init(pep_register);
1358module_exit(pep_unregister);
1359MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
1360MODULE_DESCRIPTION("Phonet pipe protocol");
1361MODULE_LICENSE("GPL");
1362MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * File: pep.c
   4 *
   5 * Phonet pipe protocol end point socket
   6 *
   7 * Copyright (C) 2008 Nokia Corporation.
   8 *
   9 * Author: Rémi Denis-Courmont
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/sched/signal.h>
  14#include <linux/slab.h>
  15#include <linux/socket.h>
  16#include <net/sock.h>
  17#include <net/tcp_states.h>
  18#include <asm/ioctls.h>
  19
  20#include <linux/phonet.h>
  21#include <linux/module.h>
  22#include <net/phonet/phonet.h>
  23#include <net/phonet/pep.h>
  24#include <net/phonet/gprs.h>
  25
  26/* sk_state values:
  27 * TCP_CLOSE		sock not in use yet
  28 * TCP_CLOSE_WAIT	disconnected pipe
  29 * TCP_LISTEN		listening pipe endpoint
  30 * TCP_SYN_RECV		connected pipe in disabled state
  31 * TCP_ESTABLISHED	connected pipe in enabled state
  32 *
  33 * pep_sock locking:
  34 *  - sk_state, hlist: sock lock needed
  35 *  - listener: read only
  36 *  - pipe_handle: read only
  37 */
  38
  39#define CREDITS_MAX	10
  40#define CREDITS_THR	7
  41
  42#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
  43
  44/* Get the next TLV sub-block. */
  45static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
  46					void *buf)
  47{
  48	void *data = NULL;
  49	struct {
  50		u8 sb_type;
  51		u8 sb_len;
  52	} *ph, h;
  53	int buflen = *plen;
  54
  55	ph = skb_header_pointer(skb, 0, 2, &h);
  56	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
  57		return NULL;
  58	ph->sb_len -= 2;
  59	*ptype = ph->sb_type;
  60	*plen = ph->sb_len;
  61
  62	if (buflen > ph->sb_len)
  63		buflen = ph->sb_len;
  64	data = skb_header_pointer(skb, 2, buflen, buf);
  65	__skb_pull(skb, 2 + ph->sb_len);
  66	return data;
  67}
  68
  69static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
  70					int len, gfp_t priority)
  71{
  72	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
  73	if (!skb)
  74		return NULL;
  75	skb_set_owner_w(skb, sk);
  76
  77	skb_reserve(skb, MAX_PNPIPE_HEADER);
  78	__skb_put(skb, len);
  79	skb_copy_to_linear_data(skb, payload, len);
  80	__skb_push(skb, sizeof(struct pnpipehdr));
  81	skb_reset_transport_header(skb);
  82	return skb;
  83}
  84
  85static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
  86			const void *data, int len, gfp_t priority)
  87{
  88	const struct pnpipehdr *oph = pnp_hdr(oskb);
  89	struct pnpipehdr *ph;
  90	struct sk_buff *skb;
  91	struct sockaddr_pn peer;
  92
  93	skb = pep_alloc_skb(sk, data, len, priority);
  94	if (!skb)
  95		return -ENOMEM;
  96
  97	ph = pnp_hdr(skb);
  98	ph->utid = oph->utid;
  99	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
 100	ph->pipe_handle = oph->pipe_handle;
 101	ph->error_code = code;
 102
 103	pn_skb_get_src_sockaddr(oskb, &peer);
 104	return pn_skb_send(sk, skb, &peer);
 105}
 106
 107static int pep_indicate(struct sock *sk, u8 id, u8 code,
 108			const void *data, int len, gfp_t priority)
 109{
 110	struct pep_sock *pn = pep_sk(sk);
 111	struct pnpipehdr *ph;
 112	struct sk_buff *skb;
 113
 114	skb = pep_alloc_skb(sk, data, len, priority);
 115	if (!skb)
 116		return -ENOMEM;
 117
 118	ph = pnp_hdr(skb);
 119	ph->utid = 0;
 120	ph->message_id = id;
 121	ph->pipe_handle = pn->pipe_handle;
 122	ph->error_code = code;
 123	return pn_skb_send(sk, skb, NULL);
 124}
 125
 126#define PAD 0x00
 127
 128static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
 129				const void *data, int len)
 130{
 131	struct pep_sock *pn = pep_sk(sk);
 132	struct pnpipehdr *ph;
 133	struct sk_buff *skb;
 134
 135	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
 136	if (!skb)
 137		return -ENOMEM;
 138
 139	ph = pnp_hdr(skb);
 140	ph->utid = id; /* whatever */
 141	ph->message_id = id;
 142	ph->pipe_handle = pn->pipe_handle;
 143	ph->error_code = code;
 144	return pn_skb_send(sk, skb, NULL);
 145}
 146
 147static int pipe_handler_send_created_ind(struct sock *sk)
 148{
 149	struct pep_sock *pn = pep_sk(sk);
 150	u8 data[4] = {
 151		PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
 152		pn->tx_fc, pn->rx_fc,
 153	};
 154
 155	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
 156				data, 4, GFP_ATOMIC);
 157}
 158
 159static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
 160{
 161	static const u8 data[20] = {
 162		PAD, PAD, PAD, 2 /* sub-blocks */,
 163		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
 164			PN_MULTI_CREDIT_FLOW_CONTROL,
 165			PN_ONE_CREDIT_FLOW_CONTROL,
 166			PN_LEGACY_FLOW_CONTROL,
 167			PAD,
 168		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
 169			PN_MULTI_CREDIT_FLOW_CONTROL,
 170			PN_ONE_CREDIT_FLOW_CONTROL,
 171			PN_LEGACY_FLOW_CONTROL,
 172			PAD,
 173	};
 174
 175	might_sleep();
 176	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
 177				GFP_KERNEL);
 178}
 179
 180static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
 181				gfp_t priority)
 182{
 183	static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
 184	WARN_ON(code == PN_PIPE_NO_ERROR);
 185	return pep_reply(sk, skb, code, data, sizeof(data), priority);
 186}
 187
 188/* Control requests are not sent by the pipe service and have a specific
 189 * message format. */
 190static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 191				gfp_t priority)
 192{
 193	const struct pnpipehdr *oph = pnp_hdr(oskb);
 194	struct sk_buff *skb;
 195	struct pnpipehdr *ph;
 196	struct sockaddr_pn dst;
 197	u8 data[4] = {
 198		oph->pep_type, /* PEP type */
 199		code, /* error code, at an unusual offset */
 200		PAD, PAD,
 201	};
 202
 203	skb = pep_alloc_skb(sk, data, 4, priority);
 204	if (!skb)
 205		return -ENOMEM;
 206
 207	ph = pnp_hdr(skb);
 208	ph->utid = oph->utid;
 209	ph->message_id = PNS_PEP_CTRL_RESP;
 210	ph->pipe_handle = oph->pipe_handle;
 211	ph->data0 = oph->data[0]; /* CTRL id */
 212
 213	pn_skb_get_src_sockaddr(oskb, &dst);
 214	return pn_skb_send(sk, skb, &dst);
 215}
 216
 217static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
 218{
 219	u8 data[4] = { type, PAD, PAD, status };
 220
 221	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
 222				data, 4, priority);
 223}
 224
 225/* Send our RX flow control information to the sender.
 226 * Socket must be locked. */
 227static void pipe_grant_credits(struct sock *sk, gfp_t priority)
 228{
 229	struct pep_sock *pn = pep_sk(sk);
 230
 231	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
 232
 233	switch (pn->rx_fc) {
 234	case PN_LEGACY_FLOW_CONTROL: /* TODO */
 235		break;
 236	case PN_ONE_CREDIT_FLOW_CONTROL:
 237		if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
 238					PEP_IND_READY, priority) == 0)
 239			pn->rx_credits = 1;
 240		break;
 241	case PN_MULTI_CREDIT_FLOW_CONTROL:
 242		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
 243			break;
 244		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
 245					CREDITS_MAX - pn->rx_credits,
 246					priority) == 0)
 247			pn->rx_credits = CREDITS_MAX;
 248		break;
 249	}
 250}
 251
 252static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 253{
 254	struct pep_sock *pn = pep_sk(sk);
 255	struct pnpipehdr *hdr;
 256	int wake = 0;
 257
 258	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 259		return -EINVAL;
 260
 261	hdr = pnp_hdr(skb);
 262	if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
 263		net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
 264				    (unsigned int)hdr->pep_type);
 265		return -EOPNOTSUPP;
 266	}
 267
 268	switch (hdr->data[0]) {
 269	case PN_PEP_IND_FLOW_CONTROL:
 270		switch (pn->tx_fc) {
 271		case PN_LEGACY_FLOW_CONTROL:
 272			switch (hdr->data[3]) {
 273			case PEP_IND_BUSY:
 274				atomic_set(&pn->tx_credits, 0);
 275				break;
 276			case PEP_IND_READY:
 277				atomic_set(&pn->tx_credits, wake = 1);
 278				break;
 279			}
 280			break;
 281		case PN_ONE_CREDIT_FLOW_CONTROL:
 282			if (hdr->data[3] == PEP_IND_READY)
 283				atomic_set(&pn->tx_credits, wake = 1);
 284			break;
 285		}
 286		break;
 287
 288	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
 289		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
 290			break;
 291		atomic_add(wake = hdr->data[3], &pn->tx_credits);
 292		break;
 293
 294	default:
 295		net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
 296				    (unsigned int)hdr->data[0]);
 297		return -EOPNOTSUPP;
 298	}
 299	if (wake)
 300		sk->sk_write_space(sk);
 301	return 0;
 302}
 303
 304static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
 305{
 306	struct pep_sock *pn = pep_sk(sk);
 307	struct pnpipehdr *hdr = pnp_hdr(skb);
 308	u8 n_sb = hdr->data0;
 309
 310	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 311	__skb_pull(skb, sizeof(*hdr));
 312	while (n_sb > 0) {
 313		u8 type, buf[2], len = sizeof(buf);
 314		u8 *data = pep_get_sb(skb, &type, &len, buf);
 315
 316		if (data == NULL)
 317			return -EINVAL;
 318		switch (type) {
 319		case PN_PIPE_SB_NEGOTIATED_FC:
 320			if (len < 2 || (data[0] | data[1]) > 3)
 321				break;
 322			pn->tx_fc = data[0] & 3;
 323			pn->rx_fc = data[1] & 3;
 324			break;
 325		}
 326		n_sb--;
 327	}
 328	return 0;
 329}
 330
 331/* Queue an skb to a connected sock.
 332 * Socket lock must be held. */
 333static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 334{
 335	struct pep_sock *pn = pep_sk(sk);
 336	struct pnpipehdr *hdr = pnp_hdr(skb);
 337	struct sk_buff_head *queue;
 338	int err = 0;
 339
 340	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
 341
 342	switch (hdr->message_id) {
 343	case PNS_PEP_CONNECT_REQ:
 344		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
 345		break;
 346
 347	case PNS_PEP_DISCONNECT_REQ:
 348		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 349		sk->sk_state = TCP_CLOSE_WAIT;
 350		if (!sock_flag(sk, SOCK_DEAD))
 351			sk->sk_state_change(sk);
 352		break;
 353
 354	case PNS_PEP_ENABLE_REQ:
 355		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
 356		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 357		break;
 358
 359	case PNS_PEP_RESET_REQ:
 360		switch (hdr->state_after_reset) {
 361		case PN_PIPE_DISABLE:
 362			pn->init_enable = 0;
 363			break;
 364		case PN_PIPE_ENABLE:
 365			pn->init_enable = 1;
 366			break;
 367		default: /* not allowed to send an error here!? */
 368			err = -EINVAL;
 369			goto out;
 370		}
 371		fallthrough;
 372	case PNS_PEP_DISABLE_REQ:
 373		atomic_set(&pn->tx_credits, 0);
 374		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 375		break;
 376
 377	case PNS_PEP_CTRL_REQ:
 378		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
 379			atomic_inc(&sk->sk_drops);
 380			break;
 381		}
 382		__skb_pull(skb, 4);
 383		queue = &pn->ctrlreq_queue;
 384		goto queue;
 385
 386	case PNS_PIPE_ALIGNED_DATA:
 387		__skb_pull(skb, 1);
 388		fallthrough;
 389	case PNS_PIPE_DATA:
 390		__skb_pull(skb, 3); /* Pipe data header */
 391		if (!pn_flow_safe(pn->rx_fc)) {
 392			err = sock_queue_rcv_skb(sk, skb);
 393			if (!err)
 394				return NET_RX_SUCCESS;
 395			err = -ENOBUFS;
 396			break;
 397		}
 398
 399		if (pn->rx_credits == 0) {
 400			atomic_inc(&sk->sk_drops);
 401			err = -ENOBUFS;
 402			break;
 403		}
 404		pn->rx_credits--;
 405		queue = &sk->sk_receive_queue;
 406		goto queue;
 407
 408	case PNS_PEP_STATUS_IND:
 409		pipe_rcv_status(sk, skb);
 410		break;
 411
 412	case PNS_PIPE_REDIRECTED_IND:
 413		err = pipe_rcv_created(sk, skb);
 414		break;
 415
 416	case PNS_PIPE_CREATED_IND:
 417		err = pipe_rcv_created(sk, skb);
 418		if (err)
 419			break;
 420		fallthrough;
 421	case PNS_PIPE_RESET_IND:
 422		if (!pn->init_enable)
 423			break;
 424		fallthrough;
 425	case PNS_PIPE_ENABLED_IND:
 426		if (!pn_flow_safe(pn->tx_fc)) {
 427			atomic_set(&pn->tx_credits, 1);
 428			sk->sk_write_space(sk);
 429		}
 430		if (sk->sk_state == TCP_ESTABLISHED)
 431			break; /* Nothing to do */
 432		sk->sk_state = TCP_ESTABLISHED;
 433		pipe_grant_credits(sk, GFP_ATOMIC);
 434		break;
 435
 436	case PNS_PIPE_DISABLED_IND:
 437		sk->sk_state = TCP_SYN_RECV;
 438		pn->rx_credits = 0;
 439		break;
 440
 441	default:
 442		net_dbg_ratelimited("Phonet unknown PEP message: %u\n",
 443				    hdr->message_id);
 444		err = -EINVAL;
 445	}
 446out:
 447	kfree_skb(skb);
 448	return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
 449
 450queue:
 451	skb->dev = NULL;
 452	skb_set_owner_r(skb, sk);
 453	skb_queue_tail(queue, skb);
 454	if (!sock_flag(sk, SOCK_DEAD))
 455		sk->sk_data_ready(sk);
 456	return NET_RX_SUCCESS;
 457}
 458
 459/* Destroy connected sock. */
 460static void pipe_destruct(struct sock *sk)
 461{
 462	struct pep_sock *pn = pep_sk(sk);
 463
 464	skb_queue_purge(&sk->sk_receive_queue);
 465	skb_queue_purge(&pn->ctrlreq_queue);
 466}
 467
 468static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
 469{
 470	unsigned int i;
 471	u8 final_fc = PN_NO_FLOW_CONTROL;
 472
 473	for (i = 0; i < n; i++) {
 474		u8 fc = fcs[i];
 475
 476		if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
 477			final_fc = fc;
 478	}
 479	return final_fc;
 480}
 481
 482static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
 483{
 484	struct pep_sock *pn = pep_sk(sk);
 485	struct pnpipehdr *hdr;
 486	u8 n_sb;
 487
 488	if (!pskb_pull(skb, sizeof(*hdr) + 4))
 489		return -EINVAL;
 490
 491	hdr = pnp_hdr(skb);
 492	if (hdr->error_code != PN_PIPE_NO_ERROR)
 493		return -ECONNREFUSED;
 494
 495	/* Parse sub-blocks */
 496	n_sb = hdr->data[3];
 497	while (n_sb > 0) {
 498		u8 type, buf[6], len = sizeof(buf);
 499		const u8 *data = pep_get_sb(skb, &type, &len, buf);
 500
 501		if (data == NULL)
 502			return -EINVAL;
 503
 504		switch (type) {
 505		case PN_PIPE_SB_REQUIRED_FC_TX:
 506			if (len < 2 || len < data[0])
 507				break;
 508			pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
 509			break;
 510
 511		case PN_PIPE_SB_PREFERRED_FC_RX:
 512			if (len < 2 || len < data[0])
 513				break;
 514			pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
 515			break;
 516
 517		}
 518		n_sb--;
 519	}
 520
 521	return pipe_handler_send_created_ind(sk);
 522}
 523
 524static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
 525{
 526	struct pnpipehdr *hdr = pnp_hdr(skb);
 527
 528	if (hdr->error_code != PN_PIPE_NO_ERROR)
 529		return -ECONNREFUSED;
 530
 531	return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
 532		NULL, 0, GFP_ATOMIC);
 533
 534}
 535
 536static void pipe_start_flow_control(struct sock *sk)
 537{
 538	struct pep_sock *pn = pep_sk(sk);
 539
 540	if (!pn_flow_safe(pn->tx_fc)) {
 541		atomic_set(&pn->tx_credits, 1);
 542		sk->sk_write_space(sk);
 543	}
 544	pipe_grant_credits(sk, GFP_ATOMIC);
 545}
 546
 547/* Queue an skb to an actively connected sock.
 548 * Socket lock must be held. */
 549static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 550{
 551	struct pep_sock *pn = pep_sk(sk);
 552	struct pnpipehdr *hdr = pnp_hdr(skb);
 553	int err = NET_RX_SUCCESS;
 554
 555	switch (hdr->message_id) {
 556	case PNS_PIPE_ALIGNED_DATA:
 557		__skb_pull(skb, 1);
 558		fallthrough;
 559	case PNS_PIPE_DATA:
 560		__skb_pull(skb, 3); /* Pipe data header */
 561		if (!pn_flow_safe(pn->rx_fc)) {
 562			err = sock_queue_rcv_skb(sk, skb);
 563			if (!err)
 564				return NET_RX_SUCCESS;
 565			err = NET_RX_DROP;
 566			break;
 567		}
 568
 569		if (pn->rx_credits == 0) {
 570			atomic_inc(&sk->sk_drops);
 571			err = NET_RX_DROP;
 572			break;
 573		}
 574		pn->rx_credits--;
 575		skb->dev = NULL;
 576		skb_set_owner_r(skb, sk);
 577		skb_queue_tail(&sk->sk_receive_queue, skb);
 578		if (!sock_flag(sk, SOCK_DEAD))
 579			sk->sk_data_ready(sk);
 580		return NET_RX_SUCCESS;
 581
 582	case PNS_PEP_CONNECT_RESP:
 583		if (sk->sk_state != TCP_SYN_SENT)
 584			break;
 585		if (!sock_flag(sk, SOCK_DEAD))
 586			sk->sk_state_change(sk);
 587		if (pep_connresp_rcv(sk, skb)) {
 588			sk->sk_state = TCP_CLOSE_WAIT;
 589			break;
 590		}
 591		if (pn->init_enable == PN_PIPE_DISABLE)
 592			sk->sk_state = TCP_SYN_RECV;
 593		else {
 594			sk->sk_state = TCP_ESTABLISHED;
 595			pipe_start_flow_control(sk);
 596		}
 597		break;
 598
 599	case PNS_PEP_ENABLE_RESP:
 600		if (sk->sk_state != TCP_SYN_SENT)
 601			break;
 602
 603		if (pep_enableresp_rcv(sk, skb)) {
 604			sk->sk_state = TCP_CLOSE_WAIT;
 605			break;
 606		}
 607
 608		sk->sk_state = TCP_ESTABLISHED;
 609		pipe_start_flow_control(sk);
 610		break;
 611
 612	case PNS_PEP_DISCONNECT_RESP:
 613		/* sock should already be dead, nothing to do */
 614		break;
 615
 616	case PNS_PEP_STATUS_IND:
 617		pipe_rcv_status(sk, skb);
 618		break;
 619	}
 620	kfree_skb(skb);
 621	return err;
 622}
 623
 624/* Listening sock must be locked */
 625static struct sock *pep_find_pipe(const struct hlist_head *hlist,
 626					const struct sockaddr_pn *dst,
 627					u8 pipe_handle)
 628{
 629	struct sock *sknode;
 630	u16 dobj = pn_sockaddr_get_object(dst);
 631
 632	sk_for_each(sknode, hlist) {
 633		struct pep_sock *pnnode = pep_sk(sknode);
 634
 635		/* Ports match, but addresses might not: */
 636		if (pnnode->pn_sk.sobject != dobj)
 637			continue;
 638		if (pnnode->pipe_handle != pipe_handle)
 639			continue;
 640		if (sknode->sk_state == TCP_CLOSE_WAIT)
 641			continue;
 642
 643		sock_hold(sknode);
 644		return sknode;
 645	}
 646	return NULL;
 647}
 648
 649/*
 650 * Deliver an skb to a listening sock.
 651 * Socket lock must be held.
 652 * We then queue the skb to the right connected sock (if any).
 653 */
 654static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 655{
 656	struct pep_sock *pn = pep_sk(sk);
 657	struct sock *sknode;
 658	struct pnpipehdr *hdr;
 659	struct sockaddr_pn dst;
 660	u8 pipe_handle;
 661
 662	if (!pskb_may_pull(skb, sizeof(*hdr)))
 663		goto drop;
 664
 665	hdr = pnp_hdr(skb);
 666	pipe_handle = hdr->pipe_handle;
 667	if (pipe_handle == PN_PIPE_INVALID_HANDLE)
 668		goto drop;
 669
 670	pn_skb_get_dst_sockaddr(skb, &dst);
 671
 672	/* Look for an existing pipe handle */
 673	sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
 674	if (sknode)
 675		return sk_receive_skb(sknode, skb, 1);
 676
 677	switch (hdr->message_id) {
 678	case PNS_PEP_CONNECT_REQ:
 679		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
 680			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
 681					GFP_ATOMIC);
 682			break;
 683		}
 684		skb_queue_head(&sk->sk_receive_queue, skb);
 685		sk_acceptq_added(sk);
 686		if (!sock_flag(sk, SOCK_DEAD))
 687			sk->sk_data_ready(sk);
 688		return NET_RX_SUCCESS;
 689
 690	case PNS_PEP_DISCONNECT_REQ:
 691		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 692		break;
 693
 694	case PNS_PEP_CTRL_REQ:
 695		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
 696		break;
 697
 698	case PNS_PEP_RESET_REQ:
 699	case PNS_PEP_ENABLE_REQ:
 700	case PNS_PEP_DISABLE_REQ:
 701		/* invalid handle is not even allowed here! */
 702		break;
 703
 704	default:
 705		if ((1 << sk->sk_state)
 706				& ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
 707			/* actively connected socket */
 708			return pipe_handler_do_rcv(sk, skb);
 709	}
 710drop:
 711	kfree_skb(skb);
 712	return NET_RX_SUCCESS;
 713}
 714
 715static int pipe_do_remove(struct sock *sk)
 716{
 717	struct pep_sock *pn = pep_sk(sk);
 718	struct pnpipehdr *ph;
 719	struct sk_buff *skb;
 720
 721	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
 722	if (!skb)
 723		return -ENOMEM;
 724
 725	ph = pnp_hdr(skb);
 726	ph->utid = 0;
 727	ph->message_id = PNS_PIPE_REMOVE_REQ;
 728	ph->pipe_handle = pn->pipe_handle;
 729	ph->data0 = PAD;
 730	return pn_skb_send(sk, skb, NULL);
 731}
 732
 733/* associated socket ceases to exist */
 734static void pep_sock_close(struct sock *sk, long timeout)
 735{
 736	struct pep_sock *pn = pep_sk(sk);
 737	int ifindex = 0;
 738
 739	sock_hold(sk); /* keep a reference after sk_common_release() */
 740	sk_common_release(sk);
 741
 742	lock_sock(sk);
 743	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
 744		if (sk->sk_backlog_rcv == pipe_do_rcv)
 745			/* Forcefully remove dangling Phonet pipe */
 746			pipe_do_remove(sk);
 747		else
 748			pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
 749						NULL, 0);
 750	}
 751	sk->sk_state = TCP_CLOSE;
 752
 753	ifindex = pn->ifindex;
 754	pn->ifindex = 0;
 755	release_sock(sk);
 756
 757	if (ifindex)
 758		gprs_detach(sk);
 759	sock_put(sk);
 760}
 761
 762static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
 763				    bool kern)
 764{
 765	struct pep_sock *pn = pep_sk(sk), *newpn;
 766	struct sock *newsk = NULL;
 767	struct sk_buff *skb;
 768	struct pnpipehdr *hdr;
 769	struct sockaddr_pn dst, src;
 770	int err;
 771	u16 peer_type;
 772	u8 pipe_handle, enabled, n_sb;
 773	u8 aligned = 0;
 774
 775	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
 776				errp);
 777	if (!skb)
 778		return NULL;
 779
 780	lock_sock(sk);
 781	if (sk->sk_state != TCP_LISTEN) {
 782		err = -EINVAL;
 783		goto drop;
 784	}
 785	sk_acceptq_removed(sk);
 786
 787	err = -EPROTO;
 788	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 789		goto drop;
 790
 791	hdr = pnp_hdr(skb);
 792	pipe_handle = hdr->pipe_handle;
 793	switch (hdr->state_after_connect) {
 794	case PN_PIPE_DISABLE:
 795		enabled = 0;
 796		break;
 797	case PN_PIPE_ENABLE:
 798		enabled = 1;
 799		break;
 800	default:
 801		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
 802				GFP_KERNEL);
 803		goto drop;
 804	}
 805	peer_type = hdr->other_pep_type << 8;
 806
 807	/* Parse sub-blocks (options) */
 808	n_sb = hdr->data[3];
 809	while (n_sb > 0) {
 810		u8 type, buf[1], len = sizeof(buf);
 811		const u8 *data = pep_get_sb(skb, &type, &len, buf);
 812
 813		if (data == NULL)
 814			goto drop;
 815		switch (type) {
 816		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
 817			if (len < 1)
 818				goto drop;
 819			peer_type = (peer_type & 0xff00) | data[0];
 820			break;
 821		case PN_PIPE_SB_ALIGNED_DATA:
 822			aligned = data[0] != 0;
 823			break;
 824		}
 825		n_sb--;
 826	}
 827
 828	/* Check for duplicate pipe handle (dst is needed for the lookup) */
    	pn_skb_get_dst_sockaddr(skb, &dst);
 829	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
 830	if (unlikely(newsk)) {
 831		__sock_put(newsk);
 832		newsk = NULL;
 833		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
 834		goto drop;
 835	}
 836
 837	/* Create a new to-be-accepted sock */
 838	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
 839			 kern);
 840	if (!newsk) {
 841		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
 842		err = -ENOBUFS;
 843		goto drop;
 844	}
 845
 846	sock_init_data(NULL, newsk);
 847	newsk->sk_state = TCP_SYN_RECV;
 848	newsk->sk_backlog_rcv = pipe_do_rcv;
 849	newsk->sk_protocol = sk->sk_protocol;
 850	newsk->sk_destruct = pipe_destruct;
 851
 852	newpn = pep_sk(newsk);
 853	pn_skb_get_dst_sockaddr(skb, &dst);
 854	pn_skb_get_src_sockaddr(skb, &src);
 855	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
 856	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
 857	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
 858	sock_hold(sk);
 859	newpn->listener = sk;
 860	skb_queue_head_init(&newpn->ctrlreq_queue);
 861	newpn->pipe_handle = pipe_handle;
 862	atomic_set(&newpn->tx_credits, 0);
 863	newpn->ifindex = 0;
 864	newpn->peer_type = peer_type;
 865	newpn->rx_credits = 0;
 866	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 867	newpn->init_enable = enabled;
 868	newpn->aligned = aligned;
 869
 870	err = pep_accept_conn(newsk, skb);
 871	if (err) {
 872		__sock_put(sk);
 873		sock_put(newsk);
 874		newsk = NULL;
 875		goto drop;
 876	}
 877	sk_add_node(newsk, &pn->hlist);
 878drop:
 879	release_sock(sk);
 880	kfree_skb(skb);
 881	*errp = err;
 882	return newsk;
 883}
 884
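    /* connect() back-end for a pipe end point: reserve a pipe handle if user
     * space did not choose one with setsockopt(PNPIPE_HANDLE), send
     * PNS_PEP_CONNECT_REQ and await the reply in TCP_SYN_SENT state.
     *
     * Illustrative user-space sketch only (assumes <sys/socket.h> and
     * <linux/phonet.h>, a peer address in "spn", and omits error handling):
     *
     *	int fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
     *	int handle = 1;		(optional, below PN_PIPE_INVALID_HANDLE)
     *	setsockopt(fd, SOL_PNPIPE, PNPIPE_HANDLE, &handle, sizeof(handle));
     *	connect(fd, (struct sockaddr *)&spn, sizeof(spn));
     */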
 885static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
 886{
 887	struct pep_sock *pn = pep_sk(sk);
 888	int err;
 889	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 890
 891	if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
 892		pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
 893
 894	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
 895				pn->init_enable, data, 4);
 896	if (err) {
 897		pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 898		return err;
 899	}
 900
 901	sk->sk_state = TCP_SYN_SENT;
 902
 903	return 0;
 904}
 905
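    /* Request that an already set-up pipe be enabled: send
     * PNS_PEP_ENABLE_REQ and await the reply in TCP_SYN_SENT state
     * (backs the SIOCPNENABLEPIPE ioctl below). */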
 906static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
 907{
 908	int err;
 909
 910	err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
 911				NULL, 0);
 912	if (err)
 913		return err;
 914
 915	sk->sk_state = TCP_SYN_SENT;
 916
 917	return 0;
 918}
 919
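    /* ioctl() handlers: SIOCINQ returns the size of the next pending message
     * (a queued control request takes precedence when SOCK_URGINLINE is set),
     * and SIOCPNENABLEPIPE enables a pipe via pep_sock_enable() above. */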
 920static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 921{
 922	struct pep_sock *pn = pep_sk(sk);
 923	int answ;
 924	int ret = -ENOIOCTLCMD;
 925
 926	switch (cmd) {
 927	case SIOCINQ:
 928		if (sk->sk_state == TCP_LISTEN) {
 929			ret = -EINVAL;
 930			break;
 931		}
 932
 933		lock_sock(sk);
 934		if (sock_flag(sk, SOCK_URGINLINE) &&
 935		    !skb_queue_empty(&pn->ctrlreq_queue))
 936			answ = skb_peek(&pn->ctrlreq_queue)->len;
 937		else if (!skb_queue_empty(&sk->sk_receive_queue))
 938			answ = skb_peek(&sk->sk_receive_queue)->len;
 939		else
 940			answ = 0;
 941		release_sock(sk);
 942		ret = put_user(answ, (int __user *)arg);
 943		break;
 944
 945	case SIOCPNENABLEPIPE:
 946		lock_sock(sk);
 947		if (sk->sk_state == TCP_SYN_SENT)
 948			ret = -EBUSY;
 949		else if (sk->sk_state == TCP_ESTABLISHED)
 950			ret = -EISCONN;
 951		else if (!pn->pn_sk.sobject)
 952			ret = -EADDRNOTAVAIL;
 953		else
 954			ret = pep_sock_enable(sk, NULL, 0);
 955		release_sock(sk);
 956		break;
 957	}
 958
 959	return ret;
 960}
 961
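    /* Initialise a freshly created pipe sock to its default unconnected
     * state: no pipe handle, no credits, legacy flow control. */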
 962static int pep_init(struct sock *sk)
 963{
 964	struct pep_sock *pn = pep_sk(sk);
 965
 966	sk->sk_destruct = pipe_destruct;
 967	INIT_HLIST_HEAD(&pn->hlist);
 968	pn->listener = NULL;
 969	skb_queue_head_init(&pn->ctrlreq_queue);
 970	atomic_set(&pn->tx_credits, 0);
 971	pn->ifindex = 0;
 972	pn->peer_type = 0;
 973	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 974	pn->rx_credits = 0;
 975	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 976	pn->init_enable = 1;
 977	pn->aligned = 0;
 978	return 0;
 979}
 980
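    /* Socket options at the SOL_PNPIPE level:
     *  - PNPIPE_ENCAP: attach to / detach from a GPRS network interface
     *    (requires CAP_NET_ADMIN),
     *  - PNPIPE_HANDLE: choose the pipe handle before connecting,
     *  - PNPIPE_INITSTATE: whether a pipe set up by connect() should start
     *    in the enabled state.
     *
     * Illustrative user-space sketch only (assumes "fd" is a suitable pipe
     * sock and omits error handling):
     *
     *	int mode = PNPIPE_ENCAP_IP;
     *	setsockopt(fd, SOL_PNPIPE, PNPIPE_ENCAP, &mode, sizeof(mode));
     */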
 981static int pep_setsockopt(struct sock *sk, int level, int optname,
 982			  sockptr_t optval, unsigned int optlen)
 983{
 984	struct pep_sock *pn = pep_sk(sk);
 985	int val = 0, err = 0;
 986
 987	if (level != SOL_PNPIPE)
 988		return -ENOPROTOOPT;
 989	if (optlen >= sizeof(int)) {
 990		if (copy_from_sockptr(&val, optval, sizeof(int)))
 991			return -EFAULT;
 992	}
 993
 994	lock_sock(sk);
 995	switch (optname) {
 996	case PNPIPE_ENCAP:
 997		if (val && val != PNPIPE_ENCAP_IP) {
 998			err = -EINVAL;
 999			break;
1000		}
1001		if (!pn->ifindex == !val)
1002			break; /* Nothing to do! */
1003		if (!capable(CAP_NET_ADMIN)) {
1004			err = -EPERM;
1005			break;
1006		}
1007		if (val) {
1008			release_sock(sk);
1009			err = gprs_attach(sk);
1010			if (err > 0) {
1011				pn->ifindex = err;
1012				err = 0;
1013			}
1014		} else {
1015			pn->ifindex = 0;
1016			release_sock(sk);
1017			gprs_detach(sk);
1018			err = 0;
1019		}
1020		goto out_norel;
1021
1022	case PNPIPE_HANDLE:
1023		if ((sk->sk_state == TCP_CLOSE) &&
1024			(val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
1025			pn->pipe_handle = val;
1026		else
1027			err = -EINVAL;
1028		break;
1029
1030	case PNPIPE_INITSTATE:
1031		pn->init_enable = !!val;
1032		break;
1033
1034	default:
1035		err = -ENOPROTOOPT;
1036	}
1037	release_sock(sk);
1038
1039out_norel:
1040	return err;
1041}
1042
1043static int pep_getsockopt(struct sock *sk, int level, int optname,
1044				char __user *optval, int __user *optlen)
1045{
1046	struct pep_sock *pn = pep_sk(sk);
1047	int len, val;
1048
1049	if (level != SOL_PNPIPE)
1050		return -ENOPROTOOPT;
1051	if (get_user(len, optlen))
1052		return -EFAULT;
1053
1054	switch (optname) {
1055	case PNPIPE_ENCAP:
1056		val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
1057		break;
1058
1059	case PNPIPE_IFINDEX:
1060		val = pn->ifindex;
1061		break;
1062
1063	case PNPIPE_HANDLE:
1064		val = pn->pipe_handle;
1065		if (val == PN_PIPE_INVALID_HANDLE)
1066			return -EINVAL;
1067		break;
1068
1069	case PNPIPE_INITSTATE:
1070		val = pn->init_enable;
1071		break;
1072
1073	default:
1074		return -ENOPROTOOPT;
1075	}
1076
1077	len = min_t(unsigned int, sizeof(int), len);
1078	if (put_user(len, optlen))
1079		return -EFAULT;
1080	if (put_user(val, (int __user *) optval))
1081		return -EFAULT;
1082	return 0;
1083}
1084
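    /* Transmit one data packet on the pipe: consume a tx credit when
     * credit-based flow control is in use, prepend the PNS_PIPE_DATA (or
     * PNS_PIPE_ALIGNED_DATA) header and pass the skb down to the Phonet
     * layer; the credit is returned if the send fails. */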
1085static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
1086{
1087	struct pep_sock *pn = pep_sk(sk);
1088	struct pnpipehdr *ph;
1089	int err;
1090
1091	if (pn_flow_safe(pn->tx_fc) &&
1092	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
1093		kfree_skb(skb);
1094		return -ENOBUFS;
1095	}
1096
1097	skb_push(skb, 3 + pn->aligned);
1098	skb_reset_transport_header(skb);
1099	ph = pnp_hdr(skb);
1100	ph->utid = 0;
1101	if (pn->aligned) {
1102		ph->message_id = PNS_PIPE_ALIGNED_DATA;
1103		ph->data0 = 0; /* padding */
1104	} else
1105		ph->message_id = PNS_PIPE_DATA;
1106	ph->pipe_handle = pn->pipe_handle;
1107	err = pn_skb_send(sk, skb, NULL);
1108
1109	if (err && pn_flow_safe(pn->tx_fc))
1110		atomic_inc(&pn->tx_credits);
1111	return err;
1113}
1114
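    /* sendmsg() back-end: one message per call, MSG_EOR is mandatory.
     * Waits, within the send timeout, until the pipe is enabled
     * (TCP_ESTABLISHED) and a tx credit is available, then transmits the
     * message through pipe_skb_send(). */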
1115static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1116{
1117	struct pep_sock *pn = pep_sk(sk);
1118	struct sk_buff *skb;
1119	long timeo;
1120	int flags = msg->msg_flags;
1121	int err, done;
1122
1123	if (len > USHRT_MAX)
1124		return -EMSGSIZE;
1125
1126	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
1127				MSG_CMSG_COMPAT)) ||
1128			!(msg->msg_flags & MSG_EOR))
1129		return -EOPNOTSUPP;
1130
1131	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
1132					flags & MSG_DONTWAIT, &err);
1133	if (!skb)
1134		return err;
1135
1136	skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
1137	err = memcpy_from_msg(skb_put(skb, len), msg, len);
1138	if (err < 0)
1139		goto outfree;
1140
1141	lock_sock(sk);
1142	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1143	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
1144		err = -ENOTCONN;
1145		goto out;
1146	}
1147	if (sk->sk_state != TCP_ESTABLISHED) {
1148		/* Wait until the pipe gets to enabled state */
1149disabled:
1150		err = sk_stream_wait_connect(sk, &timeo);
1151		if (err)
1152			goto out;
1153
1154		if (sk->sk_state == TCP_CLOSE_WAIT) {
1155			err = -ECONNRESET;
1156			goto out;
1157		}
1158	}
1159	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
1160
1161	/* Wait until flow control allows TX */
1162	done = atomic_read(&pn->tx_credits);
1163	while (!done) {
1164		DEFINE_WAIT_FUNC(wait, woken_wake_function);
1165
1166		if (!timeo) {
1167			err = -EAGAIN;
1168			goto out;
1169		}
1170		if (signal_pending(current)) {
1171			err = sock_intr_errno(timeo);
1172			goto out;
1173		}
1174
1175		add_wait_queue(sk_sleep(sk), &wait);
1176		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits), &wait);
1177		remove_wait_queue(sk_sleep(sk), &wait);
1178
1179		if (sk->sk_state != TCP_ESTABLISHED)
1180			goto disabled;
1181	}
1182
1183	err = pipe_skb_send(sk, skb);
1184	if (err >= 0)
1185		err = len; /* success! */
1186	skb = NULL;
1187out:
1188	release_sock(sk);
1189outfree:
1190	kfree_skb(skb);
1191	return err;
1192}
1193
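    /* pep_writeable(), pep_write() and pep_read() below are helpers for the
     * GPRS IP-over-pipe encapsulation (net/phonet/gprs.c): they expose the
     * tx credit state and move packets between the netdevice and the pipe. */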
1194int pep_writeable(struct sock *sk)
1195{
1196	struct pep_sock *pn = pep_sk(sk);
1197
1198	return atomic_read(&pn->tx_credits);
1199}
1200
1201int pep_write(struct sock *sk, struct sk_buff *skb)
1202{
1203	struct sk_buff *rskb, *fs;
1204	int flen = 0;
1205
1206	if (pep_sk(sk)->aligned)
1207		return pipe_skb_send(sk, skb);
1208
1209	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
1210	if (!rskb) {
1211		kfree_skb(skb);
1212		return -ENOMEM;
1213	}
1214	skb_shinfo(rskb)->frag_list = skb;
1215	rskb->len += skb->len;
1216	rskb->data_len += rskb->len;
1217	rskb->truesize += rskb->len;
1218
1219	/* Avoid nested fragments */
1220	skb_walk_frags(skb, fs)
1221		flen += fs->len;
1222	skb->next = skb_shinfo(skb)->frag_list;
1223	skb_frag_list_init(skb);
1224	skb->len -= flen;
1225	skb->data_len -= flen;
1226	skb->truesize -= flen;
1227
1228	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
1229	return pipe_skb_send(sk, rskb);
1230}
1231
1232struct sk_buff *pep_read(struct sock *sk)
1233{
1234	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
1235
1236	if (sk->sk_state == TCP_ESTABLISHED)
1237		pipe_grant_credits(sk, GFP_ATOMIC);
1238	return skb;
1239}
1240
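    /* recvmsg() back-end: with MSG_OOB (or SOCK_URGINLINE) a pending control
     * request is dequeued and acknowledged first; otherwise one data message
     * is returned and, on an enabled pipe, fresh rx credits are granted to
     * the peer. */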
1241static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
1242		       int flags, int *addr_len)
1243{
1244	struct sk_buff *skb;
1245	int err;
1246
1247	if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
1248			MSG_NOSIGNAL|MSG_CMSG_COMPAT))
1249		return -EOPNOTSUPP;
1250
1251	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
1252		return -ENOTCONN;
1253
1254	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
1255		/* Dequeue and acknowledge control request */
1256		struct pep_sock *pn = pep_sk(sk);
1257
1258		if (flags & MSG_PEEK)
1259			return -EOPNOTSUPP;
1260		skb = skb_dequeue(&pn->ctrlreq_queue);
1261		if (skb) {
1262			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
1263						GFP_KERNEL);
1264			msg->msg_flags |= MSG_OOB;
1265			goto copy;
1266		}
1267		if (flags & MSG_OOB)
1268			return -EINVAL;
1269	}
1270
1271	skb = skb_recv_datagram(sk, flags, &err);
1272	lock_sock(sk);
1273	if (skb == NULL) {
1274		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
1275			err = -ECONNRESET;
1276		release_sock(sk);
1277		return err;
1278	}
1279
1280	if (sk->sk_state == TCP_ESTABLISHED)
1281		pipe_grant_credits(sk, GFP_KERNEL);
1282	release_sock(sk);
1283copy:
1284	msg->msg_flags |= MSG_EOR;
1285	if (skb->len > len)
1286		msg->msg_flags |= MSG_TRUNC;
1287	else
1288		len = skb->len;
1289
1290	err = skb_copy_datagram_msg(skb, 0, msg, len);
1291	if (!err)
1292		err = (flags & MSG_TRUNC) ? skb->len : len;
1293
1294	skb_free_datagram(sk, skb);
1295	return err;
1296}
1297
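    /* Unhash an accepted pipe from its listener's list and drop the listener
     * reference; a listening sock itself is only unhashed once it no longer
     * has connected pipes. */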
1298static void pep_sock_unhash(struct sock *sk)
1299{
1300	struct pep_sock *pn = pep_sk(sk);
1301	struct sock *skparent = NULL;
1302
1303	lock_sock(sk);
1304
1305	if (pn->listener != NULL) {
1306		skparent = pn->listener;
1307		pn->listener = NULL;
1308		release_sock(sk);
1309
1310		pn = pep_sk(skparent);
1311		lock_sock(skparent);
1312		sk_del_node_init(sk);
1313		sk = skparent;
1314	}
1315
1316	/* Unhash a listening sock only when it is closed
1317	 * and all of its active connected pipes are closed. */
1318	if (hlist_empty(&pn->hlist))
1319		pn_sock_unhash(&pn->pn_sk.sk);
1320	release_sock(sk);
1321
1322	if (skparent)
1323		sock_put(skparent);
1324}
1325
1326static struct proto pep_proto = {
1327	.close		= pep_sock_close,
1328	.accept		= pep_sock_accept,
1329	.connect	= pep_sock_connect,
1330	.ioctl		= pep_ioctl,
1331	.init		= pep_init,
1332	.setsockopt	= pep_setsockopt,
1333	.getsockopt	= pep_getsockopt,
1334	.sendmsg	= pep_sendmsg,
1335	.recvmsg	= pep_recvmsg,
1336	.backlog_rcv	= pep_do_rcv,
1337	.hash		= pn_sock_hash,
1338	.unhash		= pep_sock_unhash,
1339	.get_port	= pn_sock_get_port,
1340	.obj_size	= sizeof(struct pep_sock),
1341	.owner		= THIS_MODULE,
1342	.name		= "PNPIPE",
1343};
1344
1345static const struct phonet_protocol pep_pn_proto = {
1346	.ops		= &phonet_stream_ops,
1347	.prot		= &pep_proto,
1348	.sock_type	= SOCK_SEQPACKET,
1349};
1350
1351static int __init pep_register(void)
1352{
1353	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
1354}
1355
1356static void __exit pep_unregister(void)
1357{
1358	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
1359}
1360
1361module_init(pep_register);
1362module_exit(pep_unregister);
1363MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
1364MODULE_DESCRIPTION("Phonet pipe protocol");
1365MODULE_LICENSE("GPL");
1366MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);