   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *
   4 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
   5 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
   6 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
   7 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
   8 */
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/moduleparam.h>
  13#include <linux/init.h>
  14#include <linux/errno.h>
  15#include <linux/types.h>
  16#include <linux/socket.h>
  17#include <linux/in.h>
  18#include <linux/slab.h>
  19#include <linux/kernel.h>
  20#include <linux/sched/signal.h>
  21#include <linux/spinlock.h>
  22#include <linux/timer.h>
  23#include <linux/string.h>
  24#include <linux/sockios.h>
  25#include <linux/net.h>
  26#include <linux/stat.h>
  27#include <net/net_namespace.h>
  28#include <net/ax25.h>
  29#include <linux/inet.h>
  30#include <linux/netdevice.h>
  31#include <linux/if_arp.h>
  32#include <linux/skbuff.h>
  33#include <net/sock.h>
  34#include <linux/uaccess.h>
  35#include <linux/fcntl.h>
  36#include <linux/termios.h>
  37#include <linux/mm.h>
  38#include <linux/interrupt.h>
  39#include <linux/notifier.h>
  40#include <net/rose.h>
  41#include <linux/proc_fs.h>
  42#include <linux/seq_file.h>
  43#include <net/tcp_states.h>
  44#include <net/ip.h>
  45#include <net/arp.h>
  46
  47static int rose_ndevs = 10;
  48
  49int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
  50int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
  51int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
  52int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
  53int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
  54int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
  55int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
  56int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
  57int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
  58int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
  59
  60static HLIST_HEAD(rose_list);
  61static DEFINE_SPINLOCK(rose_list_lock);
  62
  63static const struct proto_ops rose_proto_ops;
  64
  65ax25_address rose_callsign;
  66
  67/*
  68 * ROSE network devices are virtual network devices encapsulating ROSE
  69 * frames into AX.25 which will be sent through an AX.25 device, so form a
  70 * special "super class" of normal net devices; split their locks off into a
  71 * separate class since they always nest.
  72 */
  73static struct lock_class_key rose_netdev_xmit_lock_key;
  74static struct lock_class_key rose_netdev_addr_lock_key;
  75
  76static void rose_set_lockdep_one(struct net_device *dev,
  77				 struct netdev_queue *txq,
  78				 void *_unused)
  79{
  80	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
  81}
  82
  83static void rose_set_lockdep_key(struct net_device *dev)
  84{
  85	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
  86	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
  87}
  88
  89/*
  90 *	Convert a ROSE address into text.
  91 */
  92char *rose2asc(char *buf, const rose_address *addr)
  93{
  94	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
  95	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
  96	    addr->rose_addr[4] == 0x00) {
  97		strcpy(buf, "*");
  98	} else {
  99		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
 100						addr->rose_addr[1] & 0xFF,
 101						addr->rose_addr[2] & 0xFF,
 102						addr->rose_addr[3] & 0xFF,
 103						addr->rose_addr[4] & 0xFF);
 104	}
 105
 106	return buf;
 107}
 108
 109/*
 110 *	Compare two ROSE addresses, 0 == equal.
 111 */
 112int rosecmp(const rose_address *addr1, const rose_address *addr2)
 113{
 114	int i;
 115
 116	for (i = 0; i < 5; i++)
 117		if (addr1->rose_addr[i] != addr2->rose_addr[i])
 118			return 1;
 119
 120	return 0;
 121}
 122
 123/*
 124 *	Compare two ROSE addresses for only mask digits, 0 == equal.
 125 */
 126int rosecmpm(const rose_address *addr1, const rose_address *addr2,
 127	     unsigned short mask)
 128{
 129	unsigned int i, j;
 130
 131	if (mask > 10)
 132		return 1;
 133
 134	for (i = 0; i < mask; i++) {
 135		j = i / 2;
 136
 137		if ((i % 2) != 0) {
 138			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
 139				return 1;
 140		} else {
 141			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
 142				return 1;
 143		}
 144	}
 145
 146	return 0;
 147}
 148
 149/*
 150 *	Socket removal during an interrupt is now safe.
 151 */
 152static void rose_remove_socket(struct sock *sk)
 153{
 154	spin_lock_bh(&rose_list_lock);
 155	sk_del_node_init(sk);
 156	spin_unlock_bh(&rose_list_lock);
 157}
 158
 159/*
 160 *	Kill all bound sockets on a broken link layer connection to a
 161 *	particular neighbour.
 162 */
 163void rose_kill_by_neigh(struct rose_neigh *neigh)
 164{
 165	struct sock *s;
 166
 167	spin_lock_bh(&rose_list_lock);
 168	sk_for_each(s, &rose_list) {
 169		struct rose_sock *rose = rose_sk(s);
 170
 171		if (rose->neighbour == neigh) {
 172			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 173			rose->neighbour->use--;
 174			rose->neighbour = NULL;
 175		}
 176	}
 177	spin_unlock_bh(&rose_list_lock);
 178}
 179
 180/*
 181 *	Kill all bound sockets on a dropped device.
 182 */
 183static void rose_kill_by_device(struct net_device *dev)
 184{
 185	struct sock *sk, *array[16];
 186	struct rose_sock *rose;
 187	bool rescan;
 188	int i, cnt;
 189
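	/*
	 * Collect up to 16 matching sockets per pass while holding the list
	 * lock, then disconnect them one at a time with lock_sock() held
	 * (which may sleep, so it cannot be taken under the BH spinlock).
	 * Rescan from the top if the array filled up.
	 */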
 190start:
 191	rescan = false;
 192	cnt = 0;
 193	spin_lock_bh(&rose_list_lock);
 194	sk_for_each(sk, &rose_list) {
 195		rose = rose_sk(sk);
 196		if (rose->device == dev) {
 197			if (cnt == ARRAY_SIZE(array)) {
 198				rescan = true;
 199				break;
 200			}
 201			sock_hold(sk);
 202			array[cnt++] = sk;
 203		}
 204	}
 205	spin_unlock_bh(&rose_list_lock);
 206
 207	for (i = 0; i < cnt; i++) {
  208		sk = array[i];
 209		rose = rose_sk(sk);
 210		lock_sock(sk);
 211		spin_lock_bh(&rose_list_lock);
 212		if (rose->device == dev) {
 213			rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 214			if (rose->neighbour)
 215				rose->neighbour->use--;
 216			netdev_put(rose->device, &rose->dev_tracker);
 217			rose->device = NULL;
 218		}
 219		spin_unlock_bh(&rose_list_lock);
 220		release_sock(sk);
 221		sock_put(sk);
 222		cond_resched();
 223	}
 224	if (rescan)
 225		goto start;
 226}
 227
 228/*
 229 *	Handle device status changes.
 230 */
 231static int rose_device_event(struct notifier_block *this,
 232			     unsigned long event, void *ptr)
 233{
 234	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 235
 236	if (!net_eq(dev_net(dev), &init_net))
 237		return NOTIFY_DONE;
 238
 239	if (event != NETDEV_DOWN)
 240		return NOTIFY_DONE;
 241
 242	switch (dev->type) {
 243	case ARPHRD_ROSE:
 244		rose_kill_by_device(dev);
 245		break;
 246	case ARPHRD_AX25:
 247		rose_link_device_down(dev);
 248		rose_rt_device_down(dev);
 249		break;
 250	}
 251
 252	return NOTIFY_DONE;
 253}
 254
 255/*
 256 *	Add a socket to the bound sockets list.
 257 */
 258static void rose_insert_socket(struct sock *sk)
 259{
 260
 261	spin_lock_bh(&rose_list_lock);
 262	sk_add_node(sk, &rose_list);
 263	spin_unlock_bh(&rose_list_lock);
 264}
 265
 266/*
 267 *	Find a socket that wants to accept the Call Request we just
 268 *	received.
 269 */
 270static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 271{
 272	struct sock *s;
 273
 274	spin_lock_bh(&rose_list_lock);
 275	sk_for_each(s, &rose_list) {
 276		struct rose_sock *rose = rose_sk(s);
 277
 278		if (!rosecmp(&rose->source_addr, addr) &&
 279		    !ax25cmp(&rose->source_call, call) &&
 280		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
 281			goto found;
 282	}
 283
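	/* No exact callsign match; fall back to listeners bound to the null (wildcard) callsign. */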
 284	sk_for_each(s, &rose_list) {
 285		struct rose_sock *rose = rose_sk(s);
 286
 287		if (!rosecmp(&rose->source_addr, addr) &&
 288		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
 289		    s->sk_state == TCP_LISTEN)
 290			goto found;
 291	}
 292	s = NULL;
 293found:
 294	spin_unlock_bh(&rose_list_lock);
 295	return s;
 296}
 297
 298/*
 299 *	Find a connected ROSE socket given my LCI and device.
 300 */
 301struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
 302{
 303	struct sock *s;
 304
 305	spin_lock_bh(&rose_list_lock);
 306	sk_for_each(s, &rose_list) {
 307		struct rose_sock *rose = rose_sk(s);
 308
 309		if (rose->lci == lci && rose->neighbour == neigh)
 310			goto found;
 311	}
 312	s = NULL;
 313found:
 314	spin_unlock_bh(&rose_list_lock);
 315	return s;
 316}
 317
 318/*
 319 *	Find a unique LCI for a given device.
 320 */
 321unsigned int rose_new_lci(struct rose_neigh *neigh)
 322{
 323	int lci;
 324
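	/*
	 * Scan the LCI range from opposite ends depending on whether we act
	 * as DCE or DTE, so the two ends of the link are unlikely to pick
	 * the same LCI for simultaneous outgoing calls (X.25 call collision
	 * avoidance).
	 */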
 325	if (neigh->dce_mode) {
 326		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
 327			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 328				return lci;
 329	} else {
 330		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
 331			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 332				return lci;
 333	}
 334
 335	return 0;
 336}
 337
 338/*
 339 *	Deferred destroy.
 340 */
 341void rose_destroy_socket(struct sock *);
 342
 343/*
 344 *	Handler for deferred kills.
 345 */
 346static void rose_destroy_timer(struct timer_list *t)
 347{
 348	struct sock *sk = from_timer(sk, t, sk_timer);
 349
 350	rose_destroy_socket(sk);
 351}
 352
 353/*
 354 *	This is called from user mode and the timers. Thus it protects itself
 355 *	against interrupt users but doesn't worry about being called during
 356 *	work.  Once it is removed from the queue no interrupt or bottom half
 357 *	will touch it and we are (fairly 8-) ) safe.
 358 */
 359void rose_destroy_socket(struct sock *sk)
 360{
 361	struct sk_buff *skb;
 362
 363	rose_remove_socket(sk);
 364	rose_stop_heartbeat(sk);
 365	rose_stop_idletimer(sk);
 366	rose_stop_timer(sk);
 367
 368	rose_clear_queues(sk);		/* Flush the queues */
 369
 370	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 371		if (skb->sk != sk) {	/* A pending connection */
 372			/* Queue the unaccepted socket for death */
 373			sock_set_flag(skb->sk, SOCK_DEAD);
 374			rose_start_heartbeat(skb->sk);
 375			rose_sk(skb->sk)->state = ROSE_STATE_0;
 376		}
 377
 378		kfree_skb(skb);
 379	}
 380
 381	if (sk_has_allocations(sk)) {
 382		/* Defer: outstanding buffers */
 383		timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
 384		sk->sk_timer.expires  = jiffies + 10 * HZ;
 385		add_timer(&sk->sk_timer);
 386	} else
 387		sock_put(sk);
 388}
 389
 390/*
 391 *	Handling for system calls applied via the various interfaces to a
 392 *	ROSE socket object.
 393 */
 394
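/*
 * Illustrative userspace sketch (not part of this file): the timer options
 * handled below are set in whole seconds (ROSE_IDLE in minutes) via
 * setsockopt() at level SOL_ROSE, e.g.
 *
 *	int fd = socket(AF_ROSE, SOCK_SEQPACKET, 0);
 *	unsigned int t1 = 200;
 *	setsockopt(fd, SOL_ROSE, ROSE_T1, &t1, sizeof(t1));
 *
 * assuming the SOL_ROSE and ROSE_T1 constants are available from the
 * kernel/libc headers on the build host.
 */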
 395static int rose_setsockopt(struct socket *sock, int level, int optname,
 396		sockptr_t optval, unsigned int optlen)
 397{
 398	struct sock *sk = sock->sk;
 399	struct rose_sock *rose = rose_sk(sk);
 400	unsigned int opt;
 401
 402	if (level != SOL_ROSE)
 403		return -ENOPROTOOPT;
 404
 405	if (optlen < sizeof(unsigned int))
 406		return -EINVAL;
 407
 408	if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
 409		return -EFAULT;
 410
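	/* Timer values arrive in seconds (ROSE_IDLE in minutes) and are stored internally in jiffies. */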
 411	switch (optname) {
 412	case ROSE_DEFER:
 413		rose->defer = opt ? 1 : 0;
 414		return 0;
 415
 416	case ROSE_T1:
 417		if (opt < 1 || opt > UINT_MAX / HZ)
 418			return -EINVAL;
 419		rose->t1 = opt * HZ;
 420		return 0;
 421
 422	case ROSE_T2:
 423		if (opt < 1 || opt > UINT_MAX / HZ)
 424			return -EINVAL;
 425		rose->t2 = opt * HZ;
 426		return 0;
 427
 428	case ROSE_T3:
 429		if (opt < 1 || opt > UINT_MAX / HZ)
 430			return -EINVAL;
 431		rose->t3 = opt * HZ;
 432		return 0;
 433
 434	case ROSE_HOLDBACK:
 435		if (opt < 1 || opt > UINT_MAX / HZ)
 436			return -EINVAL;
 437		rose->hb = opt * HZ;
 438		return 0;
 439
 440	case ROSE_IDLE:
 441		if (opt > UINT_MAX / (60 * HZ))
 442			return -EINVAL;
 443		rose->idle = opt * 60 * HZ;
 444		return 0;
 445
 446	case ROSE_QBITINCL:
 447		rose->qbitincl = opt ? 1 : 0;
 448		return 0;
 449
 450	default:
 451		return -ENOPROTOOPT;
 452	}
 453}
 454
 455static int rose_getsockopt(struct socket *sock, int level, int optname,
 456	char __user *optval, int __user *optlen)
 457{
 458	struct sock *sk = sock->sk;
 459	struct rose_sock *rose = rose_sk(sk);
 460	int val = 0;
 461	int len;
 462
 463	if (level != SOL_ROSE)
 464		return -ENOPROTOOPT;
 465
 466	if (get_user(len, optlen))
 467		return -EFAULT;
 468
 469	if (len < 0)
 470		return -EINVAL;
 471
 472	switch (optname) {
 473	case ROSE_DEFER:
 474		val = rose->defer;
 475		break;
 476
 477	case ROSE_T1:
 478		val = rose->t1 / HZ;
 479		break;
 480
 481	case ROSE_T2:
 482		val = rose->t2 / HZ;
 483		break;
 484
 485	case ROSE_T3:
 486		val = rose->t3 / HZ;
 487		break;
 488
 489	case ROSE_HOLDBACK:
 490		val = rose->hb / HZ;
 491		break;
 492
 493	case ROSE_IDLE:
 494		val = rose->idle / (60 * HZ);
 495		break;
 496
 497	case ROSE_QBITINCL:
 498		val = rose->qbitincl;
 499		break;
 500
 501	default:
 502		return -ENOPROTOOPT;
 503	}
 504
 505	len = min_t(unsigned int, len, sizeof(int));
 506
 507	if (put_user(len, optlen))
 508		return -EFAULT;
 509
 510	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
 511}
 512
 513static int rose_listen(struct socket *sock, int backlog)
 514{
 515	struct sock *sk = sock->sk;
 516
 517	lock_sock(sk);
 518	if (sock->state != SS_UNCONNECTED) {
 519		release_sock(sk);
 520		return -EINVAL;
 521	}
 522
 523	if (sk->sk_state != TCP_LISTEN) {
 524		struct rose_sock *rose = rose_sk(sk);
 525
 526		rose->dest_ndigis = 0;
 527		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
 528		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
 529		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
 530		sk->sk_max_ack_backlog = backlog;
 531		sk->sk_state           = TCP_LISTEN;
 532		release_sock(sk);
 533		return 0;
 534	}
 535	release_sock(sk);
 536
 537	return -EOPNOTSUPP;
 538}
 539
 540static struct proto rose_proto = {
 541	.name	  = "ROSE",
 542	.owner	  = THIS_MODULE,
 543	.obj_size = sizeof(struct rose_sock),
 544};
 545
 546static int rose_create(struct net *net, struct socket *sock, int protocol,
 547		       int kern)
 548{
 549	struct sock *sk;
 550	struct rose_sock *rose;
 551
 552	if (!net_eq(net, &init_net))
 553		return -EAFNOSUPPORT;
 554
 555	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 556		return -ESOCKTNOSUPPORT;
 557
 558	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
 559	if (sk == NULL)
 560		return -ENOMEM;
 561
 562	rose = rose_sk(sk);
 563
 564	sock_init_data(sock, sk);
 565
 566	skb_queue_head_init(&rose->ack_queue);
 567#ifdef M_BIT
 568	skb_queue_head_init(&rose->frag_queue);
 569	rose->fraglen    = 0;
 570#endif
 571
 572	sock->ops    = &rose_proto_ops;
 573	sk->sk_protocol = protocol;
 574
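	/* The timer callbacks are installed later, when the timers are started (see rose_timer.c). */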
 575	timer_setup(&rose->timer, NULL, 0);
 576	timer_setup(&rose->idletimer, NULL, 0);
 577
 578	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
 579	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
 580	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
 581	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
 582	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
 583
 584	rose->state = ROSE_STATE_0;
 585
 586	return 0;
 587}
 588
 589static struct sock *rose_make_new(struct sock *osk)
 590{
 591	struct sock *sk;
 592	struct rose_sock *rose, *orose;
 593
 594	if (osk->sk_type != SOCK_SEQPACKET)
 595		return NULL;
 596
 597	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
 598	if (sk == NULL)
 599		return NULL;
 600
 601	rose = rose_sk(sk);
 602
 603	sock_init_data(NULL, sk);
 604
 605	skb_queue_head_init(&rose->ack_queue);
 606#ifdef M_BIT
 607	skb_queue_head_init(&rose->frag_queue);
 608	rose->fraglen  = 0;
 609#endif
 610
 611	sk->sk_type     = osk->sk_type;
 612	sk->sk_priority = READ_ONCE(osk->sk_priority);
 613	sk->sk_protocol = osk->sk_protocol;
 614	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 615	sk->sk_sndbuf   = osk->sk_sndbuf;
 616	sk->sk_state    = TCP_ESTABLISHED;
 617	sock_copy_flags(sk, osk);
 618
 619	timer_setup(&rose->timer, NULL, 0);
 620	timer_setup(&rose->idletimer, NULL, 0);
 621
 622	orose		= rose_sk(osk);
 623	rose->t1	= orose->t1;
 624	rose->t2	= orose->t2;
 625	rose->t3	= orose->t3;
 626	rose->hb	= orose->hb;
 627	rose->idle	= orose->idle;
 628	rose->defer	= orose->defer;
 629	rose->device	= orose->device;
 630	if (rose->device)
 631		netdev_hold(rose->device, &rose->dev_tracker, GFP_ATOMIC);
 632	rose->qbitincl	= orose->qbitincl;
 633
 634	return sk;
 635}
 636
 637static int rose_release(struct socket *sock)
 638{
 639	struct sock *sk = sock->sk;
 640	struct rose_sock *rose;
 641
 642	if (sk == NULL) return 0;
 643
 644	sock_hold(sk);
 645	sock_orphan(sk);
 646	lock_sock(sk);
 647	rose = rose_sk(sk);
 648
 649	switch (rose->state) {
 650	case ROSE_STATE_0:
 651		release_sock(sk);
 652		rose_disconnect(sk, 0, -1, -1);
 653		lock_sock(sk);
 654		rose_destroy_socket(sk);
 655		break;
 656
 657	case ROSE_STATE_2:
 658		rose->neighbour->use--;
 659		release_sock(sk);
 660		rose_disconnect(sk, 0, -1, -1);
 661		lock_sock(sk);
 662		rose_destroy_socket(sk);
 663		break;
 664
 665	case ROSE_STATE_1:
 666	case ROSE_STATE_3:
 667	case ROSE_STATE_4:
 668	case ROSE_STATE_5:
 669		rose_clear_queues(sk);
 670		rose_stop_idletimer(sk);
 671		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
 672		rose_start_t3timer(sk);
 673		rose->state  = ROSE_STATE_2;
 674		sk->sk_state    = TCP_CLOSE;
 675		sk->sk_shutdown |= SEND_SHUTDOWN;
 676		sk->sk_state_change(sk);
 677		sock_set_flag(sk, SOCK_DEAD);
 678		sock_set_flag(sk, SOCK_DESTROY);
 679		break;
 680
 681	default:
 682		break;
 683	}
 684
 685	spin_lock_bh(&rose_list_lock);
 686	netdev_put(rose->device, &rose->dev_tracker);
 687	rose->device = NULL;
 688	spin_unlock_bh(&rose_list_lock);
 689	sock->sk = NULL;
 690	release_sock(sk);
 691	sock_put(sk);
 692
 693	return 0;
 694}
 695
 696static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 697{
 698	struct sock *sk = sock->sk;
 699	struct rose_sock *rose = rose_sk(sk);
 700	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 701	struct net_device *dev;
 702	ax25_address *source;
 703	ax25_uid_assoc *user;
 704	int err = -EINVAL;
 705	int n;
 706
 707	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 708		return -EINVAL;
 709
 710	if (addr->srose_family != AF_ROSE)
 711		return -EINVAL;
 712
 713	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 714		return -EINVAL;
 715
 716	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 717		return -EINVAL;
 718
 719	lock_sock(sk);
 720
 721	if (!sock_flag(sk, SOCK_ZAPPED))
 722		goto out_release;
 723
 724	err = -EADDRNOTAVAIL;
 725	dev = rose_dev_get(&addr->srose_addr);
 726	if (!dev)
 727		goto out_release;
 728
 729	source = &addr->srose_call;
 730
 731	user = ax25_findbyuid(current_euid());
 732	if (user) {
 733		rose->source_call = user->call;
 734		ax25_uid_put(user);
 735	} else {
 736		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
 737			dev_put(dev);
 738			err = -EACCES;
 739			goto out_release;
 740		}
 741		rose->source_call   = *source;
 742	}
 743
 744	rose->source_addr   = addr->srose_addr;
 745	rose->device        = dev;
 746	netdev_tracker_alloc(rose->device, &rose->dev_tracker, GFP_KERNEL);
 747	rose->source_ndigis = addr->srose_ndigis;
 748
 749	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 750		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 751		for (n = 0 ; n < addr->srose_ndigis ; n++)
 752			rose->source_digis[n] = full_addr->srose_digis[n];
 753	} else {
 754		if (rose->source_ndigis == 1) {
 755			rose->source_digis[0] = addr->srose_digi;
 756		}
 757	}
 758
 759	rose_insert_socket(sk);
 760
 761	sock_reset_flag(sk, SOCK_ZAPPED);
 762	err = 0;
 763out_release:
 764	release_sock(sk);
 765	return err;
 766}
 767
 768static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
 769{
 770	struct sock *sk = sock->sk;
 771	struct rose_sock *rose = rose_sk(sk);
 772	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 773	unsigned char cause, diagnostic;
 774	ax25_uid_assoc *user;
 775	int n, err = 0;
 776
 777	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 778		return -EINVAL;
 779
 780	if (addr->srose_family != AF_ROSE)
 781		return -EINVAL;
 782
 783	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 784		return -EINVAL;
 785
 786	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 787		return -EINVAL;
 788
 789	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
 790	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
 791		return -EINVAL;
 792
 793	lock_sock(sk);
 794
 795	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
 796		/* Connect completed during a ERESTARTSYS event */
 797		sock->state = SS_CONNECTED;
 798		goto out_release;
 799	}
 800
 801	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
 802		sock->state = SS_UNCONNECTED;
 803		err = -ECONNREFUSED;
 804		goto out_release;
 805	}
 806
 807	if (sk->sk_state == TCP_ESTABLISHED) {
 808		/* No reconnect on a seqpacket socket */
 809		err = -EISCONN;
 810		goto out_release;
 811	}
 812
 813	sk->sk_state   = TCP_CLOSE;
 814	sock->state = SS_UNCONNECTED;
 815
 816	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
 817					 &diagnostic, 0);
 818	if (!rose->neighbour) {
 819		err = -ENETUNREACH;
 820		goto out_release;
 821	}
 822
 823	rose->lci = rose_new_lci(rose->neighbour);
 824	if (!rose->lci) {
 825		err = -ENETUNREACH;
 826		goto out_release;
 827	}
 828
 829	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
 830		struct net_device *dev;
 831
 832		sock_reset_flag(sk, SOCK_ZAPPED);
 833
 834		dev = rose_dev_first();
 835		if (!dev) {
 836			err = -ENETUNREACH;
 837			goto out_release;
 838		}
 839
 840		user = ax25_findbyuid(current_euid());
 841		if (!user) {
 842			err = -EINVAL;
 843			dev_put(dev);
 844			goto out_release;
 845		}
 846
 847		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
 848		rose->source_call = user->call;
 849		rose->device      = dev;
 850		netdev_tracker_alloc(rose->device, &rose->dev_tracker,
 851				     GFP_KERNEL);
 852		ax25_uid_put(user);
 853
 854		rose_insert_socket(sk);		/* Finish the bind */
 855	}
 856	rose->dest_addr   = addr->srose_addr;
 857	rose->dest_call   = addr->srose_call;
 858	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
 859	rose->dest_ndigis = addr->srose_ndigis;
 860
 861	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 862		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 863		for (n = 0 ; n < addr->srose_ndigis ; n++)
 864			rose->dest_digis[n] = full_addr->srose_digis[n];
 865	} else {
 866		if (rose->dest_ndigis == 1) {
 867			rose->dest_digis[0] = addr->srose_digi;
 868		}
 869	}
 870
 871	/* Move to connecting socket, start sending Connect Requests */
 872	sock->state   = SS_CONNECTING;
 873	sk->sk_state     = TCP_SYN_SENT;
 874
 875	rose->state = ROSE_STATE_1;
 876
 877	rose->neighbour->use++;
 878
 879	rose_write_internal(sk, ROSE_CALL_REQUEST);
 880	rose_start_heartbeat(sk);
 881	rose_start_t1timer(sk);
 882
  883	/* Now wait for the call to be accepted or cleared, unless we are non-blocking */
 884	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
 885		err = -EINPROGRESS;
 886		goto out_release;
 887	}
 888
 889	/*
 890	 * A Connect Ack with Choke or timeout or failed routing will go to
 891	 * closed.
 892	 */
 893	if (sk->sk_state == TCP_SYN_SENT) {
 894		DEFINE_WAIT(wait);
 895
 896		for (;;) {
 897			prepare_to_wait(sk_sleep(sk), &wait,
 898					TASK_INTERRUPTIBLE);
 899			if (sk->sk_state != TCP_SYN_SENT)
 900				break;
 901			if (!signal_pending(current)) {
 902				release_sock(sk);
 903				schedule();
 904				lock_sock(sk);
 905				continue;
 906			}
 907			err = -ERESTARTSYS;
 908			break;
 909		}
 910		finish_wait(sk_sleep(sk), &wait);
 911
 912		if (err)
 913			goto out_release;
 914	}
 915
 916	if (sk->sk_state != TCP_ESTABLISHED) {
 917		sock->state = SS_UNCONNECTED;
 918		err = sock_error(sk);	/* Always set at this point */
 919		goto out_release;
 920	}
 921
 922	sock->state = SS_CONNECTED;
 923
 924out_release:
 925	release_sock(sk);
 926
 927	return err;
 928}
 929
 930static int rose_accept(struct socket *sock, struct socket *newsock,
 931		       struct proto_accept_arg *arg)
 932{
 933	struct sk_buff *skb;
 934	struct sock *newsk;
 935	DEFINE_WAIT(wait);
 936	struct sock *sk;
 937	int err = 0;
 938
 939	if ((sk = sock->sk) == NULL)
 940		return -EINVAL;
 941
 942	lock_sock(sk);
 943	if (sk->sk_type != SOCK_SEQPACKET) {
 944		err = -EOPNOTSUPP;
 945		goto out_release;
 946	}
 947
 948	if (sk->sk_state != TCP_LISTEN) {
 949		err = -EINVAL;
 950		goto out_release;
 951	}
 952
  953	/*
  954	 *	The receive queue this time is holding sockets ready to use
  955	 *	hooked onto the Call Request we saved
  956	 */
 957	for (;;) {
 958		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 959
 960		skb = skb_dequeue(&sk->sk_receive_queue);
 961		if (skb)
 962			break;
 963
 964		if (arg->flags & O_NONBLOCK) {
 965			err = -EWOULDBLOCK;
 966			break;
 967		}
 968		if (!signal_pending(current)) {
 969			release_sock(sk);
 970			schedule();
 971			lock_sock(sk);
 972			continue;
 973		}
 974		err = -ERESTARTSYS;
 975		break;
 976	}
 977	finish_wait(sk_sleep(sk), &wait);
 978	if (err)
 979		goto out_release;
 980
 981	newsk = skb->sk;
 982	sock_graft(newsk, newsock);
 983
 984	/* Now attach up the new socket */
 985	skb->sk = NULL;
 986	kfree_skb(skb);
 987	sk_acceptq_removed(sk);
 988
 989out_release:
 990	release_sock(sk);
 991
 992	return err;
 993}
 994
 995static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
 996	int peer)
 997{
 998	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
 999	struct sock *sk = sock->sk;
1000	struct rose_sock *rose = rose_sk(sk);
1001	int n;
1002
1003	memset(srose, 0, sizeof(*srose));
1004	if (peer != 0) {
1005		if (sk->sk_state != TCP_ESTABLISHED)
1006			return -ENOTCONN;
1007		srose->srose_family = AF_ROSE;
1008		srose->srose_addr   = rose->dest_addr;
1009		srose->srose_call   = rose->dest_call;
1010		srose->srose_ndigis = rose->dest_ndigis;
1011		for (n = 0; n < rose->dest_ndigis; n++)
1012			srose->srose_digis[n] = rose->dest_digis[n];
1013	} else {
1014		srose->srose_family = AF_ROSE;
1015		srose->srose_addr   = rose->source_addr;
1016		srose->srose_call   = rose->source_call;
1017		srose->srose_ndigis = rose->source_ndigis;
1018		for (n = 0; n < rose->source_ndigis; n++)
1019			srose->srose_digis[n] = rose->source_digis[n];
1020	}
1021
1022	return sizeof(struct full_sockaddr_rose);
1023}
1024
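/*
 *	Handle an incoming Call Request from a neighbour: parse the facilities,
 *	find a matching listening socket, clone it, and queue the request on
 *	the listener's receive queue for accept() to pick up.
 */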
1025int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
1026{
1027	struct sock *sk;
1028	struct sock *make;
1029	struct rose_sock *make_rose;
1030	struct rose_facilities_struct facilities;
1031	int n;
1032
1033	skb->sk = NULL;		/* Initially we don't know who it's for */
1034
1035	/*
1036	 *	skb->data points to the rose frame start
1037	 */
1038	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
1039
1040	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
1041				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
1042				   &facilities)) {
1043		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
1044		return 0;
1045	}
1046
1047	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
1048
1049	/*
1050	 * We can't accept the Call Request.
1051	 */
1052	if (sk == NULL || sk_acceptq_is_full(sk) ||
1053	    (make = rose_make_new(sk)) == NULL) {
1054		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
1055		return 0;
1056	}
1057
1058	skb->sk     = make;
1059	make->sk_state = TCP_ESTABLISHED;
1060	make_rose = rose_sk(make);
1061
1062	make_rose->lci           = lci;
1063	make_rose->dest_addr     = facilities.dest_addr;
1064	make_rose->dest_call     = facilities.dest_call;
1065	make_rose->dest_ndigis   = facilities.dest_ndigis;
1066	for (n = 0 ; n < facilities.dest_ndigis ; n++)
1067		make_rose->dest_digis[n] = facilities.dest_digis[n];
1068	make_rose->source_addr   = facilities.source_addr;
1069	make_rose->source_call   = facilities.source_call;
1070	make_rose->source_ndigis = facilities.source_ndigis;
1071	for (n = 0 ; n < facilities.source_ndigis ; n++)
1072		make_rose->source_digis[n] = facilities.source_digis[n];
1073	make_rose->neighbour     = neigh;
1074	make_rose->device        = dev;
1075	/* Caller got a reference for us. */
1076	netdev_tracker_alloc(make_rose->device, &make_rose->dev_tracker,
1077			     GFP_ATOMIC);
1078	make_rose->facilities    = facilities;
1079
1080	make_rose->neighbour->use++;
1081
1082	if (rose_sk(sk)->defer) {
1083		make_rose->state = ROSE_STATE_5;
1084	} else {
1085		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1086		make_rose->state = ROSE_STATE_3;
1087		rose_start_idletimer(make);
1088	}
1089
1090	make_rose->condition = 0x00;
1091	make_rose->vs        = 0;
1092	make_rose->va        = 0;
1093	make_rose->vr        = 0;
1094	make_rose->vl        = 0;
1095	sk_acceptq_added(sk);
1096
1097	rose_insert_socket(make);
1098
1099	skb_queue_head(&sk->sk_receive_queue, skb);
1100
1101	rose_start_heartbeat(make);
1102
1103	if (!sock_flag(sk, SOCK_DEAD))
1104		sk->sk_data_ready(sk);
1105
1106	return 1;
1107}
1108
1109static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1110{
1111	struct sock *sk = sock->sk;
1112	struct rose_sock *rose = rose_sk(sk);
1113	DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
1114	int err;
1115	struct full_sockaddr_rose srose;
1116	struct sk_buff *skb;
1117	unsigned char *asmptr;
1118	int n, size, qbit = 0;
1119
1120	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1121		return -EINVAL;
1122
1123	if (sock_flag(sk, SOCK_ZAPPED))
1124		return -EADDRNOTAVAIL;
1125
1126	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1127		send_sig(SIGPIPE, current, 0);
1128		return -EPIPE;
1129	}
1130
1131	if (rose->neighbour == NULL || rose->device == NULL)
1132		return -ENETUNREACH;
1133
1134	if (usrose != NULL) {
1135		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1136			return -EINVAL;
1137		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1138		memcpy(&srose, usrose, msg->msg_namelen);
1139		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1140		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1141			return -EISCONN;
1142		if (srose.srose_ndigis != rose->dest_ndigis)
1143			return -EISCONN;
1144		if (srose.srose_ndigis == rose->dest_ndigis) {
1145			for (n = 0 ; n < srose.srose_ndigis ; n++)
1146				if (ax25cmp(&rose->dest_digis[n],
1147					    &srose.srose_digis[n]))
1148					return -EISCONN;
1149		}
1150		if (srose.srose_family != AF_ROSE)
1151			return -EINVAL;
1152	} else {
1153		if (sk->sk_state != TCP_ESTABLISHED)
1154			return -ENOTCONN;
1155
1156		srose.srose_family = AF_ROSE;
1157		srose.srose_addr   = rose->dest_addr;
1158		srose.srose_call   = rose->dest_call;
1159		srose.srose_ndigis = rose->dest_ndigis;
1160		for (n = 0 ; n < rose->dest_ndigis ; n++)
1161			srose.srose_digis[n] = rose->dest_digis[n];
1162	}
1163
1164	/* Build a packet */
1165	/* Sanity check the packet size */
1166	if (len > 65535)
1167		return -EMSGSIZE;
1168
1169	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1170
1171	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1172		return err;
1173
1174	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1175
1176	/*
1177	 *	Put the data on the end
1178	 */
1179
1180	skb_reset_transport_header(skb);
1181	skb_put(skb, len);
1182
1183	err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1184	if (err) {
1185		kfree_skb(skb);
1186		return err;
1187	}
1188
1189	/*
1190	 *	If the Q BIT Include socket option is in force, the first
1191	 *	byte of the user data is the logical value of the Q Bit.
1192	 */
1193	if (rose->qbitincl) {
1194		qbit = skb->data[0];
1195		skb_pull(skb, 1);
1196	}
1197
1198	/*
1199	 *	Push down the ROSE header
1200	 */
1201	asmptr = skb_push(skb, ROSE_MIN_LEN);
1202
1203	/* Build a ROSE Network header */
1204	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1205	asmptr[1] = (rose->lci >> 0) & 0xFF;
1206	asmptr[2] = ROSE_DATA;
1207
1208	if (qbit)
1209		asmptr[0] |= ROSE_Q_BIT;
1210
1211	if (sk->sk_state != TCP_ESTABLISHED) {
1212		kfree_skb(skb);
1213		return -ENOTCONN;
1214	}
1215
1216#ifdef M_BIT
1217#define ROSE_PACLEN (256-ROSE_MIN_LEN)
1218	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1219		unsigned char header[ROSE_MIN_LEN];
1220		struct sk_buff *skbn;
1221		int frontlen;
1222		int lg;
1223
1224		/* Save a copy of the Header */
1225		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1226		skb_pull(skb, ROSE_MIN_LEN);
1227
1228		frontlen = skb_headroom(skb);
1229
1230		while (skb->len > 0) {
1231			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1232				kfree_skb(skb);
1233				return err;
1234			}
1235
1236			skbn->sk   = sk;
1237			skbn->free = 1;
1238			skbn->arp  = 1;
1239
1240			skb_reserve(skbn, frontlen);
1241
1242			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1243
1244			/* Copy the user data */
1245			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1246			skb_pull(skb, lg);
1247
1248			/* Duplicate the Header */
1249			skb_push(skbn, ROSE_MIN_LEN);
1250			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1251
1252			if (skb->len > 0)
1253				skbn->data[2] |= M_BIT;
1254
1255			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1256		}
1257
1258		skb->free = 1;
1259		kfree_skb(skb);
1260	} else {
1261		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1262	}
1263#else
1264	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1265#endif
1266
1267	rose_kick(sk);
1268
1269	return len;
1270}
1271
1272
1273static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1274			int flags)
1275{
1276	struct sock *sk = sock->sk;
1277	struct rose_sock *rose = rose_sk(sk);
1278	size_t copied;
1279	unsigned char *asmptr;
1280	struct sk_buff *skb;
1281	int n, er, qbit;
1282
1283	/*
1284	 * This works for seqpacket too. The receiver has ordered the queue for
1285	 * us! We do one quick check first though
1286	 */
1287	if (sk->sk_state != TCP_ESTABLISHED)
1288		return -ENOTCONN;
1289
1290	/* Now we can treat all alike */
1291	skb = skb_recv_datagram(sk, flags, &er);
1292	if (!skb)
1293		return er;
1294
1295	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1296
1297	skb_pull(skb, ROSE_MIN_LEN);
1298
1299	if (rose->qbitincl) {
1300		asmptr  = skb_push(skb, 1);
1301		*asmptr = qbit;
1302	}
1303
1304	skb_reset_transport_header(skb);
1305	copied     = skb->len;
1306
1307	if (copied > size) {
1308		copied = size;
1309		msg->msg_flags |= MSG_TRUNC;
1310	}
1311
1312	skb_copy_datagram_msg(skb, 0, msg, copied);
1313
1314	if (msg->msg_name) {
1315		struct sockaddr_rose *srose;
1316		DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
1317				 msg->msg_name);
1318
1319		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1320		srose = msg->msg_name;
1321		srose->srose_family = AF_ROSE;
1322		srose->srose_addr   = rose->dest_addr;
1323		srose->srose_call   = rose->dest_call;
1324		srose->srose_ndigis = rose->dest_ndigis;
1325		for (n = 0 ; n < rose->dest_ndigis ; n++)
1326			full_srose->srose_digis[n] = rose->dest_digis[n];
1327		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1328	}
1329
1330	skb_free_datagram(sk, skb);
1331
1332	return copied;
1333}
1334
1335
1336static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1337{
1338	struct sock *sk = sock->sk;
1339	struct rose_sock *rose = rose_sk(sk);
1340	void __user *argp = (void __user *)arg;
1341
1342	switch (cmd) {
1343	case TIOCOUTQ: {
1344		long amount;
1345
1346		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1347		if (amount < 0)
1348			amount = 0;
1349		return put_user(amount, (unsigned int __user *) argp);
1350	}
1351
1352	case TIOCINQ: {
1353		struct sk_buff *skb;
1354		long amount = 0L;
1355
1356		spin_lock_irq(&sk->sk_receive_queue.lock);
1357		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1358			amount = skb->len;
1359		spin_unlock_irq(&sk->sk_receive_queue.lock);
1360		return put_user(amount, (unsigned int __user *) argp);
1361	}
1362
1363	case SIOCGIFADDR:
1364	case SIOCSIFADDR:
1365	case SIOCGIFDSTADDR:
1366	case SIOCSIFDSTADDR:
1367	case SIOCGIFBRDADDR:
1368	case SIOCSIFBRDADDR:
1369	case SIOCGIFNETMASK:
1370	case SIOCSIFNETMASK:
1371	case SIOCGIFMETRIC:
1372	case SIOCSIFMETRIC:
1373		return -EINVAL;
1374
1375	case SIOCADDRT:
1376	case SIOCDELRT:
1377	case SIOCRSCLRRT:
1378		if (!capable(CAP_NET_ADMIN))
1379			return -EPERM;
1380		return rose_rt_ioctl(cmd, argp);
1381
1382	case SIOCRSGCAUSE: {
1383		struct rose_cause_struct rose_cause;
1384		rose_cause.cause      = rose->cause;
1385		rose_cause.diagnostic = rose->diagnostic;
1386		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1387	}
1388
1389	case SIOCRSSCAUSE: {
1390		struct rose_cause_struct rose_cause;
1391		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1392			return -EFAULT;
1393		rose->cause      = rose_cause.cause;
1394		rose->diagnostic = rose_cause.diagnostic;
1395		return 0;
1396	}
1397
1398	case SIOCRSSL2CALL:
1399		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1400		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1401			ax25_listen_release(&rose_callsign, NULL);
1402		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1403			return -EFAULT;
1404		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1405			return ax25_listen_register(&rose_callsign, NULL);
1406
1407		return 0;
1408
1409	case SIOCRSGL2CALL:
1410		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1411
1412	case SIOCRSACCEPT:
1413		if (rose->state == ROSE_STATE_5) {
1414			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1415			rose_start_idletimer(sk);
1416			rose->condition = 0x00;
1417			rose->vs        = 0;
1418			rose->va        = 0;
1419			rose->vr        = 0;
1420			rose->vl        = 0;
1421			rose->state     = ROSE_STATE_3;
1422		}
1423		return 0;
1424
1425	default:
1426		return -ENOIOCTLCMD;
1427	}
1428
1429	return 0;
1430}
1431
1432#ifdef CONFIG_PROC_FS
1433static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1434	__acquires(rose_list_lock)
1435{
1436	spin_lock_bh(&rose_list_lock);
1437	return seq_hlist_start_head(&rose_list, *pos);
1438}
1439
1440static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1441{
1442	return seq_hlist_next(v, &rose_list, pos);
1443}
1444
1445static void rose_info_stop(struct seq_file *seq, void *v)
1446	__releases(rose_list_lock)
1447{
1448	spin_unlock_bh(&rose_list_lock);
1449}
1450
1451static int rose_info_show(struct seq_file *seq, void *v)
1452{
1453	char buf[11], rsbuf[11];
1454
1455	if (v == SEQ_START_TOKEN)
1456		seq_puts(seq,
1457			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1458
1459	else {
1460		struct sock *s = sk_entry(v);
1461		struct rose_sock *rose = rose_sk(s);
1462		const char *devname, *callsign;
1463		const struct net_device *dev = rose->device;
1464
1465		if (!dev)
1466			devname = "???";
1467		else
1468			devname = dev->name;
1469
1470		seq_printf(seq, "%-10s %-9s ",
1471			   rose2asc(rsbuf, &rose->dest_addr),
1472			   ax2asc(buf, &rose->dest_call));
1473
1474		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1475			callsign = "??????-?";
1476		else
1477			callsign = ax2asc(buf, &rose->source_call);
1478
1479		seq_printf(seq,
1480			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1481			rose2asc(rsbuf, &rose->source_addr),
1482			callsign,
1483			devname,
1484			rose->lci & 0x0FFF,
1485			(rose->neighbour) ? rose->neighbour->number : 0,
1486			rose->state,
1487			rose->vs,
1488			rose->vr,
1489			rose->va,
1490			ax25_display_timer(&rose->timer) / HZ,
1491			rose->t1 / HZ,
1492			rose->t2 / HZ,
1493			rose->t3 / HZ,
1494			rose->hb / HZ,
1495			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1496			rose->idle / (60 * HZ),
1497			sk_wmem_alloc_get(s),
1498			sk_rmem_alloc_get(s),
1499			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1500	}
1501
1502	return 0;
1503}
1504
1505static const struct seq_operations rose_info_seqops = {
1506	.start = rose_info_start,
1507	.next = rose_info_next,
1508	.stop = rose_info_stop,
1509	.show = rose_info_show,
1510};
1511#endif	/* CONFIG_PROC_FS */
1512
1513static const struct net_proto_family rose_family_ops = {
1514	.family		=	PF_ROSE,
1515	.create		=	rose_create,
1516	.owner		=	THIS_MODULE,
1517};
1518
1519static const struct proto_ops rose_proto_ops = {
1520	.family		=	PF_ROSE,
1521	.owner		=	THIS_MODULE,
1522	.release	=	rose_release,
1523	.bind		=	rose_bind,
1524	.connect	=	rose_connect,
1525	.socketpair	=	sock_no_socketpair,
1526	.accept		=	rose_accept,
1527	.getname	=	rose_getname,
1528	.poll		=	datagram_poll,
1529	.ioctl		=	rose_ioctl,
1530	.gettstamp	=	sock_gettstamp,
1531	.listen		=	rose_listen,
1532	.shutdown	=	sock_no_shutdown,
1533	.setsockopt	=	rose_setsockopt,
1534	.getsockopt	=	rose_getsockopt,
1535	.sendmsg	=	rose_sendmsg,
1536	.recvmsg	=	rose_recvmsg,
1537	.mmap		=	sock_no_mmap,
1538};
1539
1540static struct notifier_block rose_dev_notifier = {
1541	.notifier_call	=	rose_device_event,
1542};
1543
1544static struct net_device **dev_rose;
1545
1546static struct ax25_protocol rose_pid = {
1547	.pid	= AX25_P_ROSE,
1548	.func	= rose_route_frame
1549};
1550
1551static struct ax25_linkfail rose_linkfail_notifier = {
1552	.func	= rose_link_failed
1553};
1554
1555static int __init rose_proto_init(void)
1556{
1557	int i;
1558	int rc;
1559
1560	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1561		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1562		rc = -EINVAL;
1563		goto out;
1564	}
1565
1566	rc = proto_register(&rose_proto, 0);
1567	if (rc != 0)
1568		goto out;
1569
1570	rose_callsign = null_ax25_address;
1571
1572	dev_rose = kcalloc(rose_ndevs, sizeof(struct net_device *),
1573			   GFP_KERNEL);
1574	if (dev_rose == NULL) {
1575		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1576		rc = -ENOMEM;
1577		goto out_proto_unregister;
1578	}
1579
1580	for (i = 0; i < rose_ndevs; i++) {
1581		struct net_device *dev;
1582		char name[IFNAMSIZ];
1583
1584		sprintf(name, "rose%d", i);
1585		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
1586		if (!dev) {
1587			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1588			rc = -ENOMEM;
1589			goto fail;
1590		}
1591		rc = register_netdev(dev);
1592		if (rc) {
1593			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1594			free_netdev(dev);
1595			goto fail;
1596		}
1597		rose_set_lockdep_key(dev);
1598		dev_rose[i] = dev;
1599	}
1600
1601	sock_register(&rose_family_ops);
1602	register_netdevice_notifier(&rose_dev_notifier);
1603
1604	ax25_register_pid(&rose_pid);
1605	ax25_linkfail_register(&rose_linkfail_notifier);
1606
1607#ifdef CONFIG_SYSCTL
1608	rose_register_sysctl();
1609#endif
1610	rose_loopback_init();
1611
1612	rose_add_loopback_neigh();
1613
1614	proc_create_seq("rose", 0444, init_net.proc_net, &rose_info_seqops);
1615	proc_create_seq("rose_neigh", 0444, init_net.proc_net,
1616		    &rose_neigh_seqops);
1617	proc_create_seq("rose_nodes", 0444, init_net.proc_net,
1618		    &rose_node_seqops);
1619	proc_create_seq("rose_routes", 0444, init_net.proc_net,
1620		    &rose_route_seqops);
1621out:
1622	return rc;
1623fail:
1624	while (--i >= 0) {
1625		unregister_netdev(dev_rose[i]);
1626		free_netdev(dev_rose[i]);
1627	}
1628	kfree(dev_rose);
1629out_proto_unregister:
1630	proto_unregister(&rose_proto);
1631	goto out;
1632}
1633module_init(rose_proto_init);
1634
1635module_param(rose_ndevs, int, 0);
1636MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
1637
1638MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1639MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1640MODULE_LICENSE("GPL");
1641MODULE_ALIAS_NETPROTO(PF_ROSE);
1642
1643static void __exit rose_exit(void)
1644{
1645	int i;
1646
1647	remove_proc_entry("rose", init_net.proc_net);
1648	remove_proc_entry("rose_neigh", init_net.proc_net);
1649	remove_proc_entry("rose_nodes", init_net.proc_net);
1650	remove_proc_entry("rose_routes", init_net.proc_net);
1651	rose_loopback_clear();
1652
1653	rose_rt_free();
1654
1655	ax25_protocol_release(AX25_P_ROSE);
1656	ax25_linkfail_release(&rose_linkfail_notifier);
1657
1658	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1659		ax25_listen_release(&rose_callsign, NULL);
1660
1661#ifdef CONFIG_SYSCTL
1662	rose_unregister_sysctl();
1663#endif
1664	unregister_netdevice_notifier(&rose_dev_notifier);
1665
1666	sock_unregister(PF_ROSE);
1667
1668	for (i = 0; i < rose_ndevs; i++) {
1669		struct net_device *dev = dev_rose[i];
1670
1671		if (dev) {
1672			unregister_netdev(dev);
1673			free_netdev(dev);
1674		}
1675	}
1676
1677	kfree(dev_rose);
1678	proto_unregister(&rose_proto);
1679}
1680
1681module_exit(rose_exit);
v4.6
 
   1/*
   2 * This program is free software; you can redistribute it and/or modify
   3 * it under the terms of the GNU General Public License as published by
   4 * the Free Software Foundation; either version 2 of the License, or
   5 * (at your option) any later version.
   6 *
   7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
   8 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
   9 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
  10 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
  11 */
  12
  13#include <linux/capability.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/init.h>
  17#include <linux/errno.h>
  18#include <linux/types.h>
  19#include <linux/socket.h>
  20#include <linux/in.h>
  21#include <linux/slab.h>
  22#include <linux/kernel.h>
  23#include <linux/sched.h>
  24#include <linux/spinlock.h>
  25#include <linux/timer.h>
  26#include <linux/string.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/stat.h>
  30#include <net/net_namespace.h>
  31#include <net/ax25.h>
  32#include <linux/inet.h>
  33#include <linux/netdevice.h>
  34#include <linux/if_arp.h>
  35#include <linux/skbuff.h>
  36#include <net/sock.h>
  37#include <asm/uaccess.h>
  38#include <linux/fcntl.h>
  39#include <linux/termios.h>
  40#include <linux/mm.h>
  41#include <linux/interrupt.h>
  42#include <linux/notifier.h>
  43#include <net/rose.h>
  44#include <linux/proc_fs.h>
  45#include <linux/seq_file.h>
  46#include <net/tcp_states.h>
  47#include <net/ip.h>
  48#include <net/arp.h>
  49
  50static int rose_ndevs = 10;
  51
  52int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
  53int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
  54int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
  55int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
  56int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
  57int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
  58int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
  59int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
  60int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
  61int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
  62
  63static HLIST_HEAD(rose_list);
  64static DEFINE_SPINLOCK(rose_list_lock);
  65
  66static const struct proto_ops rose_proto_ops;
  67
  68ax25_address rose_callsign;
  69
  70/*
  71 * ROSE network devices are virtual network devices encapsulating ROSE
  72 * frames into AX.25 which will be sent through an AX.25 device, so form a
  73 * special "super class" of normal net devices; split their locks off into a
  74 * separate class since they always nest.
  75 */
  76static struct lock_class_key rose_netdev_xmit_lock_key;
  77static struct lock_class_key rose_netdev_addr_lock_key;
  78
  79static void rose_set_lockdep_one(struct net_device *dev,
  80				 struct netdev_queue *txq,
  81				 void *_unused)
  82{
  83	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
  84}
  85
  86static void rose_set_lockdep_key(struct net_device *dev)
  87{
  88	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
  89	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
  90}
  91
  92/*
  93 *	Convert a ROSE address into text.
  94 */
  95char *rose2asc(char *buf, const rose_address *addr)
  96{
  97	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
  98	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
  99	    addr->rose_addr[4] == 0x00) {
 100		strcpy(buf, "*");
 101	} else {
 102		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
 103						addr->rose_addr[1] & 0xFF,
 104						addr->rose_addr[2] & 0xFF,
 105						addr->rose_addr[3] & 0xFF,
 106						addr->rose_addr[4] & 0xFF);
 107	}
 108
 109	return buf;
 110}
 111
 112/*
 113 *	Compare two ROSE addresses, 0 == equal.
 114 */
 115int rosecmp(rose_address *addr1, rose_address *addr2)
 116{
 117	int i;
 118
 119	for (i = 0; i < 5; i++)
 120		if (addr1->rose_addr[i] != addr2->rose_addr[i])
 121			return 1;
 122
 123	return 0;
 124}
 125
 126/*
 127 *	Compare two ROSE addresses for only mask digits, 0 == equal.
 128 */
 129int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
 
 130{
 131	unsigned int i, j;
 132
 133	if (mask > 10)
 134		return 1;
 135
 136	for (i = 0; i < mask; i++) {
 137		j = i / 2;
 138
 139		if ((i % 2) != 0) {
 140			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
 141				return 1;
 142		} else {
 143			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
 144				return 1;
 145		}
 146	}
 147
 148	return 0;
 149}
 150
 151/*
 152 *	Socket removal during an interrupt is now safe.
 153 */
 154static void rose_remove_socket(struct sock *sk)
 155{
 156	spin_lock_bh(&rose_list_lock);
 157	sk_del_node_init(sk);
 158	spin_unlock_bh(&rose_list_lock);
 159}
 160
 161/*
 162 *	Kill all bound sockets on a broken link layer connection to a
 163 *	particular neighbour.
 164 */
 165void rose_kill_by_neigh(struct rose_neigh *neigh)
 166{
 167	struct sock *s;
 168
 169	spin_lock_bh(&rose_list_lock);
 170	sk_for_each(s, &rose_list) {
 171		struct rose_sock *rose = rose_sk(s);
 172
 173		if (rose->neighbour == neigh) {
 174			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 175			rose->neighbour->use--;
 176			rose->neighbour = NULL;
 177		}
 178	}
 179	spin_unlock_bh(&rose_list_lock);
 180}
 181
 182/*
 183 *	Kill all bound sockets on a dropped device.
 184 */
 185static void rose_kill_by_device(struct net_device *dev)
 186{
 187	struct sock *s;
 
 
 
 188
 
 
 
 189	spin_lock_bh(&rose_list_lock);
 190	sk_for_each(s, &rose_list) {
 191		struct rose_sock *rose = rose_sk(s);
 
 
 
 
 
 
 
 
 
 
 192
 
 
 
 
 
 193		if (rose->device == dev) {
 194			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 195			if (rose->neighbour)
 196				rose->neighbour->use--;
 
 197			rose->device = NULL;
 198		}
 
 
 
 
 199	}
 200	spin_unlock_bh(&rose_list_lock);
 
 201}
 202
 203/*
 204 *	Handle device status changes.
 205 */
 206static int rose_device_event(struct notifier_block *this,
 207			     unsigned long event, void *ptr)
 208{
 209	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 210
 211	if (!net_eq(dev_net(dev), &init_net))
 212		return NOTIFY_DONE;
 213
 214	if (event != NETDEV_DOWN)
 215		return NOTIFY_DONE;
 216
 217	switch (dev->type) {
 218	case ARPHRD_ROSE:
 219		rose_kill_by_device(dev);
 220		break;
 221	case ARPHRD_AX25:
 222		rose_link_device_down(dev);
 223		rose_rt_device_down(dev);
 224		break;
 225	}
 226
 227	return NOTIFY_DONE;
 228}
 229
 230/*
 231 *	Add a socket to the bound sockets list.
 232 */
 233static void rose_insert_socket(struct sock *sk)
 234{
 235
 236	spin_lock_bh(&rose_list_lock);
 237	sk_add_node(sk, &rose_list);
 238	spin_unlock_bh(&rose_list_lock);
 239}
 240
 241/*
 242 *	Find a socket that wants to accept the Call Request we just
 243 *	received.
 244 */
 245static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 246{
 247	struct sock *s;
 248
 249	spin_lock_bh(&rose_list_lock);
 250	sk_for_each(s, &rose_list) {
 251		struct rose_sock *rose = rose_sk(s);
 252
 253		if (!rosecmp(&rose->source_addr, addr) &&
 254		    !ax25cmp(&rose->source_call, call) &&
 255		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
 256			goto found;
 257	}
 258
 259	sk_for_each(s, &rose_list) {
 260		struct rose_sock *rose = rose_sk(s);
 261
 262		if (!rosecmp(&rose->source_addr, addr) &&
 263		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
 264		    s->sk_state == TCP_LISTEN)
 265			goto found;
 266	}
 267	s = NULL;
 268found:
 269	spin_unlock_bh(&rose_list_lock);
 270	return s;
 271}
 272
 273/*
 274 *	Find a connected ROSE socket given my LCI and device.
 275 */
 276struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
 277{
 278	struct sock *s;
 279
 280	spin_lock_bh(&rose_list_lock);
 281	sk_for_each(s, &rose_list) {
 282		struct rose_sock *rose = rose_sk(s);
 283
 284		if (rose->lci == lci && rose->neighbour == neigh)
 285			goto found;
 286	}
 287	s = NULL;
 288found:
 289	spin_unlock_bh(&rose_list_lock);
 290	return s;
 291}
 292
 293/*
 294 *	Find a unique LCI for a given device.
 295 */
 296unsigned int rose_new_lci(struct rose_neigh *neigh)
 297{
 298	int lci;
 299
 300	if (neigh->dce_mode) {
 301		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
 302			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 303				return lci;
 304	} else {
 305		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
 306			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 307				return lci;
 308	}
 309
 310	return 0;
 311}
 312
 313/*
 314 *	Deferred destroy.
 315 */
 316void rose_destroy_socket(struct sock *);
 317
 318/*
 319 *	Handler for deferred kills.
 320 */
 321static void rose_destroy_timer(unsigned long data)
 322{
 323	rose_destroy_socket((struct sock *)data);
 
 
 324}
 325
 326/*
 327 *	This is called from user mode and the timers. Thus it protects itself
 328 *	against interrupt users but doesn't worry about being called during
 329 *	work.  Once it is removed from the queue no interrupt or bottom half
 330 *	will touch it and we are (fairly 8-) ) safe.
 331 */
 332void rose_destroy_socket(struct sock *sk)
 333{
 334	struct sk_buff *skb;
 335
 336	rose_remove_socket(sk);
 337	rose_stop_heartbeat(sk);
 338	rose_stop_idletimer(sk);
 339	rose_stop_timer(sk);
 340
 341	rose_clear_queues(sk);		/* Flush the queues */
 342
 343	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 344		if (skb->sk != sk) {	/* A pending connection */
 345			/* Queue the unaccepted socket for death */
 346			sock_set_flag(skb->sk, SOCK_DEAD);
 347			rose_start_heartbeat(skb->sk);
 348			rose_sk(skb->sk)->state = ROSE_STATE_0;
 349		}
 350
 351		kfree_skb(skb);
 352	}
 353
 354	if (sk_has_allocations(sk)) {
 355		/* Defer: outstanding buffers */
 356		setup_timer(&sk->sk_timer, rose_destroy_timer,
 357				(unsigned long)sk);
 358		sk->sk_timer.expires  = jiffies + 10 * HZ;
 359		add_timer(&sk->sk_timer);
 360	} else
 361		sock_put(sk);
 362}
 363
 364/*
 365 *	Handling for system calls applied via the various interfaces to a
 366 *	ROSE socket object.
 367 */
 368
 369static int rose_setsockopt(struct socket *sock, int level, int optname,
 370	char __user *optval, unsigned int optlen)
 371{
 372	struct sock *sk = sock->sk;
 373	struct rose_sock *rose = rose_sk(sk);
 374	int opt;
 375
 376	if (level != SOL_ROSE)
 377		return -ENOPROTOOPT;
 378
 379	if (optlen < sizeof(int))
 380		return -EINVAL;
 381
 382	if (get_user(opt, (int __user *)optval))
 383		return -EFAULT;
 384
 385	switch (optname) {
 386	case ROSE_DEFER:
 387		rose->defer = opt ? 1 : 0;
 388		return 0;
 389
 390	case ROSE_T1:
 391		if (opt < 1)
 392			return -EINVAL;
 393		rose->t1 = opt * HZ;
 394		return 0;
 395
 396	case ROSE_T2:
 397		if (opt < 1)
 398			return -EINVAL;
 399		rose->t2 = opt * HZ;
 400		return 0;
 401
 402	case ROSE_T3:
 403		if (opt < 1)
 404			return -EINVAL;
 405		rose->t3 = opt * HZ;
 406		return 0;
 407
 408	case ROSE_HOLDBACK:
 409		if (opt < 1)
 410			return -EINVAL;
 411		rose->hb = opt * HZ;
 412		return 0;
 413
 414	case ROSE_IDLE:
 415		if (opt < 0)
 416			return -EINVAL;
 417		rose->idle = opt * 60 * HZ;
 418		return 0;
 419
 420	case ROSE_QBITINCL:
 421		rose->qbitincl = opt ? 1 : 0;
 422		return 0;
 423
 424	default:
 425		return -ENOPROTOOPT;
 426	}
 427}
 428
 429static int rose_getsockopt(struct socket *sock, int level, int optname,
 430	char __user *optval, int __user *optlen)
 431{
 432	struct sock *sk = sock->sk;
 433	struct rose_sock *rose = rose_sk(sk);
 434	int val = 0;
 435	int len;
 436
 437	if (level != SOL_ROSE)
 438		return -ENOPROTOOPT;
 439
 440	if (get_user(len, optlen))
 441		return -EFAULT;
 442
 443	if (len < 0)
 444		return -EINVAL;
 445
 446	switch (optname) {
 447	case ROSE_DEFER:
 448		val = rose->defer;
 449		break;
 450
 451	case ROSE_T1:
 452		val = rose->t1 / HZ;
 453		break;
 454
 455	case ROSE_T2:
 456		val = rose->t2 / HZ;
 457		break;
 458
 459	case ROSE_T3:
 460		val = rose->t3 / HZ;
 461		break;
 462
 463	case ROSE_HOLDBACK:
 464		val = rose->hb / HZ;
 465		break;
 466
 467	case ROSE_IDLE:
 468		val = rose->idle / (60 * HZ);
 469		break;
 470
 471	case ROSE_QBITINCL:
 472		val = rose->qbitincl;
 473		break;
 474
 475	default:
 476		return -ENOPROTOOPT;
 477	}
 478
 479	len = min_t(unsigned int, len, sizeof(int));
 480
 481	if (put_user(len, optlen))
 482		return -EFAULT;
 483
 484	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
 485}
 486
 487static int rose_listen(struct socket *sock, int backlog)
 488{
 489	struct sock *sk = sock->sk;
 490
 491	if (sk->sk_state != TCP_LISTEN) {
 492		struct rose_sock *rose = rose_sk(sk);
 493
 494		rose->dest_ndigis = 0;
 495		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
 496		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
 497		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
 498		sk->sk_max_ack_backlog = backlog;
 499		sk->sk_state           = TCP_LISTEN;
 500		return 0;
 501	}
 502
 503	return -EOPNOTSUPP;
 504}
 505
 506static struct proto rose_proto = {
 507	.name	  = "ROSE",
 508	.owner	  = THIS_MODULE,
 509	.obj_size = sizeof(struct rose_sock),
 510};
 511
 512static int rose_create(struct net *net, struct socket *sock, int protocol,
 513		       int kern)
 514{
 515	struct sock *sk;
 516	struct rose_sock *rose;
 517
 518	if (!net_eq(net, &init_net))
 519		return -EAFNOSUPPORT;
 520
 521	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 522		return -ESOCKTNOSUPPORT;
 523
 524	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
 525	if (sk == NULL)
 526		return -ENOMEM;
 527
 528	rose = rose_sk(sk);
 529
 530	sock_init_data(sock, sk);
 531
 532	skb_queue_head_init(&rose->ack_queue);
 533#ifdef M_BIT
 534	skb_queue_head_init(&rose->frag_queue);
 535	rose->fraglen    = 0;
 536#endif
 537
 538	sock->ops    = &rose_proto_ops;
 539	sk->sk_protocol = protocol;
 540
 541	init_timer(&rose->timer);
 542	init_timer(&rose->idletimer);
 543
 544	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
 545	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
 546	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
 547	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
 548	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
 549
 550	rose->state = ROSE_STATE_0;
 551
 552	return 0;
 553}
 554
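/*
 *	Create the child socket for an incoming call, copying the listening
 *	socket's type, buffer sizes, flags and ROSE timer settings.
 */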
 555static struct sock *rose_make_new(struct sock *osk)
 556{
 557	struct sock *sk;
 558	struct rose_sock *rose, *orose;
 559
 560	if (osk->sk_type != SOCK_SEQPACKET)
 561		return NULL;
 562
 563	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
 564	if (sk == NULL)
 565		return NULL;
 566
 567	rose = rose_sk(sk);
 568
 569	sock_init_data(NULL, sk);
 570
 571	skb_queue_head_init(&rose->ack_queue);
 572#ifdef M_BIT
 573	skb_queue_head_init(&rose->frag_queue);
 574	rose->fraglen  = 0;
 575#endif
 576
 577	sk->sk_type     = osk->sk_type;
 578	sk->sk_priority = osk->sk_priority;
 579	sk->sk_protocol = osk->sk_protocol;
 580	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 581	sk->sk_sndbuf   = osk->sk_sndbuf;
 582	sk->sk_state    = TCP_ESTABLISHED;
 583	sock_copy_flags(sk, osk);
 584
 585	init_timer(&rose->timer);
 586	init_timer(&rose->idletimer);
 587
 588	orose		= rose_sk(osk);
 589	rose->t1	= orose->t1;
 590	rose->t2	= orose->t2;
 591	rose->t3	= orose->t3;
 592	rose->hb	= orose->hb;
 593	rose->idle	= orose->idle;
 594	rose->defer	= orose->defer;
 595	rose->device	= orose->device;
 596	rose->qbitincl	= orose->qbitincl;
 597
 598	return sk;
 599}
 600
 601static int rose_release(struct socket *sock)
 602{
 603	struct sock *sk = sock->sk;
 604	struct rose_sock *rose;
 605
 606	if (sk == NULL) return 0;
 607
 608	sock_hold(sk);
 609	sock_orphan(sk);
 610	lock_sock(sk);
 611	rose = rose_sk(sk);
 612
 613	switch (rose->state) {
 614	case ROSE_STATE_0:
 615		release_sock(sk);
 616		rose_disconnect(sk, 0, -1, -1);
 617		lock_sock(sk);
 618		rose_destroy_socket(sk);
 619		break;
 620
 621	case ROSE_STATE_2:
 622		rose->neighbour->use--;
 623		release_sock(sk);
 624		rose_disconnect(sk, 0, -1, -1);
 625		lock_sock(sk);
 626		rose_destroy_socket(sk);
 627		break;
 628
 629	case ROSE_STATE_1:
 630	case ROSE_STATE_3:
 631	case ROSE_STATE_4:
 632	case ROSE_STATE_5:
 633		rose_clear_queues(sk);
 634		rose_stop_idletimer(sk);
 635		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
 636		rose_start_t3timer(sk);
 637		rose->state  = ROSE_STATE_2;
 638		sk->sk_state    = TCP_CLOSE;
 639		sk->sk_shutdown |= SEND_SHUTDOWN;
 640		sk->sk_state_change(sk);
 641		sock_set_flag(sk, SOCK_DEAD);
 642		sock_set_flag(sk, SOCK_DESTROY);
 643		break;
 644
 645	default:
 646		break;
 647	}
 648
 649	sock->sk = NULL;
 650	release_sock(sk);
 651	sock_put(sk);
 652
 653	return 0;
 654}
 655
 656static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 657{
 658	struct sock *sk = sock->sk;
 659	struct rose_sock *rose = rose_sk(sk);
 660	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 661	struct net_device *dev;
 662	ax25_address *source;
 663	ax25_uid_assoc *user;
 664	int n;
 665
 666	if (!sock_flag(sk, SOCK_ZAPPED))
 667		return -EINVAL;
 668
 669	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 670		return -EINVAL;
 671
 672	if (addr->srose_family != AF_ROSE)
 673		return -EINVAL;
 674
 675	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 676		return -EINVAL;
 677
 678	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 679		return -EINVAL;
 680
 681	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
 682		return -EADDRNOTAVAIL;
 683
 684	source = &addr->srose_call;
 685
 686	user = ax25_findbyuid(current_euid());
 687	if (user) {
 688		rose->source_call = user->call;
 689		ax25_uid_put(user);
 690	} else {
 691		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
 692			return -EACCES;
 693		rose->source_call   = *source;
 694	}
 695
 696	rose->source_addr   = addr->srose_addr;
 697	rose->device        = dev;
 698	rose->source_ndigis = addr->srose_ndigis;
 699
 700	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 701		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 702		for (n = 0 ; n < addr->srose_ndigis ; n++)
 703			rose->source_digis[n] = full_addr->srose_digis[n];
 704	} else {
 705		if (rose->source_ndigis == 1) {
 706			rose->source_digis[0] = addr->srose_digi;
 707		}
 708	}
 709
 710	rose_insert_socket(sk);
 711
 712	sock_reset_flag(sk, SOCK_ZAPPED);
 713
 714	return 0;
 715}
 716
 717static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
 718{
 719	struct sock *sk = sock->sk;
 720	struct rose_sock *rose = rose_sk(sk);
 721	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 722	unsigned char cause, diagnostic;
 723	struct net_device *dev;
 724	ax25_uid_assoc *user;
 725	int n, err = 0;
 726
 727	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 728		return -EINVAL;
 729
 730	if (addr->srose_family != AF_ROSE)
 731		return -EINVAL;
 732
 733	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 734		return -EINVAL;
 735
 736	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 737		return -EINVAL;
 738
 739	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
 740	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
 741		return -EINVAL;
 742
 743	lock_sock(sk);
 744
 745	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
 746	/* Connect completed during an ERESTARTSYS event */
 747		sock->state = SS_CONNECTED;
 748		goto out_release;
 749	}
 750
 751	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
 752		sock->state = SS_UNCONNECTED;
 753		err = -ECONNREFUSED;
 754		goto out_release;
 755	}
 756
 757	if (sk->sk_state == TCP_ESTABLISHED) {
 758		/* No reconnect on a seqpacket socket */
 759		err = -EISCONN;
 760		goto out_release;
 761	}
 762
 763	sk->sk_state   = TCP_CLOSE;
 764	sock->state = SS_UNCONNECTED;
 765
 766	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
 767					 &diagnostic, 0);
 768	if (!rose->neighbour) {
 769		err = -ENETUNREACH;
 770		goto out_release;
 771	}
 772
 773	rose->lci = rose_new_lci(rose->neighbour);
 774	if (!rose->lci) {
 775		err = -ENETUNREACH;
 776		goto out_release;
 777	}
 778
 779	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
 780		sock_reset_flag(sk, SOCK_ZAPPED);
 781
 782		if ((dev = rose_dev_first()) == NULL) {
 783			err = -ENETUNREACH;
 784			goto out_release;
 785		}
 786
 787		user = ax25_findbyuid(current_euid());
 788		if (!user) {
 789			err = -EINVAL;
 790			goto out_release;
 791		}
 792
 793		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
 794		rose->source_call = user->call;
 795		rose->device      = dev;
 796		ax25_uid_put(user);
 797
 798		rose_insert_socket(sk);		/* Finish the bind */
 799	}
 800	rose->dest_addr   = addr->srose_addr;
 801	rose->dest_call   = addr->srose_call;
 802	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
 803	rose->dest_ndigis = addr->srose_ndigis;
 804
 805	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 806		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 807		for (n = 0 ; n < addr->srose_ndigis ; n++)
 808			rose->dest_digis[n] = full_addr->srose_digis[n];
 809	} else {
 810		if (rose->dest_ndigis == 1) {
 811			rose->dest_digis[0] = addr->srose_digi;
 812		}
 813	}
 814
 815	/* Move to connecting socket, start sending Connect Requests */
 816	sock->state   = SS_CONNECTING;
 817	sk->sk_state     = TCP_SYN_SENT;
 818
 819	rose->state = ROSE_STATE_1;
 820
 821	rose->neighbour->use++;
 822
 823	rose_write_internal(sk, ROSE_CALL_REQUEST);
 824	rose_start_heartbeat(sk);
 825	rose_start_t1timer(sk);
 826
 827	/* Now the loop */
 828	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
 829		err = -EINPROGRESS;
 830		goto out_release;
 831	}
 832
 833	/*
 834	 * A Connect Ack with Choke or timeout or failed routing will go to
 835	 * closed.
 836	 */
 837	if (sk->sk_state == TCP_SYN_SENT) {
 838		DEFINE_WAIT(wait);
 839
 840		for (;;) {
 841			prepare_to_wait(sk_sleep(sk), &wait,
 842					TASK_INTERRUPTIBLE);
 843			if (sk->sk_state != TCP_SYN_SENT)
 844				break;
 845			if (!signal_pending(current)) {
 846				release_sock(sk);
 847				schedule();
 848				lock_sock(sk);
 849				continue;
 850			}
 851			err = -ERESTARTSYS;
 852			break;
 853		}
 854		finish_wait(sk_sleep(sk), &wait);
 855
 856		if (err)
 857			goto out_release;
 858	}
 859
 860	if (sk->sk_state != TCP_ESTABLISHED) {
 861		sock->state = SS_UNCONNECTED;
 862		err = sock_error(sk);	/* Always set at this point */
 863		goto out_release;
 864	}
 865
 866	sock->state = SS_CONNECTED;
 867
 868out_release:
 869	release_sock(sk);
 870
 871	return err;
 872}
 873
 874static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
 875{
 876	struct sk_buff *skb;
 877	struct sock *newsk;
 878	DEFINE_WAIT(wait);
 879	struct sock *sk;
 880	int err = 0;
 881
 882	if ((sk = sock->sk) == NULL)
 883		return -EINVAL;
 884
 885	lock_sock(sk);
 886	if (sk->sk_type != SOCK_SEQPACKET) {
 887		err = -EOPNOTSUPP;
 888		goto out_release;
 889	}
 890
 891	if (sk->sk_state != TCP_LISTEN) {
 892		err = -EINVAL;
 893		goto out_release;
 894	}
 895
 896	/*
 897	 *	The receive queue at this point holds the Call Requests we saved,
 898	 *	each with a ready-to-use child socket attached
 899	 */
 900	for (;;) {
 901		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 902
 903		skb = skb_dequeue(&sk->sk_receive_queue);
 904		if (skb)
 905			break;
 906
 907		if (flags & O_NONBLOCK) {
 908			err = -EWOULDBLOCK;
 909			break;
 910		}
 911		if (!signal_pending(current)) {
 912			release_sock(sk);
 913			schedule();
 914			lock_sock(sk);
 915			continue;
 916		}
 917		err = -ERESTARTSYS;
 918		break;
 919	}
 920	finish_wait(sk_sleep(sk), &wait);
 921	if (err)
 922		goto out_release;
 923
 924	newsk = skb->sk;
 925	sock_graft(newsk, newsock);
 926
 927	/* Now attach up the new socket */
 928	skb->sk = NULL;
 929	kfree_skb(skb);
 930	sk->sk_ack_backlog--;
 931
 932out_release:
 933	release_sock(sk);
 934
 935	return err;
 936}
 937
 938static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
 939	int *uaddr_len, int peer)
 940{
 941	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
 942	struct sock *sk = sock->sk;
 943	struct rose_sock *rose = rose_sk(sk);
 944	int n;
 945
 946	memset(srose, 0, sizeof(*srose));
 947	if (peer != 0) {
 948		if (sk->sk_state != TCP_ESTABLISHED)
 949			return -ENOTCONN;
 950		srose->srose_family = AF_ROSE;
 951		srose->srose_addr   = rose->dest_addr;
 952		srose->srose_call   = rose->dest_call;
 953		srose->srose_ndigis = rose->dest_ndigis;
 954		for (n = 0; n < rose->dest_ndigis; n++)
 955			srose->srose_digis[n] = rose->dest_digis[n];
 956	} else {
 957		srose->srose_family = AF_ROSE;
 958		srose->srose_addr   = rose->source_addr;
 959		srose->srose_call   = rose->source_call;
 960		srose->srose_ndigis = rose->source_ndigis;
 961		for (n = 0; n < rose->source_ndigis; n++)
 962			srose->srose_digis[n] = rose->source_digis[n];
 963	}
 964
 965	*uaddr_len = sizeof(struct full_sockaddr_rose);
 966	return 0;
 967}
 968
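/*
 *	Handle an incoming Call Request: parse the facilities, find a matching
 *	listening socket, clone it with rose_make_new() and queue the skb on
 *	the listener's receive queue so accept() can pick the new call up.
 */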
 969int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
 970{
 971	struct sock *sk;
 972	struct sock *make;
 973	struct rose_sock *make_rose;
 974	struct rose_facilities_struct facilities;
 975	int n;
 976
 977	skb->sk = NULL;		/* Initially we don't know who it's for */
 978
 979	/*
 980	 *	skb->data points to the rose frame start
 981	 */
 982	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
 983
 984	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
 985				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
 986				   &facilities)) {
 987		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
 988		return 0;
 989	}
 990
 991	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
 992
 993	/*
 994	 * We can't accept the Call Request.
 995	 */
 996	if (sk == NULL || sk_acceptq_is_full(sk) ||
 997	    (make = rose_make_new(sk)) == NULL) {
 998		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
 999		return 0;
1000	}
1001
1002	skb->sk     = make;
1003	make->sk_state = TCP_ESTABLISHED;
1004	make_rose = rose_sk(make);
1005
1006	make_rose->lci           = lci;
1007	make_rose->dest_addr     = facilities.dest_addr;
1008	make_rose->dest_call     = facilities.dest_call;
1009	make_rose->dest_ndigis   = facilities.dest_ndigis;
1010	for (n = 0 ; n < facilities.dest_ndigis ; n++)
1011		make_rose->dest_digis[n] = facilities.dest_digis[n];
1012	make_rose->source_addr   = facilities.source_addr;
1013	make_rose->source_call   = facilities.source_call;
1014	make_rose->source_ndigis = facilities.source_ndigis;
1015	for (n = 0 ; n < facilities.source_ndigis ; n++)
1016		make_rose->source_digis[n] = facilities.source_digis[n];
1017	make_rose->neighbour     = neigh;
1018	make_rose->device        = dev;
1019	make_rose->facilities    = facilities;
1020
1021	make_rose->neighbour->use++;
1022
1023	if (rose_sk(sk)->defer) {
1024		make_rose->state = ROSE_STATE_5;
1025	} else {
1026		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1027		make_rose->state = ROSE_STATE_3;
1028		rose_start_idletimer(make);
1029	}
1030
1031	make_rose->condition = 0x00;
1032	make_rose->vs        = 0;
1033	make_rose->va        = 0;
1034	make_rose->vr        = 0;
1035	make_rose->vl        = 0;
1036	sk->sk_ack_backlog++;
1037
1038	rose_insert_socket(make);
1039
1040	skb_queue_head(&sk->sk_receive_queue, skb);
1041
1042	rose_start_heartbeat(make);
1043
1044	if (!sock_flag(sk, SOCK_DEAD))
1045		sk->sk_data_ready(sk);
1046
1047	return 1;
1048}
1049
1050static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1051{
1052	struct sock *sk = sock->sk;
1053	struct rose_sock *rose = rose_sk(sk);
1054	DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
1055	int err;
1056	struct full_sockaddr_rose srose;
1057	struct sk_buff *skb;
1058	unsigned char *asmptr;
1059	int n, size, qbit = 0;
1060
1061	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1062		return -EINVAL;
1063
1064	if (sock_flag(sk, SOCK_ZAPPED))
1065		return -EADDRNOTAVAIL;
1066
1067	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1068		send_sig(SIGPIPE, current, 0);
1069		return -EPIPE;
1070	}
1071
1072	if (rose->neighbour == NULL || rose->device == NULL)
1073		return -ENETUNREACH;
1074
1075	if (usrose != NULL) {
1076		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1077			return -EINVAL;
1078		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1079		memcpy(&srose, usrose, msg->msg_namelen);
1080		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1081		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1082			return -EISCONN;
1083		if (srose.srose_ndigis != rose->dest_ndigis)
1084			return -EISCONN;
1085		if (srose.srose_ndigis == rose->dest_ndigis) {
1086			for (n = 0 ; n < srose.srose_ndigis ; n++)
1087				if (ax25cmp(&rose->dest_digis[n],
1088					    &srose.srose_digis[n]))
1089					return -EISCONN;
1090		}
1091		if (srose.srose_family != AF_ROSE)
1092			return -EINVAL;
1093	} else {
1094		if (sk->sk_state != TCP_ESTABLISHED)
1095			return -ENOTCONN;
1096
1097		srose.srose_family = AF_ROSE;
1098		srose.srose_addr   = rose->dest_addr;
1099		srose.srose_call   = rose->dest_call;
1100		srose.srose_ndigis = rose->dest_ndigis;
1101		for (n = 0 ; n < rose->dest_ndigis ; n++)
1102			srose.srose_digis[n] = rose->dest_digis[n];
1103	}
1104
1105	/* Build a packet */
1106	/* Sanity check the packet size */
1107	if (len > 65535)
1108		return -EMSGSIZE;
1109
1110	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1111
1112	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1113		return err;
1114
1115	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1116
1117	/*
1118	 *	Put the data on the end
1119	 */
1120
1121	skb_reset_transport_header(skb);
1122	skb_put(skb, len);
1123
1124	err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1125	if (err) {
1126		kfree_skb(skb);
1127		return err;
1128	}
1129
1130	/*
1131	 *	If the Q BIT Include socket option is in force, the first
1132	 *	byte of the user data is the logical value of the Q Bit.
1133	 */
1134	if (rose->qbitincl) {
1135		qbit = skb->data[0];
1136		skb_pull(skb, 1);
1137	}
1138
1139	/*
1140	 *	Push down the ROSE header
1141	 */
1142	asmptr = skb_push(skb, ROSE_MIN_LEN);
1143
1144	/* Build a ROSE Network header */
1145	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1146	asmptr[1] = (rose->lci >> 0) & 0xFF;
1147	asmptr[2] = ROSE_DATA;
1148
1149	if (qbit)
1150		asmptr[0] |= ROSE_Q_BIT;
1151
1152	if (sk->sk_state != TCP_ESTABLISHED) {
1153		kfree_skb(skb);
1154		return -ENOTCONN;
1155	}
1156
1157#ifdef M_BIT
1158#define ROSE_PACLEN (256-ROSE_MIN_LEN)
1159	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1160		unsigned char header[ROSE_MIN_LEN];
1161		struct sk_buff *skbn;
1162		int frontlen;
1163		int lg;
1164
1165		/* Save a copy of the Header */
1166		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1167		skb_pull(skb, ROSE_MIN_LEN);
1168
1169		frontlen = skb_headroom(skb);
1170
1171		while (skb->len > 0) {
1172			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1173				kfree_skb(skb);
1174				return err;
1175			}
1176
1177			skbn->sk   = sk;
1178			skbn->free = 1;
1179			skbn->arp  = 1;
1180
1181			skb_reserve(skbn, frontlen);
1182
1183			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1184
1185			/* Copy the user data */
1186			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1187			skb_pull(skb, lg);
1188
1189			/* Duplicate the Header */
1190			skb_push(skbn, ROSE_MIN_LEN);
1191			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1192
1193			if (skb->len > 0)
1194				skbn->data[2] |= M_BIT;
1195
1196			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1197		}
1198
1199		skb->free = 1;
1200		kfree_skb(skb);
1201	} else {
1202		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1203	}
1204#else
1205	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1206#endif
1207
1208	rose_kick(sk);
1209
1210	return len;
1211}
1212
1213
1214static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1215			int flags)
1216{
1217	struct sock *sk = sock->sk;
1218	struct rose_sock *rose = rose_sk(sk);
1219	size_t copied;
1220	unsigned char *asmptr;
1221	struct sk_buff *skb;
1222	int n, er, qbit;
1223
1224	/*
1225	 * This works for seqpacket too. The receiver has ordered the queue for
1226	 * us! We do one quick check first though
1227	 */
1228	if (sk->sk_state != TCP_ESTABLISHED)
1229		return -ENOTCONN;
1230
1231	/* Now we can treat all alike */
1232	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
1233		return er;
1234
1235	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1236
1237	skb_pull(skb, ROSE_MIN_LEN);
1238
1239	if (rose->qbitincl) {
1240		asmptr  = skb_push(skb, 1);
1241		*asmptr = qbit;
1242	}
1243
1244	skb_reset_transport_header(skb);
1245	copied     = skb->len;
1246
1247	if (copied > size) {
1248		copied = size;
1249		msg->msg_flags |= MSG_TRUNC;
1250	}
1251
1252	skb_copy_datagram_msg(skb, 0, msg, copied);
1253
1254	if (msg->msg_name) {
1255		struct sockaddr_rose *srose;
1256		DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
1257				 msg->msg_name);
1258
1259		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1260		srose = msg->msg_name;
1261		srose->srose_family = AF_ROSE;
1262		srose->srose_addr   = rose->dest_addr;
1263		srose->srose_call   = rose->dest_call;
1264		srose->srose_ndigis = rose->dest_ndigis;
1265		for (n = 0 ; n < rose->dest_ndigis ; n++)
1266			full_srose->srose_digis[n] = rose->dest_digis[n];
1267		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1268	}
1269
1270	skb_free_datagram(sk, skb);
1271
1272	return copied;
1273}
1274
1275
1276static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1277{
1278	struct sock *sk = sock->sk;
1279	struct rose_sock *rose = rose_sk(sk);
1280	void __user *argp = (void __user *)arg;
1281
1282	switch (cmd) {
1283	case TIOCOUTQ: {
1284		long amount;
1285
1286		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1287		if (amount < 0)
1288			amount = 0;
1289		return put_user(amount, (unsigned int __user *) argp);
1290	}
1291
1292	case TIOCINQ: {
1293		struct sk_buff *skb;
1294		long amount = 0L;
1295		/* These two are safe on a single CPU system as only user tasks fiddle here */
1296		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1297			amount = skb->len;
1298		return put_user(amount, (unsigned int __user *) argp);
1299	}
1300
1301	case SIOCGSTAMP:
1302		return sock_get_timestamp(sk, (struct timeval __user *) argp);
1303
1304	case SIOCGSTAMPNS:
1305		return sock_get_timestampns(sk, (struct timespec __user *) argp);
1306
1307	case SIOCGIFADDR:
1308	case SIOCSIFADDR:
1309	case SIOCGIFDSTADDR:
1310	case SIOCSIFDSTADDR:
1311	case SIOCGIFBRDADDR:
1312	case SIOCSIFBRDADDR:
1313	case SIOCGIFNETMASK:
1314	case SIOCSIFNETMASK:
1315	case SIOCGIFMETRIC:
1316	case SIOCSIFMETRIC:
1317		return -EINVAL;
1318
1319	case SIOCADDRT:
1320	case SIOCDELRT:
1321	case SIOCRSCLRRT:
1322		if (!capable(CAP_NET_ADMIN))
1323			return -EPERM;
1324		return rose_rt_ioctl(cmd, argp);
1325
1326	case SIOCRSGCAUSE: {
1327		struct rose_cause_struct rose_cause;
1328		rose_cause.cause      = rose->cause;
1329		rose_cause.diagnostic = rose->diagnostic;
1330		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1331	}
1332
1333	case SIOCRSSCAUSE: {
1334		struct rose_cause_struct rose_cause;
1335		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1336			return -EFAULT;
1337		rose->cause      = rose_cause.cause;
1338		rose->diagnostic = rose_cause.diagnostic;
1339		return 0;
1340	}
1341
1342	case SIOCRSSL2CALL:
1343		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1344		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1345			ax25_listen_release(&rose_callsign, NULL);
1346		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1347			return -EFAULT;
1348		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1349			return ax25_listen_register(&rose_callsign, NULL);
1350
1351		return 0;
1352
1353	case SIOCRSGL2CALL:
1354		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1355
1356	case SIOCRSACCEPT:
1357		if (rose->state == ROSE_STATE_5) {
1358			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1359			rose_start_idletimer(sk);
1360			rose->condition = 0x00;
1361			rose->vs        = 0;
1362			rose->va        = 0;
1363			rose->vr        = 0;
1364			rose->vl        = 0;
1365			rose->state     = ROSE_STATE_3;
1366		}
1367		return 0;
1368
1369	default:
1370		return -ENOIOCTLCMD;
1371	}
1372
1373	return 0;
1374}
1375
1376#ifdef CONFIG_PROC_FS
1377static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1378	__acquires(rose_list_lock)
1379{
1380	spin_lock_bh(&rose_list_lock);
1381	return seq_hlist_start_head(&rose_list, *pos);
1382}
1383
1384static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1385{
1386	return seq_hlist_next(v, &rose_list, pos);
1387}
1388
1389static void rose_info_stop(struct seq_file *seq, void *v)
1390	__releases(rose_list_lock)
1391{
1392	spin_unlock_bh(&rose_list_lock);
1393}
1394
1395static int rose_info_show(struct seq_file *seq, void *v)
1396{
1397	char buf[11], rsbuf[11];
1398
1399	if (v == SEQ_START_TOKEN)
1400		seq_puts(seq,
1401			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1402
1403	else {
1404		struct sock *s = sk_entry(v);
1405		struct rose_sock *rose = rose_sk(s);
1406		const char *devname, *callsign;
1407		const struct net_device *dev = rose->device;
1408
1409		if (!dev)
1410			devname = "???";
1411		else
1412			devname = dev->name;
1413
1414		seq_printf(seq, "%-10s %-9s ",
1415			   rose2asc(rsbuf, &rose->dest_addr),
1416			   ax2asc(buf, &rose->dest_call));
1417
1418		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1419			callsign = "??????-?";
1420		else
1421			callsign = ax2asc(buf, &rose->source_call);
1422
1423		seq_printf(seq,
1424			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1425			rose2asc(rsbuf, &rose->source_addr),
1426			callsign,
1427			devname,
1428			rose->lci & 0x0FFF,
1429			(rose->neighbour) ? rose->neighbour->number : 0,
1430			rose->state,
1431			rose->vs,
1432			rose->vr,
1433			rose->va,
1434			ax25_display_timer(&rose->timer) / HZ,
1435			rose->t1 / HZ,
1436			rose->t2 / HZ,
1437			rose->t3 / HZ,
1438			rose->hb / HZ,
1439			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1440			rose->idle / (60 * HZ),
1441			sk_wmem_alloc_get(s),
1442			sk_rmem_alloc_get(s),
1443			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1444	}
1445
1446	return 0;
1447}
1448
1449static const struct seq_operations rose_info_seqops = {
1450	.start = rose_info_start,
1451	.next = rose_info_next,
1452	.stop = rose_info_stop,
1453	.show = rose_info_show,
1454};
1455
1456static int rose_info_open(struct inode *inode, struct file *file)
1457{
1458	return seq_open(file, &rose_info_seqops);
1459}
1460
1461static const struct file_operations rose_info_fops = {
1462	.owner = THIS_MODULE,
1463	.open = rose_info_open,
1464	.read = seq_read,
1465	.llseek = seq_lseek,
1466	.release = seq_release,
1467};
1468#endif	/* CONFIG_PROC_FS */
1469
1470static const struct net_proto_family rose_family_ops = {
1471	.family		=	PF_ROSE,
1472	.create		=	rose_create,
1473	.owner		=	THIS_MODULE,
1474};
1475
1476static const struct proto_ops rose_proto_ops = {
1477	.family		=	PF_ROSE,
1478	.owner		=	THIS_MODULE,
1479	.release	=	rose_release,
1480	.bind		=	rose_bind,
1481	.connect	=	rose_connect,
1482	.socketpair	=	sock_no_socketpair,
1483	.accept		=	rose_accept,
1484	.getname	=	rose_getname,
1485	.poll		=	datagram_poll,
1486	.ioctl		=	rose_ioctl,
1487	.listen		=	rose_listen,
1488	.shutdown	=	sock_no_shutdown,
1489	.setsockopt	=	rose_setsockopt,
1490	.getsockopt	=	rose_getsockopt,
1491	.sendmsg	=	rose_sendmsg,
1492	.recvmsg	=	rose_recvmsg,
1493	.mmap		=	sock_no_mmap,
1494	.sendpage	=	sock_no_sendpage,
1495};
1496
1497static struct notifier_block rose_dev_notifier = {
1498	.notifier_call	=	rose_device_event,
1499};
1500
1501static struct net_device **dev_rose;
1502
1503static struct ax25_protocol rose_pid = {
1504	.pid	= AX25_P_ROSE,
1505	.func	= rose_route_frame
1506};
1507
1508static struct ax25_linkfail rose_linkfail_notifier = {
1509	.func	= rose_link_failed
1510};
1511
1512static int __init rose_proto_init(void)
1513{
1514	int i;
1515	int rc;
1516
1517	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1518	printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1519		rc = -EINVAL;
1520		goto out;
1521	}
1522
1523	rc = proto_register(&rose_proto, 0);
1524	if (rc != 0)
1525		goto out;
1526
1527	rose_callsign = null_ax25_address;
1528
1529	dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1530	if (dev_rose == NULL) {
1531		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1532		rc = -ENOMEM;
1533		goto out_proto_unregister;
1534	}
1535
1536	for (i = 0; i < rose_ndevs; i++) {
1537		struct net_device *dev;
1538		char name[IFNAMSIZ];
1539
1540		sprintf(name, "rose%d", i);
1541		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
1542		if (!dev) {
1543			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1544			rc = -ENOMEM;
1545			goto fail;
1546		}
1547		rc = register_netdev(dev);
1548		if (rc) {
1549			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1550			free_netdev(dev);
1551			goto fail;
1552		}
1553		rose_set_lockdep_key(dev);
1554		dev_rose[i] = dev;
1555	}
1556
1557	sock_register(&rose_family_ops);
1558	register_netdevice_notifier(&rose_dev_notifier);
1559
1560	ax25_register_pid(&rose_pid);
1561	ax25_linkfail_register(&rose_linkfail_notifier);
1562
1563#ifdef CONFIG_SYSCTL
1564	rose_register_sysctl();
1565#endif
1566	rose_loopback_init();
1567
1568	rose_add_loopback_neigh();
1569
1570	proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops);
1571	proc_create("rose_neigh", S_IRUGO, init_net.proc_net,
1572		    &rose_neigh_fops);
1573	proc_create("rose_nodes", S_IRUGO, init_net.proc_net,
1574		    &rose_nodes_fops);
1575	proc_create("rose_routes", S_IRUGO, init_net.proc_net,
1576		    &rose_routes_fops);
1577out:
1578	return rc;
1579fail:
1580	while (--i >= 0) {
1581		unregister_netdev(dev_rose[i]);
1582		free_netdev(dev_rose[i]);
1583	}
1584	kfree(dev_rose);
1585out_proto_unregister:
1586	proto_unregister(&rose_proto);
1587	goto out;
1588}
1589module_init(rose_proto_init);
1590
1591module_param(rose_ndevs, int, 0);
1592MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
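/*
 *	For example, loading the module with "modprobe rose rose_ndevs=4" would
 *	create rose0..rose3 at init time; the default is 10 virtual devices.
 */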
1593
1594MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1595MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1596MODULE_LICENSE("GPL");
1597MODULE_ALIAS_NETPROTO(PF_ROSE);
1598
1599static void __exit rose_exit(void)
1600{
1601	int i;
1602
1603	remove_proc_entry("rose", init_net.proc_net);
1604	remove_proc_entry("rose_neigh", init_net.proc_net);
1605	remove_proc_entry("rose_nodes", init_net.proc_net);
1606	remove_proc_entry("rose_routes", init_net.proc_net);
1607	rose_loopback_clear();
1608
1609	rose_rt_free();
1610
1611	ax25_protocol_release(AX25_P_ROSE);
1612	ax25_linkfail_release(&rose_linkfail_notifier);
1613
1614	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1615		ax25_listen_release(&rose_callsign, NULL);
1616
1617#ifdef CONFIG_SYSCTL
1618	rose_unregister_sysctl();
1619#endif
1620	unregister_netdevice_notifier(&rose_dev_notifier);
1621
1622	sock_unregister(PF_ROSE);
1623
1624	for (i = 0; i < rose_ndevs; i++) {
1625		struct net_device *dev = dev_rose[i];
1626
1627		if (dev) {
1628			unregister_netdev(dev);
1629			free_netdev(dev);
1630		}
1631	}
1632
1633	kfree(dev_rose);
1634	proto_unregister(&rose_proto);
1635}
1636
1637module_exit(rose_exit);