Linux kernel v3.15: net/rose/af_rose.c
   1/*
   2 * This program is free software; you can redistribute it and/or modify
   3 * it under the terms of the GNU General Public License as published by
   4 * the Free Software Foundation; either version 2 of the License, or
   5 * (at your option) any later version.
   6 *
   7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
   8 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
   9 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
  10 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
  11 */
  12
  13#include <linux/capability.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/init.h>
  17#include <linux/errno.h>
  18#include <linux/types.h>
  19#include <linux/socket.h>
  20#include <linux/in.h>
  21#include <linux/slab.h>
  22#include <linux/kernel.h>
  23#include <linux/sched.h>
  24#include <linux/spinlock.h>
  25#include <linux/timer.h>
  26#include <linux/string.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/stat.h>
  30#include <net/net_namespace.h>
  31#include <net/ax25.h>
  32#include <linux/inet.h>
  33#include <linux/netdevice.h>
  34#include <linux/if_arp.h>
  35#include <linux/skbuff.h>
  36#include <net/sock.h>
  37#include <asm/uaccess.h>
  38#include <linux/fcntl.h>
  39#include <linux/termios.h>
  40#include <linux/mm.h>
  41#include <linux/interrupt.h>
  42#include <linux/notifier.h>
  43#include <net/rose.h>
  44#include <linux/proc_fs.h>
  45#include <linux/seq_file.h>
  46#include <net/tcp_states.h>
  47#include <net/ip.h>
  48#include <net/arp.h>
  49
  50static int rose_ndevs = 10;
  51
  52int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
  53int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
  54int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
  55int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
  56int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
  57int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
  58int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
  59int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
  60int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
  61int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
  62
  63static HLIST_HEAD(rose_list);
  64static DEFINE_SPINLOCK(rose_list_lock);
  65
  66static const struct proto_ops rose_proto_ops;
  67
  68ax25_address rose_callsign;
  69
  70/*
  71 * ROSE network devices are virtual network devices encapsulating ROSE
  72 * frames into AX.25 which will be sent through an AX.25 device, so form a
  73 * special "super class" of normal net devices; split their locks off into a
  74 * separate class since they always nest.
  75 */
  76static struct lock_class_key rose_netdev_xmit_lock_key;
  77static struct lock_class_key rose_netdev_addr_lock_key;
  78
  79static void rose_set_lockdep_one(struct net_device *dev,
  80				 struct netdev_queue *txq,
  81				 void *_unused)
  82{
  83	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
  84}
  85
  86static void rose_set_lockdep_key(struct net_device *dev)
  87{
  88	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
  89	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
  90}
  91
  92/*
  93 *	Convert a ROSE address into text.
  94 */
  95char *rose2asc(char *buf, const rose_address *addr)
  96{
  97	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
  98	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
  99	    addr->rose_addr[4] == 0x00) {
 100		strcpy(buf, "*");
 101	} else {
 102		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
 103						addr->rose_addr[1] & 0xFF,
 104						addr->rose_addr[2] & 0xFF,
 105						addr->rose_addr[3] & 0xFF,
 106						addr->rose_addr[4] & 0xFF);
 107	}
 108
 109	return buf;
 110}
 111
 112/*
 113 *	Compare two ROSE addresses, 0 == equal.
 114 */
 115int rosecmp(rose_address *addr1, rose_address *addr2)
 116{
 117	int i;
 118
 119	for (i = 0; i < 5; i++)
 120		if (addr1->rose_addr[i] != addr2->rose_addr[i])
 121			return 1;
 122
 123	return 0;
 124}
 125
 126/*
 127 *	Compare two ROSE addresses for only mask digits, 0 == equal.
 128 */
 129int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
 130{
 131	unsigned int i, j;
 132
 133	if (mask > 10)
 134		return 1;
 135
 136	for (i = 0; i < mask; i++) {
 137		j = i / 2;
 138
 139		if ((i % 2) != 0) {
 140			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
 141				return 1;
 142		} else {
 143			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
 144				return 1;
 145		}
 146	}
 147
 148	return 0;
 149}
 150
 151/*
 152 *	Socket removal during an interrupt is now safe.
 153 */
 154static void rose_remove_socket(struct sock *sk)
 155{
 156	spin_lock_bh(&rose_list_lock);
 157	sk_del_node_init(sk);
 158	spin_unlock_bh(&rose_list_lock);
 159}
 160
 161/*
 162 *	Kill all bound sockets on a broken link layer connection to a
 163 *	particular neighbour.
 164 */
 165void rose_kill_by_neigh(struct rose_neigh *neigh)
 166{
 167	struct sock *s;
 168
 169	spin_lock_bh(&rose_list_lock);
 170	sk_for_each(s, &rose_list) {
 171		struct rose_sock *rose = rose_sk(s);
 172
 173		if (rose->neighbour == neigh) {
 174			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 175			rose->neighbour->use--;
 176			rose->neighbour = NULL;
 177		}
 178	}
 179	spin_unlock_bh(&rose_list_lock);
 180}
 181
 182/*
 183 *	Kill all bound sockets on a dropped device.
 184 */
 185static void rose_kill_by_device(struct net_device *dev)
 186{
 187	struct sock *s;
 188
 189	spin_lock_bh(&rose_list_lock);
 190	sk_for_each(s, &rose_list) {
 191		struct rose_sock *rose = rose_sk(s);
 192
 193		if (rose->device == dev) {
 194			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 195			rose->neighbour->use--;
 196			rose->device = NULL;
 197		}
 198	}
 199	spin_unlock_bh(&rose_list_lock);
 200}
 201
 202/*
 203 *	Handle device status changes.
 204 */
 205static int rose_device_event(struct notifier_block *this,
 206			     unsigned long event, void *ptr)
 207{
 208	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 209
 210	if (!net_eq(dev_net(dev), &init_net))
 211		return NOTIFY_DONE;
 212
 213	if (event != NETDEV_DOWN)
 214		return NOTIFY_DONE;
 215
 216	switch (dev->type) {
 217	case ARPHRD_ROSE:
 218		rose_kill_by_device(dev);
 219		break;
 220	case ARPHRD_AX25:
 221		rose_link_device_down(dev);
 222		rose_rt_device_down(dev);
 223		break;
 224	}
 225
 226	return NOTIFY_DONE;
 227}
 228
 229/*
 230 *	Add a socket to the bound sockets list.
 231 */
 232static void rose_insert_socket(struct sock *sk)
 233{
 234
 235	spin_lock_bh(&rose_list_lock);
 236	sk_add_node(sk, &rose_list);
 237	spin_unlock_bh(&rose_list_lock);
 238}
 239
 240/*
 241 *	Find a socket that wants to accept the Call Request we just
 242 *	received.
 243 */
 244static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 245{
 246	struct sock *s;
 247
 248	spin_lock_bh(&rose_list_lock);
 249	sk_for_each(s, &rose_list) {
 250		struct rose_sock *rose = rose_sk(s);
 251
 252		if (!rosecmp(&rose->source_addr, addr) &&
 253		    !ax25cmp(&rose->source_call, call) &&
 254		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
 255			goto found;
 256	}
 257
 258	sk_for_each(s, &rose_list) {
 259		struct rose_sock *rose = rose_sk(s);
 260
 261		if (!rosecmp(&rose->source_addr, addr) &&
 262		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
 263		    s->sk_state == TCP_LISTEN)
 264			goto found;
 265	}
 266	s = NULL;
 267found:
 268	spin_unlock_bh(&rose_list_lock);
 269	return s;
 270}
 271
 272/*
 273 *	Find a connected ROSE socket given my LCI and device.
 274 */
 275struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
 276{
 277	struct sock *s;
 278
 279	spin_lock_bh(&rose_list_lock);
 280	sk_for_each(s, &rose_list) {
 281		struct rose_sock *rose = rose_sk(s);
 282
 283		if (rose->lci == lci && rose->neighbour == neigh)
 284			goto found;
 285	}
 286	s = NULL;
 287found:
 288	spin_unlock_bh(&rose_list_lock);
 289	return s;
 290}
 291
 292/*
 293 *	Find a unique LCI for a given device.
 294 */
 295unsigned int rose_new_lci(struct rose_neigh *neigh)
 296{
 297	int lci;
 298
 299	if (neigh->dce_mode) {
 300		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
 301			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 302				return lci;
 303	} else {
 304		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
 305			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 306				return lci;
 307	}
 308
 309	return 0;
 310}
 311
 312/*
 313 *	Deferred destroy.
 314 */
 315void rose_destroy_socket(struct sock *);
 316
 317/*
 318 *	Handler for deferred kills.
 319 */
 320static void rose_destroy_timer(unsigned long data)
 321{
 322	rose_destroy_socket((struct sock *)data);
 323}
 324
 325/*
 326 *	This is called from user mode and the timers. Thus it protects itself
 327 *	against interrupt users but doesn't worry about being called during
 328 *	work.  Once it is removed from the queue no interrupt or bottom half
 329 *	will touch it and we are (fairly 8-) ) safe.
 330 */
 331void rose_destroy_socket(struct sock *sk)
 332{
 333	struct sk_buff *skb;
 334
 335	rose_remove_socket(sk);
 336	rose_stop_heartbeat(sk);
 337	rose_stop_idletimer(sk);
 338	rose_stop_timer(sk);
 339
 340	rose_clear_queues(sk);		/* Flush the queues */
 341
 342	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 343		if (skb->sk != sk) {	/* A pending connection */
 344			/* Queue the unaccepted socket for death */
 345			sock_set_flag(skb->sk, SOCK_DEAD);
 346			rose_start_heartbeat(skb->sk);
 347			rose_sk(skb->sk)->state = ROSE_STATE_0;
 348		}
 349
 350		kfree_skb(skb);
 351	}
 352
 353	if (sk_has_allocations(sk)) {
 354		/* Defer: outstanding buffers */
 355		setup_timer(&sk->sk_timer, rose_destroy_timer,
 356				(unsigned long)sk);
 357		sk->sk_timer.expires  = jiffies + 10 * HZ;
 358		add_timer(&sk->sk_timer);
 359	} else
 360		sock_put(sk);
 361}
 362
 363/*
 364 *	Handling for system calls applied via the various interfaces to a
 365 *	ROSE socket object.
 366 */
 367
 368static int rose_setsockopt(struct socket *sock, int level, int optname,
 369	char __user *optval, unsigned int optlen)
 370{
 371	struct sock *sk = sock->sk;
 372	struct rose_sock *rose = rose_sk(sk);
 373	int opt;
 374
 375	if (level != SOL_ROSE)
 376		return -ENOPROTOOPT;
 377
 378	if (optlen < sizeof(int))
 379		return -EINVAL;
 380
 381	if (get_user(opt, (int __user *)optval))
 382		return -EFAULT;
 383
 384	switch (optname) {
 385	case ROSE_DEFER:
 386		rose->defer = opt ? 1 : 0;
 387		return 0;
 388
 389	case ROSE_T1:
 390		if (opt < 1)
 391			return -EINVAL;
 392		rose->t1 = opt * HZ;
 393		return 0;
 394
 395	case ROSE_T2:
 396		if (opt < 1)
 397			return -EINVAL;
 398		rose->t2 = opt * HZ;
 399		return 0;
 400
 401	case ROSE_T3:
 402		if (opt < 1)
 403			return -EINVAL;
 404		rose->t3 = opt * HZ;
 405		return 0;
 406
 407	case ROSE_HOLDBACK:
 408		if (opt < 1)
 409			return -EINVAL;
 410		rose->hb = opt * HZ;
 411		return 0;
 412
 413	case ROSE_IDLE:
 414		if (opt < 0)
 415			return -EINVAL;
 416		rose->idle = opt * 60 * HZ;
 417		return 0;
 418
 419	case ROSE_QBITINCL:
 420		rose->qbitincl = opt ? 1 : 0;
 421		return 0;
 422
 423	default:
 424		return -ENOPROTOOPT;
 425	}
 426}
 427
 428static int rose_getsockopt(struct socket *sock, int level, int optname,
 429	char __user *optval, int __user *optlen)
 430{
 431	struct sock *sk = sock->sk;
 432	struct rose_sock *rose = rose_sk(sk);
 433	int val = 0;
 434	int len;
 435
 436	if (level != SOL_ROSE)
 437		return -ENOPROTOOPT;
 438
 439	if (get_user(len, optlen))
 440		return -EFAULT;
 441
 442	if (len < 0)
 443		return -EINVAL;
 444
 445	switch (optname) {
 446	case ROSE_DEFER:
 447		val = rose->defer;
 448		break;
 449
 450	case ROSE_T1:
 451		val = rose->t1 / HZ;
 452		break;
 453
 454	case ROSE_T2:
 455		val = rose->t2 / HZ;
 456		break;
 457
 458	case ROSE_T3:
 459		val = rose->t3 / HZ;
 460		break;
 461
 462	case ROSE_HOLDBACK:
 463		val = rose->hb / HZ;
 464		break;
 465
 466	case ROSE_IDLE:
 467		val = rose->idle / (60 * HZ);
 468		break;
 469
 470	case ROSE_QBITINCL:
 471		val = rose->qbitincl;
 472		break;
 473
 474	default:
 475		return -ENOPROTOOPT;
 476	}
 477
 478	len = min_t(unsigned int, len, sizeof(int));
 479
 480	if (put_user(len, optlen))
 481		return -EFAULT;
 482
 483	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
 484}
 485
 486static int rose_listen(struct socket *sock, int backlog)
 487{
 488	struct sock *sk = sock->sk;
 489
 490	if (sk->sk_state != TCP_LISTEN) {
 491		struct rose_sock *rose = rose_sk(sk);
 492
 493		rose->dest_ndigis = 0;
 494		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
 495		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
 496		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
 497		sk->sk_max_ack_backlog = backlog;
 498		sk->sk_state           = TCP_LISTEN;
 499		return 0;
 500	}
 501
 502	return -EOPNOTSUPP;
 503}
 504
 505static struct proto rose_proto = {
 506	.name	  = "ROSE",
 507	.owner	  = THIS_MODULE,
 508	.obj_size = sizeof(struct rose_sock),
 509};
 510
 511static int rose_create(struct net *net, struct socket *sock, int protocol,
 512		       int kern)
 513{
 514	struct sock *sk;
 515	struct rose_sock *rose;
 516
 517	if (!net_eq(net, &init_net))
 518		return -EAFNOSUPPORT;
 519
 520	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 521		return -ESOCKTNOSUPPORT;
 522
 523	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
 524	if (sk == NULL)
 525		return -ENOMEM;
 526
 527	rose = rose_sk(sk);
 528
 529	sock_init_data(sock, sk);
 530
 531	skb_queue_head_init(&rose->ack_queue);
 532#ifdef M_BIT
 533	skb_queue_head_init(&rose->frag_queue);
 534	rose->fraglen    = 0;
 535#endif
 536
 537	sock->ops    = &rose_proto_ops;
 538	sk->sk_protocol = protocol;
 539
 540	init_timer(&rose->timer);
 541	init_timer(&rose->idletimer);
 542
 543	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
 544	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
 545	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
 546	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
 547	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
 548
 549	rose->state = ROSE_STATE_0;
 550
 551	return 0;
 552}
 553
 554static struct sock *rose_make_new(struct sock *osk)
 555{
 556	struct sock *sk;
 557	struct rose_sock *rose, *orose;
 558
 559	if (osk->sk_type != SOCK_SEQPACKET)
 560		return NULL;
 561
 562	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
 563	if (sk == NULL)
 564		return NULL;
 565
 566	rose = rose_sk(sk);
 567
 568	sock_init_data(NULL, sk);
 569
 570	skb_queue_head_init(&rose->ack_queue);
 571#ifdef M_BIT
 572	skb_queue_head_init(&rose->frag_queue);
 573	rose->fraglen  = 0;
 574#endif
 575
 576	sk->sk_type     = osk->sk_type;
 577	sk->sk_priority = osk->sk_priority;
 578	sk->sk_protocol = osk->sk_protocol;
 579	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 580	sk->sk_sndbuf   = osk->sk_sndbuf;
 581	sk->sk_state    = TCP_ESTABLISHED;
 582	sock_copy_flags(sk, osk);
 583
 584	init_timer(&rose->timer);
 585	init_timer(&rose->idletimer);
 586
 587	orose		= rose_sk(osk);
 588	rose->t1	= orose->t1;
 589	rose->t2	= orose->t2;
 590	rose->t3	= orose->t3;
 591	rose->hb	= orose->hb;
 592	rose->idle	= orose->idle;
 593	rose->defer	= orose->defer;
 594	rose->device	= orose->device;
 595	rose->qbitincl	= orose->qbitincl;
 596
 597	return sk;
 598}
 599
 600static int rose_release(struct socket *sock)
 601{
 602	struct sock *sk = sock->sk;
 603	struct rose_sock *rose;
 604
 605	if (sk == NULL) return 0;
 606
 607	sock_hold(sk);
 608	sock_orphan(sk);
 609	lock_sock(sk);
 610	rose = rose_sk(sk);
 611
 612	switch (rose->state) {
 613	case ROSE_STATE_0:
 614		release_sock(sk);
 615		rose_disconnect(sk, 0, -1, -1);
 616		lock_sock(sk);
 617		rose_destroy_socket(sk);
 618		break;
 619
 620	case ROSE_STATE_2:
 621		rose->neighbour->use--;
 622		release_sock(sk);
 623		rose_disconnect(sk, 0, -1, -1);
 624		lock_sock(sk);
 625		rose_destroy_socket(sk);
 626		break;
 627
 628	case ROSE_STATE_1:
 629	case ROSE_STATE_3:
 630	case ROSE_STATE_4:
 631	case ROSE_STATE_5:
 632		rose_clear_queues(sk);
 633		rose_stop_idletimer(sk);
 634		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
 635		rose_start_t3timer(sk);
 636		rose->state  = ROSE_STATE_2;
 637		sk->sk_state    = TCP_CLOSE;
 638		sk->sk_shutdown |= SEND_SHUTDOWN;
 639		sk->sk_state_change(sk);
 640		sock_set_flag(sk, SOCK_DEAD);
 641		sock_set_flag(sk, SOCK_DESTROY);
 642		break;
 643
 644	default:
 645		break;
 646	}
 647
 648	sock->sk = NULL;
 649	release_sock(sk);
 650	sock_put(sk);
 651
 652	return 0;
 653}
 654
 655static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 656{
 657	struct sock *sk = sock->sk;
 658	struct rose_sock *rose = rose_sk(sk);
 659	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 660	struct net_device *dev;
 661	ax25_address *source;
 662	ax25_uid_assoc *user;
 663	int n;
 664
 665	if (!sock_flag(sk, SOCK_ZAPPED))
 666		return -EINVAL;
 667
 668	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 669		return -EINVAL;
 670
 671	if (addr->srose_family != AF_ROSE)
 672		return -EINVAL;
 673
 674	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 675		return -EINVAL;
 676
 677	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 678		return -EINVAL;
 679
 680	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
 681		return -EADDRNOTAVAIL;
 682
 683	source = &addr->srose_call;
 684
 685	user = ax25_findbyuid(current_euid());
 686	if (user) {
 687		rose->source_call = user->call;
 688		ax25_uid_put(user);
 689	} else {
 690		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
 691			return -EACCES;
 692		rose->source_call   = *source;
 693	}
 694
 695	rose->source_addr   = addr->srose_addr;
 696	rose->device        = dev;
 697	rose->source_ndigis = addr->srose_ndigis;
 698
 699	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 700		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 701		for (n = 0 ; n < addr->srose_ndigis ; n++)
 702			rose->source_digis[n] = full_addr->srose_digis[n];
 703	} else {
 704		if (rose->source_ndigis == 1) {
 705			rose->source_digis[0] = addr->srose_digi;
 706		}
 707	}
 708
 709	rose_insert_socket(sk);
 710
 711	sock_reset_flag(sk, SOCK_ZAPPED);
 712
 713	return 0;
 714}
 715
 716static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
 717{
 718	struct sock *sk = sock->sk;
 719	struct rose_sock *rose = rose_sk(sk);
 720	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 721	unsigned char cause, diagnostic;
 722	struct net_device *dev;
 723	ax25_uid_assoc *user;
 724	int n, err = 0;
 725
 726	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 727		return -EINVAL;
 728
 729	if (addr->srose_family != AF_ROSE)
 730		return -EINVAL;
 731
 732	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 733		return -EINVAL;
 734
 735	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 736		return -EINVAL;
 737
 738	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
 739	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
 740		return -EINVAL;
 741
 742	lock_sock(sk);
 743
 744	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
 745		/* Connect completed during a ERESTARTSYS event */
 746		sock->state = SS_CONNECTED;
 747		goto out_release;
 748	}
 749
 750	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
 751		sock->state = SS_UNCONNECTED;
 752		err = -ECONNREFUSED;
 753		goto out_release;
 754	}
 755
 756	if (sk->sk_state == TCP_ESTABLISHED) {
 757		/* No reconnect on a seqpacket socket */
 758		err = -EISCONN;
 759		goto out_release;
 760	}
 761
 762	sk->sk_state   = TCP_CLOSE;
 763	sock->state = SS_UNCONNECTED;
 764
 765	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
 766					 &diagnostic, 0);
 767	if (!rose->neighbour) {
 768		err = -ENETUNREACH;
 769		goto out_release;
 770	}
 771
 772	rose->lci = rose_new_lci(rose->neighbour);
 773	if (!rose->lci) {
 774		err = -ENETUNREACH;
 775		goto out_release;
 776	}
 777
 778	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
 779		sock_reset_flag(sk, SOCK_ZAPPED);
 780
 781		if ((dev = rose_dev_first()) == NULL) {
 782			err = -ENETUNREACH;
 783			goto out_release;
 784		}
 785
 786		user = ax25_findbyuid(current_euid());
 787		if (!user) {
 788			err = -EINVAL;
 789			goto out_release;
 790		}
 791
 792		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
 793		rose->source_call = user->call;
 794		rose->device      = dev;
 795		ax25_uid_put(user);
 796
 797		rose_insert_socket(sk);		/* Finish the bind */
 798	}
 799	rose->dest_addr   = addr->srose_addr;
 800	rose->dest_call   = addr->srose_call;
 801	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
 802	rose->dest_ndigis = addr->srose_ndigis;
 803
 804	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 805		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 806		for (n = 0 ; n < addr->srose_ndigis ; n++)
 807			rose->dest_digis[n] = full_addr->srose_digis[n];
 808	} else {
 809		if (rose->dest_ndigis == 1) {
 810			rose->dest_digis[0] = addr->srose_digi;
 811		}
 812	}
 813
 814	/* Move to connecting socket, start sending Connect Requests */
 815	sock->state   = SS_CONNECTING;
 816	sk->sk_state     = TCP_SYN_SENT;
 817
 818	rose->state = ROSE_STATE_1;
 819
 820	rose->neighbour->use++;
 821
 822	rose_write_internal(sk, ROSE_CALL_REQUEST);
 823	rose_start_heartbeat(sk);
 824	rose_start_t1timer(sk);
 825
 826	/* Now the loop */
 827	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
 828		err = -EINPROGRESS;
 829		goto out_release;
 830	}
 831
 832	/*
 833	 * A Connect Ack with Choke or timeout or failed routing will go to
 834	 * closed.
 835	 */
 836	if (sk->sk_state == TCP_SYN_SENT) {
 837		DEFINE_WAIT(wait);
 838
 839		for (;;) {
 840			prepare_to_wait(sk_sleep(sk), &wait,
 841					TASK_INTERRUPTIBLE);
 842			if (sk->sk_state != TCP_SYN_SENT)
 843				break;
 844			if (!signal_pending(current)) {
 845				release_sock(sk);
 846				schedule();
 847				lock_sock(sk);
 848				continue;
 849			}
 850			err = -ERESTARTSYS;
 851			break;
 852		}
 853		finish_wait(sk_sleep(sk), &wait);
 854
 855		if (err)
 856			goto out_release;
 857	}
 858
 859	if (sk->sk_state != TCP_ESTABLISHED) {
 860		sock->state = SS_UNCONNECTED;
 861		err = sock_error(sk);	/* Always set at this point */
 862		goto out_release;
 863	}
 864
 865	sock->state = SS_CONNECTED;
 866
 867out_release:
 868	release_sock(sk);
 869
 870	return err;
 871}
 872
 873static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
 874{
 875	struct sk_buff *skb;
 876	struct sock *newsk;
 877	DEFINE_WAIT(wait);
 878	struct sock *sk;
 879	int err = 0;
 880
 881	if ((sk = sock->sk) == NULL)
 882		return -EINVAL;
 883
 884	lock_sock(sk);
 885	if (sk->sk_type != SOCK_SEQPACKET) {
 886		err = -EOPNOTSUPP;
 887		goto out_release;
 888	}
 889
 890	if (sk->sk_state != TCP_LISTEN) {
 891		err = -EINVAL;
 892		goto out_release;
 893	}
 894
 895	/*
 896	 *	The write queue this time is holding sockets ready to use
 897	 *	hooked into the SABM we saved
 898	 */
 899	for (;;) {
 900		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 901
 902		skb = skb_dequeue(&sk->sk_receive_queue);
 903		if (skb)
 904			break;
 905
 906		if (flags & O_NONBLOCK) {
 907			err = -EWOULDBLOCK;
 908			break;
 909		}
 910		if (!signal_pending(current)) {
 911			release_sock(sk);
 912			schedule();
 913			lock_sock(sk);
 914			continue;
 915		}
 916		err = -ERESTARTSYS;
 917		break;
 918	}
 919	finish_wait(sk_sleep(sk), &wait);
 920	if (err)
 921		goto out_release;
 922
 923	newsk = skb->sk;
 924	sock_graft(newsk, newsock);
 925
 926	/* Now attach up the new socket */
 927	skb->sk = NULL;
 928	kfree_skb(skb);
 929	sk->sk_ack_backlog--;
 930
 931out_release:
 932	release_sock(sk);
 933
 934	return err;
 935}
 936
 937static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
 938	int *uaddr_len, int peer)
 939{
 940	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
 941	struct sock *sk = sock->sk;
 942	struct rose_sock *rose = rose_sk(sk);
 943	int n;
 944
 945	memset(srose, 0, sizeof(*srose));
 946	if (peer != 0) {
 947		if (sk->sk_state != TCP_ESTABLISHED)
 948			return -ENOTCONN;
 949		srose->srose_family = AF_ROSE;
 950		srose->srose_addr   = rose->dest_addr;
 951		srose->srose_call   = rose->dest_call;
 952		srose->srose_ndigis = rose->dest_ndigis;
 953		for (n = 0; n < rose->dest_ndigis; n++)
 954			srose->srose_digis[n] = rose->dest_digis[n];
 955	} else {
 956		srose->srose_family = AF_ROSE;
 957		srose->srose_addr   = rose->source_addr;
 958		srose->srose_call   = rose->source_call;
 959		srose->srose_ndigis = rose->source_ndigis;
 960		for (n = 0; n < rose->source_ndigis; n++)
 961			srose->srose_digis[n] = rose->source_digis[n];
 962	}
 963
 964	*uaddr_len = sizeof(struct full_sockaddr_rose);
 965	return 0;
 966}
 967
 968int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
 969{
 970	struct sock *sk;
 971	struct sock *make;
 972	struct rose_sock *make_rose;
 973	struct rose_facilities_struct facilities;
 974	int n;
 975
 976	skb->sk = NULL;		/* Initially we don't know who it's for */
 977
 978	/*
 979	 *	skb->data points to the rose frame start
 980	 */
 981	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
 982
 983	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
 984				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
 985				   &facilities)) {
 986		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
 987		return 0;
 988	}
 989
 990	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
 991
 992	/*
 993	 * We can't accept the Call Request.
 994	 */
 995	if (sk == NULL || sk_acceptq_is_full(sk) ||
 996	    (make = rose_make_new(sk)) == NULL) {
 997		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
 998		return 0;
 999	}
1000
1001	skb->sk     = make;
1002	make->sk_state = TCP_ESTABLISHED;
1003	make_rose = rose_sk(make);
1004
1005	make_rose->lci           = lci;
1006	make_rose->dest_addr     = facilities.dest_addr;
1007	make_rose->dest_call     = facilities.dest_call;
1008	make_rose->dest_ndigis   = facilities.dest_ndigis;
1009	for (n = 0 ; n < facilities.dest_ndigis ; n++)
1010		make_rose->dest_digis[n] = facilities.dest_digis[n];
1011	make_rose->source_addr   = facilities.source_addr;
1012	make_rose->source_call   = facilities.source_call;
1013	make_rose->source_ndigis = facilities.source_ndigis;
1014	for (n = 0 ; n < facilities.source_ndigis ; n++)
1015		make_rose->source_digis[n] = facilities.source_digis[n];
1016	make_rose->neighbour     = neigh;
1017	make_rose->device        = dev;
1018	make_rose->facilities    = facilities;
1019
1020	make_rose->neighbour->use++;
1021
1022	if (rose_sk(sk)->defer) {
1023		make_rose->state = ROSE_STATE_5;
1024	} else {
1025		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1026		make_rose->state = ROSE_STATE_3;
1027		rose_start_idletimer(make);
1028	}
1029
1030	make_rose->condition = 0x00;
1031	make_rose->vs        = 0;
1032	make_rose->va        = 0;
1033	make_rose->vr        = 0;
1034	make_rose->vl        = 0;
1035	sk->sk_ack_backlog++;
1036
1037	rose_insert_socket(make);
1038
1039	skb_queue_head(&sk->sk_receive_queue, skb);
1040
1041	rose_start_heartbeat(make);
1042
1043	if (!sock_flag(sk, SOCK_DEAD))
1044		sk->sk_data_ready(sk);
1045
1046	return 1;
1047}
1048
1049static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1050			struct msghdr *msg, size_t len)
1051{
1052	struct sock *sk = sock->sk;
1053	struct rose_sock *rose = rose_sk(sk);
1054	DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
1055	int err;
1056	struct full_sockaddr_rose srose;
1057	struct sk_buff *skb;
1058	unsigned char *asmptr;
1059	int n, size, qbit = 0;
1060
1061	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1062		return -EINVAL;
1063
1064	if (sock_flag(sk, SOCK_ZAPPED))
1065		return -EADDRNOTAVAIL;
1066
1067	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1068		send_sig(SIGPIPE, current, 0);
1069		return -EPIPE;
1070	}
1071
1072	if (rose->neighbour == NULL || rose->device == NULL)
1073		return -ENETUNREACH;
1074
1075	if (usrose != NULL) {
1076		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1077			return -EINVAL;
1078		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1079		memcpy(&srose, usrose, msg->msg_namelen);
1080		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1081		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1082			return -EISCONN;
1083		if (srose.srose_ndigis != rose->dest_ndigis)
1084			return -EISCONN;
1085		if (srose.srose_ndigis == rose->dest_ndigis) {
1086			for (n = 0 ; n < srose.srose_ndigis ; n++)
1087				if (ax25cmp(&rose->dest_digis[n],
1088					    &srose.srose_digis[n]))
1089					return -EISCONN;
1090		}
1091		if (srose.srose_family != AF_ROSE)
1092			return -EINVAL;
1093	} else {
1094		if (sk->sk_state != TCP_ESTABLISHED)
1095			return -ENOTCONN;
1096
1097		srose.srose_family = AF_ROSE;
1098		srose.srose_addr   = rose->dest_addr;
1099		srose.srose_call   = rose->dest_call;
1100		srose.srose_ndigis = rose->dest_ndigis;
1101		for (n = 0 ; n < rose->dest_ndigis ; n++)
1102			srose.srose_digis[n] = rose->dest_digis[n];
1103	}
1104
1105	/* Build a packet */
1106	/* Sanity check the packet size */
1107	if (len > 65535)
1108		return -EMSGSIZE;
1109
1110	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1111
1112	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1113		return err;
1114
1115	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1116
1117	/*
1118	 *	Put the data on the end
1119	 */
1120
1121	skb_reset_transport_header(skb);
1122	skb_put(skb, len);
1123
1124	err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1125	if (err) {
1126		kfree_skb(skb);
1127		return err;
1128	}
1129
1130	/*
1131	 *	If the Q BIT Include socket option is in force, the first
1132	 *	byte of the user data is the logical value of the Q Bit.
1133	 */
1134	if (rose->qbitincl) {
1135		qbit = skb->data[0];
1136		skb_pull(skb, 1);
1137	}
1138
1139	/*
1140	 *	Push down the ROSE header
1141	 */
1142	asmptr = skb_push(skb, ROSE_MIN_LEN);
1143
1144	/* Build a ROSE Network header */
1145	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1146	asmptr[1] = (rose->lci >> 0) & 0xFF;
1147	asmptr[2] = ROSE_DATA;
1148
1149	if (qbit)
1150		asmptr[0] |= ROSE_Q_BIT;
1151
1152	if (sk->sk_state != TCP_ESTABLISHED) {
1153		kfree_skb(skb);
1154		return -ENOTCONN;
1155	}
1156
1157#ifdef M_BIT
1158#define ROSE_PACLEN (256-ROSE_MIN_LEN)
1159	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1160		unsigned char header[ROSE_MIN_LEN];
1161		struct sk_buff *skbn;
1162		int frontlen;
1163		int lg;
1164
1165		/* Save a copy of the Header */
1166		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1167		skb_pull(skb, ROSE_MIN_LEN);
1168
1169		frontlen = skb_headroom(skb);
1170
1171		while (skb->len > 0) {
1172			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1173				kfree_skb(skb);
1174				return err;
1175			}
1176
1177			skbn->sk   = sk;
1178			skbn->free = 1;
1179			skbn->arp  = 1;
1180
1181			skb_reserve(skbn, frontlen);
1182
1183			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1184
1185			/* Copy the user data */
1186			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1187			skb_pull(skb, lg);
1188
1189			/* Duplicate the Header */
1190			skb_push(skbn, ROSE_MIN_LEN);
1191			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1192
1193			if (skb->len > 0)
1194				skbn->data[2] |= M_BIT;
1195
1196			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1197		}
1198
1199		skb->free = 1;
1200		kfree_skb(skb);
1201	} else {
1202		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1203	}
1204#else
1205	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1206#endif
1207
1208	rose_kick(sk);
1209
1210	return len;
1211}
1212
1213
1214static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1215			struct msghdr *msg, size_t size, int flags)
1216{
1217	struct sock *sk = sock->sk;
1218	struct rose_sock *rose = rose_sk(sk);
1219	size_t copied;
1220	unsigned char *asmptr;
1221	struct sk_buff *skb;
1222	int n, er, qbit;
1223
1224	/*
1225	 * This works for seqpacket too. The receiver has ordered the queue for
1226	 * us! We do one quick check first though
1227	 */
1228	if (sk->sk_state != TCP_ESTABLISHED)
1229		return -ENOTCONN;
1230
1231	/* Now we can treat all alike */
1232	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
1233		return er;
1234
1235	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1236
1237	skb_pull(skb, ROSE_MIN_LEN);
1238
1239	if (rose->qbitincl) {
1240		asmptr  = skb_push(skb, 1);
1241		*asmptr = qbit;
1242	}
1243
1244	skb_reset_transport_header(skb);
1245	copied     = skb->len;
1246
1247	if (copied > size) {
1248		copied = size;
1249		msg->msg_flags |= MSG_TRUNC;
1250	}
1251
1252	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1253
1254	if (msg->msg_name) {
1255		struct sockaddr_rose *srose;
1256		DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
1257				 msg->msg_name);
1258
1259		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1260		srose = msg->msg_name;
1261		srose->srose_family = AF_ROSE;
1262		srose->srose_addr   = rose->dest_addr;
1263		srose->srose_call   = rose->dest_call;
1264		srose->srose_ndigis = rose->dest_ndigis;
1265		for (n = 0 ; n < rose->dest_ndigis ; n++)
1266			full_srose->srose_digis[n] = rose->dest_digis[n];
1267		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1268	}
1269
1270	skb_free_datagram(sk, skb);
1271
1272	return copied;
1273}
1274
1275
1276static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1277{
1278	struct sock *sk = sock->sk;
1279	struct rose_sock *rose = rose_sk(sk);
1280	void __user *argp = (void __user *)arg;
1281
1282	switch (cmd) {
1283	case TIOCOUTQ: {
1284		long amount;
1285
1286		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1287		if (amount < 0)
1288			amount = 0;
1289		return put_user(amount, (unsigned int __user *) argp);
1290	}
1291
1292	case TIOCINQ: {
1293		struct sk_buff *skb;
1294		long amount = 0L;
1295		/* These two are safe on a single CPU system as only user tasks fiddle here */
1296		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1297			amount = skb->len;
1298		return put_user(amount, (unsigned int __user *) argp);
1299	}
1300
1301	case SIOCGSTAMP:
1302		return sock_get_timestamp(sk, (struct timeval __user *) argp);
1303
1304	case SIOCGSTAMPNS:
1305		return sock_get_timestampns(sk, (struct timespec __user *) argp);
1306
1307	case SIOCGIFADDR:
1308	case SIOCSIFADDR:
1309	case SIOCGIFDSTADDR:
1310	case SIOCSIFDSTADDR:
1311	case SIOCGIFBRDADDR:
1312	case SIOCSIFBRDADDR:
1313	case SIOCGIFNETMASK:
1314	case SIOCSIFNETMASK:
1315	case SIOCGIFMETRIC:
1316	case SIOCSIFMETRIC:
1317		return -EINVAL;
1318
1319	case SIOCADDRT:
1320	case SIOCDELRT:
1321	case SIOCRSCLRRT:
1322		if (!capable(CAP_NET_ADMIN))
1323			return -EPERM;
1324		return rose_rt_ioctl(cmd, argp);
1325
1326	case SIOCRSGCAUSE: {
1327		struct rose_cause_struct rose_cause;
1328		rose_cause.cause      = rose->cause;
1329		rose_cause.diagnostic = rose->diagnostic;
1330		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1331	}
1332
1333	case SIOCRSSCAUSE: {
1334		struct rose_cause_struct rose_cause;
1335		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1336			return -EFAULT;
1337		rose->cause      = rose_cause.cause;
1338		rose->diagnostic = rose_cause.diagnostic;
1339		return 0;
1340	}
1341
1342	case SIOCRSSL2CALL:
1343		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1344		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1345			ax25_listen_release(&rose_callsign, NULL);
1346		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1347			return -EFAULT;
1348		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1349			return ax25_listen_register(&rose_callsign, NULL);
1350
1351		return 0;
1352
1353	case SIOCRSGL2CALL:
1354		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1355
1356	case SIOCRSACCEPT:
1357		if (rose->state == ROSE_STATE_5) {
1358			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1359			rose_start_idletimer(sk);
1360			rose->condition = 0x00;
1361			rose->vs        = 0;
1362			rose->va        = 0;
1363			rose->vr        = 0;
1364			rose->vl        = 0;
1365			rose->state     = ROSE_STATE_3;
1366		}
1367		return 0;
1368
1369	default:
1370		return -ENOIOCTLCMD;
1371	}
1372
1373	return 0;
1374}
1375
1376#ifdef CONFIG_PROC_FS
1377static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1378	__acquires(rose_list_lock)
1379{
1380	spin_lock_bh(&rose_list_lock);
1381	return seq_hlist_start_head(&rose_list, *pos);
1382}
1383
1384static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1385{
1386	return seq_hlist_next(v, &rose_list, pos);
1387}
1388
1389static void rose_info_stop(struct seq_file *seq, void *v)
1390	__releases(rose_list_lock)
1391{
1392	spin_unlock_bh(&rose_list_lock);
1393}
1394
1395static int rose_info_show(struct seq_file *seq, void *v)
1396{
1397	char buf[11], rsbuf[11];
1398
1399	if (v == SEQ_START_TOKEN)
1400		seq_puts(seq,
1401			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1402
1403	else {
1404		struct sock *s = sk_entry(v);
1405		struct rose_sock *rose = rose_sk(s);
1406		const char *devname, *callsign;
1407		const struct net_device *dev = rose->device;
1408
1409		if (!dev)
1410			devname = "???";
1411		else
1412			devname = dev->name;
1413
1414		seq_printf(seq, "%-10s %-9s ",
1415			   rose2asc(rsbuf, &rose->dest_addr),
1416			   ax2asc(buf, &rose->dest_call));
1417
1418		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1419			callsign = "??????-?";
1420		else
1421			callsign = ax2asc(buf, &rose->source_call);
1422
1423		seq_printf(seq,
1424			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1425			rose2asc(rsbuf, &rose->source_addr),
1426			callsign,
1427			devname,
1428			rose->lci & 0x0FFF,
1429			(rose->neighbour) ? rose->neighbour->number : 0,
1430			rose->state,
1431			rose->vs,
1432			rose->vr,
1433			rose->va,
1434			ax25_display_timer(&rose->timer) / HZ,
1435			rose->t1 / HZ,
1436			rose->t2 / HZ,
1437			rose->t3 / HZ,
1438			rose->hb / HZ,
1439			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1440			rose->idle / (60 * HZ),
1441			sk_wmem_alloc_get(s),
1442			sk_rmem_alloc_get(s),
1443			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1444	}
1445
1446	return 0;
1447}
1448
1449static const struct seq_operations rose_info_seqops = {
1450	.start = rose_info_start,
1451	.next = rose_info_next,
1452	.stop = rose_info_stop,
1453	.show = rose_info_show,
1454};
1455
1456static int rose_info_open(struct inode *inode, struct file *file)
1457{
1458	return seq_open(file, &rose_info_seqops);
1459}
1460
1461static const struct file_operations rose_info_fops = {
1462	.owner = THIS_MODULE,
1463	.open = rose_info_open,
1464	.read = seq_read,
1465	.llseek = seq_lseek,
1466	.release = seq_release,
1467};
1468#endif	/* CONFIG_PROC_FS */
1469
1470static const struct net_proto_family rose_family_ops = {
1471	.family		=	PF_ROSE,
1472	.create		=	rose_create,
1473	.owner		=	THIS_MODULE,
1474};
1475
1476static const struct proto_ops rose_proto_ops = {
1477	.family		=	PF_ROSE,
1478	.owner		=	THIS_MODULE,
1479	.release	=	rose_release,
1480	.bind		=	rose_bind,
1481	.connect	=	rose_connect,
1482	.socketpair	=	sock_no_socketpair,
1483	.accept		=	rose_accept,
1484	.getname	=	rose_getname,
1485	.poll		=	datagram_poll,
1486	.ioctl		=	rose_ioctl,
1487	.listen		=	rose_listen,
1488	.shutdown	=	sock_no_shutdown,
1489	.setsockopt	=	rose_setsockopt,
1490	.getsockopt	=	rose_getsockopt,
1491	.sendmsg	=	rose_sendmsg,
1492	.recvmsg	=	rose_recvmsg,
1493	.mmap		=	sock_no_mmap,
1494	.sendpage	=	sock_no_sendpage,
1495};
1496
1497static struct notifier_block rose_dev_notifier = {
1498	.notifier_call	=	rose_device_event,
1499};
1500
1501static struct net_device **dev_rose;
1502
1503static struct ax25_protocol rose_pid = {
1504	.pid	= AX25_P_ROSE,
1505	.func	= rose_route_frame
1506};
1507
1508static struct ax25_linkfail rose_linkfail_notifier = {
1509	.func	= rose_link_failed
1510};
1511
1512static int __init rose_proto_init(void)
1513{
1514	int i;
1515	int rc;
1516
1517	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
 1518		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1519		rc = -EINVAL;
1520		goto out;
1521	}
1522
1523	rc = proto_register(&rose_proto, 0);
1524	if (rc != 0)
1525		goto out;
1526
1527	rose_callsign = null_ax25_address;
1528
1529	dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1530	if (dev_rose == NULL) {
1531		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1532		rc = -ENOMEM;
1533		goto out_proto_unregister;
1534	}
1535
1536	for (i = 0; i < rose_ndevs; i++) {
1537		struct net_device *dev;
1538		char name[IFNAMSIZ];
1539
1540		sprintf(name, "rose%d", i);
1541		dev = alloc_netdev(0, name, rose_setup);
1542		if (!dev) {
1543			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1544			rc = -ENOMEM;
1545			goto fail;
1546		}
1547		rc = register_netdev(dev);
1548		if (rc) {
1549			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1550			free_netdev(dev);
1551			goto fail;
1552		}
1553		rose_set_lockdep_key(dev);
1554		dev_rose[i] = dev;
1555	}
1556
1557	sock_register(&rose_family_ops);
1558	register_netdevice_notifier(&rose_dev_notifier);
1559
1560	ax25_register_pid(&rose_pid);
1561	ax25_linkfail_register(&rose_linkfail_notifier);
1562
1563#ifdef CONFIG_SYSCTL
1564	rose_register_sysctl();
1565#endif
1566	rose_loopback_init();
1567
1568	rose_add_loopback_neigh();
1569
1570	proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops);
1571	proc_create("rose_neigh", S_IRUGO, init_net.proc_net,
1572		    &rose_neigh_fops);
1573	proc_create("rose_nodes", S_IRUGO, init_net.proc_net,
1574		    &rose_nodes_fops);
1575	proc_create("rose_routes", S_IRUGO, init_net.proc_net,
1576		    &rose_routes_fops);
1577out:
1578	return rc;
1579fail:
1580	while (--i >= 0) {
1581		unregister_netdev(dev_rose[i]);
1582		free_netdev(dev_rose[i]);
1583	}
1584	kfree(dev_rose);
1585out_proto_unregister:
1586	proto_unregister(&rose_proto);
1587	goto out;
1588}
1589module_init(rose_proto_init);
1590
1591module_param(rose_ndevs, int, 0);
1592MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
1593
1594MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1595MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1596MODULE_LICENSE("GPL");
1597MODULE_ALIAS_NETPROTO(PF_ROSE);
1598
1599static void __exit rose_exit(void)
1600{
1601	int i;
1602
1603	remove_proc_entry("rose", init_net.proc_net);
1604	remove_proc_entry("rose_neigh", init_net.proc_net);
1605	remove_proc_entry("rose_nodes", init_net.proc_net);
1606	remove_proc_entry("rose_routes", init_net.proc_net);
1607	rose_loopback_clear();
1608
1609	rose_rt_free();
1610
1611	ax25_protocol_release(AX25_P_ROSE);
1612	ax25_linkfail_release(&rose_linkfail_notifier);
1613
1614	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1615		ax25_listen_release(&rose_callsign, NULL);
1616
1617#ifdef CONFIG_SYSCTL
1618	rose_unregister_sysctl();
1619#endif
1620	unregister_netdevice_notifier(&rose_dev_notifier);
1621
1622	sock_unregister(PF_ROSE);
1623
1624	for (i = 0; i < rose_ndevs; i++) {
1625		struct net_device *dev = dev_rose[i];
1626
1627		if (dev) {
1628			unregister_netdev(dev);
1629			free_netdev(dev);
1630		}
1631	}
1632
1633	kfree(dev_rose);
1634	proto_unregister(&rose_proto);
1635}
1636
1637module_exit(rose_exit);
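
For orientation, here is a minimal userspace sketch of how the entry points above are driven: rose_create() via socket(), rose_setsockopt() via setsockopt(), and rose_bind() via bind(). It is an untested illustration, not part of af_rose.c; the ROSE address digits are placeholders, SOL_ROSE is defined as a fallback using its kernel value, and filling srose_call with a shifted AX.25 callsign is assumed to be done with a helper such as libax25's ax25_aton_entry().

/*
 * Userspace sketch (not part of af_rose.c above): exercising rose_create(),
 * rose_setsockopt() and rose_bind().  Untested illustration; the example
 * ROSE address and the callsign-conversion helper are assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/ax25.h>
#include <linux/rose.h>

#ifndef SOL_ROSE
#define SOL_ROSE 260			/* value used by the kernel socket headers */
#endif

int main(void)
{
	struct full_sockaddr_rose addr;
	/* Example 10-digit ROSE address, BCD-packed into 5 bytes (cf. rose2asc()) */
	unsigned char digits[5] = { 0x20, 0x80, 0x19, 0x23, 0x03 };
	int s, t1 = 200;		/* ROSE_T1 is given in whole seconds */

	/* rose_create() only accepts SOCK_SEQPACKET with protocol 0 */
	s = socket(AF_ROSE, SOCK_SEQPACKET, 0);
	if (s < 0) {
		perror("socket(AF_ROSE)");
		return 1;
	}

	/* rose_setsockopt() reads a single int at level SOL_ROSE */
	if (setsockopt(s, SOL_ROSE, ROSE_T1, &t1, sizeof(t1)) < 0)
		perror("setsockopt(ROSE_T1)");

	memset(&addr, 0, sizeof(addr));
	addr.srose_family = AF_ROSE;
	memcpy(addr.srose_addr.rose_addr, digits, sizeof(digits));
	/* addr.srose_call should hold the callsign in shifted AX.25 format,
	 * e.g. (assumed libax25 helper):
	 *	ax25_aton_entry("MYCALL-1", addr.srose_call.ax25_call);
	 */
	addr.srose_ndigis = 0;

	/* rose_bind() accepts either sockaddr_rose or full_sockaddr_rose */
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");

	return 0;
}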
v3.1
   1/*
   2 * This program is free software; you can redistribute it and/or modify
   3 * it under the terms of the GNU General Public License as published by
   4 * the Free Software Foundation; either version 2 of the License, or
   5 * (at your option) any later version.
   6 *
   7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
   8 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
   9 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
  10 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
  11 */
  12
  13#include <linux/capability.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/init.h>
  17#include <linux/errno.h>
  18#include <linux/types.h>
  19#include <linux/socket.h>
  20#include <linux/in.h>
  21#include <linux/slab.h>
  22#include <linux/kernel.h>
  23#include <linux/sched.h>
  24#include <linux/spinlock.h>
  25#include <linux/timer.h>
  26#include <linux/string.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/stat.h>
  30#include <net/net_namespace.h>
  31#include <net/ax25.h>
  32#include <linux/inet.h>
  33#include <linux/netdevice.h>
  34#include <linux/if_arp.h>
  35#include <linux/skbuff.h>
  36#include <net/sock.h>
  37#include <asm/system.h>
  38#include <asm/uaccess.h>
  39#include <linux/fcntl.h>
  40#include <linux/termios.h>
  41#include <linux/mm.h>
  42#include <linux/interrupt.h>
  43#include <linux/notifier.h>
  44#include <net/rose.h>
  45#include <linux/proc_fs.h>
  46#include <linux/seq_file.h>
  47#include <net/tcp_states.h>
  48#include <net/ip.h>
  49#include <net/arp.h>
  50
  51static int rose_ndevs = 10;
  52
  53int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
  54int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
  55int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
  56int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
  57int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
  58int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
  59int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
  60int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
  61int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
  62int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
  63
  64static HLIST_HEAD(rose_list);
  65static DEFINE_SPINLOCK(rose_list_lock);
  66
  67static const struct proto_ops rose_proto_ops;
  68
  69ax25_address rose_callsign;
  70
  71/*
  72 * ROSE network devices are virtual network devices encapsulating ROSE
  73 * frames into AX.25 which will be sent through an AX.25 device, so form a
  74 * special "super class" of normal net devices; split their locks off into a
  75 * separate class since they always nest.
  76 */
  77static struct lock_class_key rose_netdev_xmit_lock_key;
  78static struct lock_class_key rose_netdev_addr_lock_key;
  79
  80static void rose_set_lockdep_one(struct net_device *dev,
  81				 struct netdev_queue *txq,
  82				 void *_unused)
  83{
  84	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
  85}
  86
  87static void rose_set_lockdep_key(struct net_device *dev)
  88{
  89	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
  90	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
  91}
  92
  93/*
  94 *	Convert a ROSE address into text.
  95 */
  96char *rose2asc(char *buf, const rose_address *addr)
  97{
  98	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
  99	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
 100	    addr->rose_addr[4] == 0x00) {
 101		strcpy(buf, "*");
 102	} else {
 103		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
 104						addr->rose_addr[1] & 0xFF,
 105						addr->rose_addr[2] & 0xFF,
 106						addr->rose_addr[3] & 0xFF,
 107						addr->rose_addr[4] & 0xFF);
 108	}
 109
 110	return buf;
 111}
 112
 113/*
 114 *	Compare two ROSE addresses, 0 == equal.
 115 */
 116int rosecmp(rose_address *addr1, rose_address *addr2)
 117{
 118	int i;
 119
 120	for (i = 0; i < 5; i++)
 121		if (addr1->rose_addr[i] != addr2->rose_addr[i])
 122			return 1;
 123
 124	return 0;
 125}
 126
 127/*
 128 *	Compare two ROSE addresses for only mask digits, 0 == equal.
 129 */
 130int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
 131{
 132	unsigned int i, j;
 133
 134	if (mask > 10)
 135		return 1;
 136
 137	for (i = 0; i < mask; i++) {
 138		j = i / 2;
 139
 140		if ((i % 2) != 0) {
 141			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
 142				return 1;
 143		} else {
 144			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
 145				return 1;
 146		}
 147	}
 148
 149	return 0;
 150}
 151
 152/*
 153 *	Socket removal during an interrupt is now safe.
 154 */
 155static void rose_remove_socket(struct sock *sk)
 156{
 157	spin_lock_bh(&rose_list_lock);
 158	sk_del_node_init(sk);
 159	spin_unlock_bh(&rose_list_lock);
 160}
 161
 162/*
 163 *	Kill all bound sockets on a broken link layer connection to a
 164 *	particular neighbour.
 165 */
 166void rose_kill_by_neigh(struct rose_neigh *neigh)
 167{
 168	struct sock *s;
 169	struct hlist_node *node;
 170
 171	spin_lock_bh(&rose_list_lock);
 172	sk_for_each(s, node, &rose_list) {
 173		struct rose_sock *rose = rose_sk(s);
 174
 175		if (rose->neighbour == neigh) {
 176			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 177			rose->neighbour->use--;
 178			rose->neighbour = NULL;
 179		}
 180	}
 181	spin_unlock_bh(&rose_list_lock);
 182}
 183
 184/*
 185 *	Kill all bound sockets on a dropped device.
 186 */
 187static void rose_kill_by_device(struct net_device *dev)
 188{
 189	struct sock *s;
 190	struct hlist_node *node;
 191
 192	spin_lock_bh(&rose_list_lock);
 193	sk_for_each(s, node, &rose_list) {
 194		struct rose_sock *rose = rose_sk(s);
 195
 196		if (rose->device == dev) {
 197			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 198			rose->neighbour->use--;
 199			rose->device = NULL;
 200		}
 201	}
 202	spin_unlock_bh(&rose_list_lock);
 203}
 204
 205/*
 206 *	Handle device status changes.
 207 */
 208static int rose_device_event(struct notifier_block *this, unsigned long event,
 209	void *ptr)
 210{
 211	struct net_device *dev = (struct net_device *)ptr;
 212
 213	if (!net_eq(dev_net(dev), &init_net))
 214		return NOTIFY_DONE;
 215
 216	if (event != NETDEV_DOWN)
 217		return NOTIFY_DONE;
 218
 219	switch (dev->type) {
 220	case ARPHRD_ROSE:
 221		rose_kill_by_device(dev);
 222		break;
 223	case ARPHRD_AX25:
 224		rose_link_device_down(dev);
 225		rose_rt_device_down(dev);
 226		break;
 227	}
 228
 229	return NOTIFY_DONE;
 230}
 231
 232/*
 233 *	Add a socket to the bound sockets list.
 234 */
 235static void rose_insert_socket(struct sock *sk)
 236{
 237
 238	spin_lock_bh(&rose_list_lock);
 239	sk_add_node(sk, &rose_list);
 240	spin_unlock_bh(&rose_list_lock);
 241}
 242
 243/*
 244 *	Find a socket that wants to accept the Call Request we just
 245 *	received.
 246 */
 247static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 248{
 249	struct sock *s;
 250	struct hlist_node *node;
 251
 252	spin_lock_bh(&rose_list_lock);
 253	sk_for_each(s, node, &rose_list) {
 254		struct rose_sock *rose = rose_sk(s);
 255
 256		if (!rosecmp(&rose->source_addr, addr) &&
 257		    !ax25cmp(&rose->source_call, call) &&
 258		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
 259			goto found;
 260	}
 261
 262	sk_for_each(s, node, &rose_list) {
 263		struct rose_sock *rose = rose_sk(s);
 264
 265		if (!rosecmp(&rose->source_addr, addr) &&
 266		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
 267		    s->sk_state == TCP_LISTEN)
 268			goto found;
 269	}
 270	s = NULL;
 271found:
 272	spin_unlock_bh(&rose_list_lock);
 273	return s;
 274}
 275
 276/*
 277 *	Find a connected ROSE socket given my LCI and device.
 278 */
 279struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
 280{
 281	struct sock *s;
 282	struct hlist_node *node;
 283
 284	spin_lock_bh(&rose_list_lock);
 285	sk_for_each(s, node, &rose_list) {
 286		struct rose_sock *rose = rose_sk(s);
 287
 288		if (rose->lci == lci && rose->neighbour == neigh)
 289			goto found;
 290	}
 291	s = NULL;
 292found:
 293	spin_unlock_bh(&rose_list_lock);
 294	return s;
 295}
 296
 297/*
 298 *	Find a unique LCI for a given device.
 299 */
 300unsigned int rose_new_lci(struct rose_neigh *neigh)
 301{
 302	int lci;
 303
 304	if (neigh->dce_mode) {
 305		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
 306			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 307				return lci;
 308	} else {
 309		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
 310			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 311				return lci;
 312	}
 313
 314	return 0;
 315}
 316
 317/*
 318 *	Deferred destroy.
 319 */
 320void rose_destroy_socket(struct sock *);
 321
 322/*
 323 *	Handler for deferred kills.
 324 */
 325static void rose_destroy_timer(unsigned long data)
 326{
 327	rose_destroy_socket((struct sock *)data);
 328}
 329
 330/*
 331 *	This is called from user mode and the timers. Thus it protects itself
 332 *	against interrupt users but doesn't worry about being called during
 333 *	work.  Once it is removed from the queue no interrupt or bottom half
 334 *	will touch it and we are (fairly 8-) ) safe.
 335 */
 336void rose_destroy_socket(struct sock *sk)
 337{
 338	struct sk_buff *skb;
 339
 340	rose_remove_socket(sk);
 341	rose_stop_heartbeat(sk);
 342	rose_stop_idletimer(sk);
 343	rose_stop_timer(sk);
 344
 345	rose_clear_queues(sk);		/* Flush the queues */
 346
 347	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 348		if (skb->sk != sk) {	/* A pending connection */
 349			/* Queue the unaccepted socket for death */
 350			sock_set_flag(skb->sk, SOCK_DEAD);
 351			rose_start_heartbeat(skb->sk);
 352			rose_sk(skb->sk)->state = ROSE_STATE_0;
 353		}
 354
 355		kfree_skb(skb);
 356	}
 357
 358	if (sk_has_allocations(sk)) {
 359		/* Defer: outstanding buffers */
 360		setup_timer(&sk->sk_timer, rose_destroy_timer,
 361				(unsigned long)sk);
 362		sk->sk_timer.expires  = jiffies + 10 * HZ;
 363		add_timer(&sk->sk_timer);
 364	} else
 365		sock_put(sk);
 366}
 367
 368/*
 369 *	Handling for system calls applied via the various interfaces to a
 370 *	ROSE socket object.
 371 */
 372
 373static int rose_setsockopt(struct socket *sock, int level, int optname,
 374	char __user *optval, unsigned int optlen)
 375{
 376	struct sock *sk = sock->sk;
 377	struct rose_sock *rose = rose_sk(sk);
 378	int opt;
 379
 380	if (level != SOL_ROSE)
 381		return -ENOPROTOOPT;
 382
 383	if (optlen < sizeof(int))
 384		return -EINVAL;
 385
 386	if (get_user(opt, (int __user *)optval))
 387		return -EFAULT;
 388
 389	switch (optname) {
 390	case ROSE_DEFER:
 391		rose->defer = opt ? 1 : 0;
 392		return 0;
 393
 394	case ROSE_T1:
 395		if (opt < 1)
 396			return -EINVAL;
 397		rose->t1 = opt * HZ;
 398		return 0;
 399
 400	case ROSE_T2:
 401		if (opt < 1)
 402			return -EINVAL;
 403		rose->t2 = opt * HZ;
 404		return 0;
 405
 406	case ROSE_T3:
 407		if (opt < 1)
 408			return -EINVAL;
 409		rose->t3 = opt * HZ;
 410		return 0;
 411
 412	case ROSE_HOLDBACK:
 413		if (opt < 1)
 414			return -EINVAL;
 415		rose->hb = opt * HZ;
 416		return 0;
 417
 418	case ROSE_IDLE:
 419		if (opt < 0)
 420			return -EINVAL;
 421		rose->idle = opt * 60 * HZ;
 422		return 0;
 423
 424	case ROSE_QBITINCL:
 425		rose->qbitincl = opt ? 1 : 0;
 426		return 0;
 427
 428	default:
 429		return -ENOPROTOOPT;
 430	}
 431}
 432
 433static int rose_getsockopt(struct socket *sock, int level, int optname,
 434	char __user *optval, int __user *optlen)
 435{
 436	struct sock *sk = sock->sk;
 437	struct rose_sock *rose = rose_sk(sk);
 438	int val = 0;
 439	int len;
 440
 441	if (level != SOL_ROSE)
 442		return -ENOPROTOOPT;
 443
 444	if (get_user(len, optlen))
 445		return -EFAULT;
 446
 447	if (len < 0)
 448		return -EINVAL;
 449
 450	switch (optname) {
 451	case ROSE_DEFER:
 452		val = rose->defer;
 453		break;
 454
 455	case ROSE_T1:
 456		val = rose->t1 / HZ;
 457		break;
 458
 459	case ROSE_T2:
 460		val = rose->t2 / HZ;
 461		break;
 462
 463	case ROSE_T3:
 464		val = rose->t3 / HZ;
 465		break;
 466
 467	case ROSE_HOLDBACK:
 468		val = rose->hb / HZ;
 469		break;
 470
 471	case ROSE_IDLE:
 472		val = rose->idle / (60 * HZ);
 473		break;
 474
 475	case ROSE_QBITINCL:
 476		val = rose->qbitincl;
 477		break;
 478
 479	default:
 480		return -ENOPROTOOPT;
 481	}
 482
 483	len = min_t(unsigned int, len, sizeof(int));
 484
 485	if (put_user(len, optlen))
 486		return -EFAULT;
 487
 488	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
 489}
 490
 491static int rose_listen(struct socket *sock, int backlog)
 492{
 493	struct sock *sk = sock->sk;
 494
 495	if (sk->sk_state != TCP_LISTEN) {
 496		struct rose_sock *rose = rose_sk(sk);
 497
 498		rose->dest_ndigis = 0;
 499		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
 500		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
 501		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
 502		sk->sk_max_ack_backlog = backlog;
 503		sk->sk_state           = TCP_LISTEN;
 504		return 0;
 505	}
 506
 507	return -EOPNOTSUPP;
 508}
 509
 510static struct proto rose_proto = {
 511	.name	  = "ROSE",
 512	.owner	  = THIS_MODULE,
 513	.obj_size = sizeof(struct rose_sock),
 514};
 515
 516static int rose_create(struct net *net, struct socket *sock, int protocol,
 517		       int kern)
 518{
 519	struct sock *sk;
 520	struct rose_sock *rose;
 521
 522	if (!net_eq(net, &init_net))
 523		return -EAFNOSUPPORT;
 524
 525	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 526		return -ESOCKTNOSUPPORT;
 527
 528	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
 529	if (sk == NULL)
 530		return -ENOMEM;
 531
 532	rose = rose_sk(sk);
 533
 534	sock_init_data(sock, sk);
 535
 536	skb_queue_head_init(&rose->ack_queue);
 537#ifdef M_BIT
 538	skb_queue_head_init(&rose->frag_queue);
 539	rose->fraglen    = 0;
 540#endif
 541
 542	sock->ops    = &rose_proto_ops;
 543	sk->sk_protocol = protocol;
 544
 545	init_timer(&rose->timer);
 546	init_timer(&rose->idletimer);
 547
 548	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
 549	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
 550	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
 551	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
 552	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
 553
 554	rose->state = ROSE_STATE_0;
 555
 556	return 0;
 557}
 558
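/*
 *	Create the child socket for an incoming call, copying the listening
 *	socket's type, buffer sizes, flags and ROSE timer settings.
 */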
 559static struct sock *rose_make_new(struct sock *osk)
 560{
 561	struct sock *sk;
 562	struct rose_sock *rose, *orose;
 563
 564	if (osk->sk_type != SOCK_SEQPACKET)
 565		return NULL;
 566
 567	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
 568	if (sk == NULL)
 569		return NULL;
 570
 571	rose = rose_sk(sk);
 572
 573	sock_init_data(NULL, sk);
 574
 575	skb_queue_head_init(&rose->ack_queue);
 576#ifdef M_BIT
 577	skb_queue_head_init(&rose->frag_queue);
 578	rose->fraglen  = 0;
 579#endif
 580
 581	sk->sk_type     = osk->sk_type;
 582	sk->sk_priority = osk->sk_priority;
 583	sk->sk_protocol = osk->sk_protocol;
 584	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 585	sk->sk_sndbuf   = osk->sk_sndbuf;
 586	sk->sk_state    = TCP_ESTABLISHED;
 587	sock_copy_flags(sk, osk);
 588
 589	init_timer(&rose->timer);
 590	init_timer(&rose->idletimer);
 591
 592	orose		= rose_sk(osk);
 593	rose->t1	= orose->t1;
 594	rose->t2	= orose->t2;
 595	rose->t3	= orose->t3;
 596	rose->hb	= orose->hb;
 597	rose->idle	= orose->idle;
 598	rose->defer	= orose->defer;
 599	rose->device	= orose->device;
 600	rose->qbitincl	= orose->qbitincl;
 601
 602	return sk;
 603}
 604
 605static int rose_release(struct socket *sock)
 606{
 607	struct sock *sk = sock->sk;
 608	struct rose_sock *rose;
 609
 610	if (sk == NULL) return 0;
 611
 612	sock_hold(sk);
 613	sock_orphan(sk);
 614	lock_sock(sk);
 615	rose = rose_sk(sk);
 616
 617	switch (rose->state) {
 618	case ROSE_STATE_0:
 619		release_sock(sk);
 620		rose_disconnect(sk, 0, -1, -1);
 621		lock_sock(sk);
 622		rose_destroy_socket(sk);
 623		break;
 624
 625	case ROSE_STATE_2:
 626		rose->neighbour->use--;
 627		release_sock(sk);
 628		rose_disconnect(sk, 0, -1, -1);
 629		lock_sock(sk);
 630		rose_destroy_socket(sk);
 631		break;
 632
 633	case ROSE_STATE_1:
 634	case ROSE_STATE_3:
 635	case ROSE_STATE_4:
 636	case ROSE_STATE_5:
 637		rose_clear_queues(sk);
 638		rose_stop_idletimer(sk);
 639		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
 640		rose_start_t3timer(sk);
 641		rose->state  = ROSE_STATE_2;
 642		sk->sk_state    = TCP_CLOSE;
 643		sk->sk_shutdown |= SEND_SHUTDOWN;
 644		sk->sk_state_change(sk);
 645		sock_set_flag(sk, SOCK_DEAD);
 646		sock_set_flag(sk, SOCK_DESTROY);
 647		break;
 648
 649	default:
 650		break;
 651	}
 652
 653	sock->sk = NULL;
 654	release_sock(sk);
 655	sock_put(sk);
 656
 657	return 0;
 658}
 659
 660static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 661{
 662	struct sock *sk = sock->sk;
 663	struct rose_sock *rose = rose_sk(sk);
 664	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 665	struct net_device *dev;
 666	ax25_address *source;
 667	ax25_uid_assoc *user;
 668	int n;
 669
 670	if (!sock_flag(sk, SOCK_ZAPPED))
 671		return -EINVAL;
 672
 673	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 674		return -EINVAL;
 675
 676	if (addr->srose_family != AF_ROSE)
 677		return -EINVAL;
 678
 679	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 680		return -EINVAL;
 681
 682	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 683		return -EINVAL;
 684
 685	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
 686		return -EADDRNOTAVAIL;
 687
 688	source = &addr->srose_call;
 689
 690	user = ax25_findbyuid(current_euid());
 691	if (user) {
 692		rose->source_call = user->call;
 693		ax25_uid_put(user);
 694	} else {
 695		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
 696			return -EACCES;
 697		rose->source_call   = *source;
 698	}
 699
 700	rose->source_addr   = addr->srose_addr;
 701	rose->device        = dev;
 702	rose->source_ndigis = addr->srose_ndigis;
 703
 704	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 705		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 706		for (n = 0 ; n < addr->srose_ndigis ; n++)
 707			rose->source_digis[n] = full_addr->srose_digis[n];
 708	} else {
 709		if (rose->source_ndigis == 1) {
 710			rose->source_digis[0] = addr->srose_digi;
 711		}
 712	}
 713
 714	rose_insert_socket(sk);
 715
 716	sock_reset_flag(sk, SOCK_ZAPPED);
 717
 718	return 0;
 719}
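/*
 *	Usage sketch (editorial addition; rose_aton() and ax25_aton_entry()
 *	are assumed to come from the libax25 user-space library, and the
 *	address and callsign are placeholders):
 *
 *		struct sockaddr_rose addr;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.srose_family = AF_ROSE;
 *		rose_aton("2080192303", addr.srose_addr.rose_addr);
 *		ax25_aton_entry("G4KLX-5", addr.srose_call.ax25_call);
 *
 *		bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	The ROSE address must belong to a local rose interface, otherwise
 *	rose_dev_get() above fails and bind() returns -EADDRNOTAVAIL.
 */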
 720
 721static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
 722{
 723	struct sock *sk = sock->sk;
 724	struct rose_sock *rose = rose_sk(sk);
 725	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 726	unsigned char cause, diagnostic;
 727	struct net_device *dev;
 728	ax25_uid_assoc *user;
 729	int n, err = 0;
 730
 731	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 732		return -EINVAL;
 733
 734	if (addr->srose_family != AF_ROSE)
 735		return -EINVAL;
 736
 737	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 738		return -EINVAL;
 739
 740	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 741		return -EINVAL;
 742
 743	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
 744	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
 745		return -EINVAL;
 746
 747	lock_sock(sk);
 748
 749	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
 750		/* Connect completed during an ERESTARTSYS event */
 751		sock->state = SS_CONNECTED;
 752		goto out_release;
 753	}
 754
 755	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
 756		sock->state = SS_UNCONNECTED;
 757		err = -ECONNREFUSED;
 758		goto out_release;
 759	}
 760
 761	if (sk->sk_state == TCP_ESTABLISHED) {
 762		/* No reconnect on a seqpacket socket */
 763		err = -EISCONN;
 764		goto out_release;
 765	}
 766
 767	sk->sk_state   = TCP_CLOSE;
 768	sock->state = SS_UNCONNECTED;
 769
 770	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
 771					 &diagnostic, 0);
 772	if (!rose->neighbour) {
 773		err = -ENETUNREACH;
 774		goto out_release;
 775	}
 776
 777	rose->lci = rose_new_lci(rose->neighbour);
 778	if (!rose->lci) {
 779		err = -ENETUNREACH;
 780		goto out_release;
 781	}
 782
 783	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding here may or may not work */
 784		sock_reset_flag(sk, SOCK_ZAPPED);
 785
 786		if ((dev = rose_dev_first()) == NULL) {
 787			err = -ENETUNREACH;
 788			goto out_release;
 789		}
 790
 791		user = ax25_findbyuid(current_euid());
 792		if (!user) {
 793			err = -EINVAL;
 794			goto out_release;
 795		}
 796
 797		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
 798		rose->source_call = user->call;
 799		rose->device      = dev;
 800		ax25_uid_put(user);
 801
 802		rose_insert_socket(sk);		/* Finish the bind */
 803	}
 804	rose->dest_addr   = addr->srose_addr;
 805	rose->dest_call   = addr->srose_call;
 806	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
 807	rose->dest_ndigis = addr->srose_ndigis;
 808
 809	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 810		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 811		for (n = 0 ; n < addr->srose_ndigis ; n++)
 812			rose->dest_digis[n] = full_addr->srose_digis[n];
 813	} else {
 814		if (rose->dest_ndigis == 1) {
 815			rose->dest_digis[0] = addr->srose_digi;
 816		}
 817	}
 818
 819	/* Move to connecting socket, start sending Connect Requests */
 820	sock->state   = SS_CONNECTING;
 821	sk->sk_state     = TCP_SYN_SENT;
 822
 823	rose->state = ROSE_STATE_1;
 824
 825	rose->neighbour->use++;
 826
 827	rose_write_internal(sk, ROSE_CALL_REQUEST);
 828	rose_start_heartbeat(sk);
 829	rose_start_t1timer(sk);
 830
 831	/* Now block until the call completes, unless O_NONBLOCK was requested */
 832	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
 833		err = -EINPROGRESS;
 834		goto out_release;
 835	}
 836
 837	/*
 838	 * A Clear Request, a timer expiry or a routing failure will take
 839	 * the socket back to closed.
 840	 */
 841	if (sk->sk_state == TCP_SYN_SENT) {
 842		DEFINE_WAIT(wait);
 843
 844		for (;;) {
 845			prepare_to_wait(sk_sleep(sk), &wait,
 846					TASK_INTERRUPTIBLE);
 847			if (sk->sk_state != TCP_SYN_SENT)
 848				break;
 849			if (!signal_pending(current)) {
 850				release_sock(sk);
 851				schedule();
 852				lock_sock(sk);
 853				continue;
 854			}
 855			err = -ERESTARTSYS;
 856			break;
 857		}
 858		finish_wait(sk_sleep(sk), &wait);
 859
 860		if (err)
 861			goto out_release;
 862	}
 863
 864	if (sk->sk_state != TCP_ESTABLISHED) {
 865		sock->state = SS_UNCONNECTED;
 866		err = sock_error(sk);	/* Always set at this point */
 867		goto out_release;
 868	}
 869
 870	sock->state = SS_CONNECTED;
 871
 872out_release:
 873	release_sock(sk);
 874
 875	return err;
 876}
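/*
 *	Note (editorial): on a non-blocking socket the path above returns
 *	-EINPROGRESS as soon as the Call Request has been queued; completion
 *	can then be waited for with poll()/select() and the final status
 *	read back with getsockopt(SOL_SOCKET, SO_ERROR, ...), as with other
 *	connection-oriented socket families.
 */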
 877
 878static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
 879{
 880	struct sk_buff *skb;
 881	struct sock *newsk;
 882	DEFINE_WAIT(wait);
 883	struct sock *sk;
 884	int err = 0;
 885
 886	if ((sk = sock->sk) == NULL)
 887		return -EINVAL;
 888
 889	lock_sock(sk);
 890	if (sk->sk_type != SOCK_SEQPACKET) {
 891		err = -EOPNOTSUPP;
 892		goto out_release;
 893	}
 894
 895	if (sk->sk_state != TCP_LISTEN) {
 896		err = -EINVAL;
 897		goto out_release;
 898	}
 899
 900	/*
 901	 *	The receive queue at this point holds the sk_buffs of saved
 902	 *	incoming Call Requests, each carrying a child socket ready for accept()
 903	 */
 904	for (;;) {
 905		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 906
 907		skb = skb_dequeue(&sk->sk_receive_queue);
 908		if (skb)
 909			break;
 910
 911		if (flags & O_NONBLOCK) {
 912			err = -EWOULDBLOCK;
 913			break;
 914		}
 915		if (!signal_pending(current)) {
 916			release_sock(sk);
 917			schedule();
 918			lock_sock(sk);
 919			continue;
 920		}
 921		err = -ERESTARTSYS;
 922		break;
 923	}
 924	finish_wait(sk_sleep(sk), &wait);
 925	if (err)
 926		goto out_release;
 927
 928	newsk = skb->sk;
 929	sock_graft(newsk, newsock);
 930
 931	/* Now attach up the new socket */
 932	skb->sk = NULL;
 933	kfree_skb(skb);
 934	sk->sk_ack_backlog--;
 935
 936out_release:
 937	release_sock(sk);
 938
 939	return err;
 940}
 941
 942static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
 943	int *uaddr_len, int peer)
 944{
 945	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
 946	struct sock *sk = sock->sk;
 947	struct rose_sock *rose = rose_sk(sk);
 948	int n;
 949
 950	memset(srose, 0, sizeof(*srose));
 951	if (peer != 0) {
 952		if (sk->sk_state != TCP_ESTABLISHED)
 953			return -ENOTCONN;
 954		srose->srose_family = AF_ROSE;
 955		srose->srose_addr   = rose->dest_addr;
 956		srose->srose_call   = rose->dest_call;
 957		srose->srose_ndigis = rose->dest_ndigis;
 958		for (n = 0; n < rose->dest_ndigis; n++)
 959			srose->srose_digis[n] = rose->dest_digis[n];
 960	} else {
 961		srose->srose_family = AF_ROSE;
 962		srose->srose_addr   = rose->source_addr;
 963		srose->srose_call   = rose->source_call;
 964		srose->srose_ndigis = rose->source_ndigis;
 965		for (n = 0; n < rose->source_ndigis; n++)
 966			srose->srose_digis[n] = rose->source_digis[n];
 967	}
 968
 969	*uaddr_len = sizeof(struct full_sockaddr_rose);
 970	return 0;
 971}
 972
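/*
 *	Handle an incoming Call Request handed up by the ROSE routing code.
 *	A child socket is cloned from the matching listener and either
 *	answered immediately with a Call Accepted or, if ROSE_DEFER is set
 *	on the listener, parked in state 5 until the application confirms
 *	the call with the SIOCRSACCEPT ioctl.
 */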
 973int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
 974{
 975	struct sock *sk;
 976	struct sock *make;
 977	struct rose_sock *make_rose;
 978	struct rose_facilities_struct facilities;
 979	int n;
 980
 981	skb->sk = NULL;		/* Initially we don't know who it's for */
 982
 983	/*
 984	 *	skb->data points to the rose frame start
 985	 */
 986	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
 987
 988	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
 989				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
 990				   &facilities)) {
 991		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
 992		return 0;
 993	}
 994
 995	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
 996
 997	/*
 998	 * We can't accept the Call Request.
 999	 */
1000	if (sk == NULL || sk_acceptq_is_full(sk) ||
1001	    (make = rose_make_new(sk)) == NULL) {
1002		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
1003		return 0;
1004	}
1005
1006	skb->sk     = make;
1007	make->sk_state = TCP_ESTABLISHED;
1008	make_rose = rose_sk(make);
1009
1010	make_rose->lci           = lci;
1011	make_rose->dest_addr     = facilities.dest_addr;
1012	make_rose->dest_call     = facilities.dest_call;
1013	make_rose->dest_ndigis   = facilities.dest_ndigis;
1014	for (n = 0 ; n < facilities.dest_ndigis ; n++)
1015		make_rose->dest_digis[n] = facilities.dest_digis[n];
1016	make_rose->source_addr   = facilities.source_addr;
1017	make_rose->source_call   = facilities.source_call;
1018	make_rose->source_ndigis = facilities.source_ndigis;
1019	for (n = 0 ; n < facilities.source_ndigis ; n++)
1020		make_rose->source_digis[n] = facilities.source_digis[n];
1021	make_rose->neighbour     = neigh;
1022	make_rose->device        = dev;
1023	make_rose->facilities    = facilities;
1024
1025	make_rose->neighbour->use++;
1026
1027	if (rose_sk(sk)->defer) {
1028		make_rose->state = ROSE_STATE_5;
1029	} else {
1030		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1031		make_rose->state = ROSE_STATE_3;
1032		rose_start_idletimer(make);
1033	}
1034
1035	make_rose->condition = 0x00;
1036	make_rose->vs        = 0;
1037	make_rose->va        = 0;
1038	make_rose->vr        = 0;
1039	make_rose->vl        = 0;
1040	sk->sk_ack_backlog++;
1041
1042	rose_insert_socket(make);
1043
1044	skb_queue_head(&sk->sk_receive_queue, skb);
1045
1046	rose_start_heartbeat(make);
1047
1048	if (!sock_flag(sk, SOCK_DEAD))
1049		sk->sk_data_ready(sk, skb->len);
1050
1051	return 1;
1052}
1053
1054static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1055			struct msghdr *msg, size_t len)
1056{
1057	struct sock *sk = sock->sk;
1058	struct rose_sock *rose = rose_sk(sk);
1059	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
1060	int err;
1061	struct full_sockaddr_rose srose;
1062	struct sk_buff *skb;
1063	unsigned char *asmptr;
1064	int n, size, qbit = 0;
1065
1066	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1067		return -EINVAL;
1068
1069	if (sock_flag(sk, SOCK_ZAPPED))
1070		return -EADDRNOTAVAIL;
1071
1072	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1073		send_sig(SIGPIPE, current, 0);
1074		return -EPIPE;
1075	}
1076
1077	if (rose->neighbour == NULL || rose->device == NULL)
1078		return -ENETUNREACH;
1079
1080	if (usrose != NULL) {
1081		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1082			return -EINVAL;
1083		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1084		memcpy(&srose, usrose, msg->msg_namelen);
1085		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1086		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1087			return -EISCONN;
1088		if (srose.srose_ndigis != rose->dest_ndigis)
1089			return -EISCONN;
1090		if (srose.srose_ndigis == rose->dest_ndigis) {
1091			for (n = 0 ; n < srose.srose_ndigis ; n++)
1092				if (ax25cmp(&rose->dest_digis[n],
1093					    &srose.srose_digis[n]))
1094					return -EISCONN;
1095		}
1096		if (srose.srose_family != AF_ROSE)
1097			return -EINVAL;
1098	} else {
1099		if (sk->sk_state != TCP_ESTABLISHED)
1100			return -ENOTCONN;
1101
1102		srose.srose_family = AF_ROSE;
1103		srose.srose_addr   = rose->dest_addr;
1104		srose.srose_call   = rose->dest_call;
1105		srose.srose_ndigis = rose->dest_ndigis;
1106		for (n = 0 ; n < rose->dest_ndigis ; n++)
1107			srose.srose_digis[n] = rose->dest_digis[n];
1108	}
1109
1110	/* Build a packet */
1111	/* Sanity check the packet size */
1112	if (len > 65535)
1113		return -EMSGSIZE;
1114
1115	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1116
1117	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1118		return err;
1119
1120	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1121
1122	/*
1123	 *	Put the data on the end
1124	 */
1125
1126	skb_reset_transport_header(skb);
1127	skb_put(skb, len);
1128
1129	err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1130	if (err) {
1131		kfree_skb(skb);
1132		return err;
1133	}
1134
1135	/*
1136	 *	If the Q BIT Include socket option is in force, the first
1137	 *	byte of the user data is the logical value of the Q Bit.
1138	 */
1139	if (rose->qbitincl) {
1140		qbit = skb->data[0];
1141		skb_pull(skb, 1);
1142	}
1143
1144	/*
1145	 *	Push down the ROSE header
1146	 */
1147	asmptr = skb_push(skb, ROSE_MIN_LEN);
1148
1149	/* Build a ROSE Network header */
1150	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1151	asmptr[1] = (rose->lci >> 0) & 0xFF;
1152	asmptr[2] = ROSE_DATA;
1153
1154	if (qbit)
1155		asmptr[0] |= ROSE_Q_BIT;
1156
1157	if (sk->sk_state != TCP_ESTABLISHED) {
1158		kfree_skb(skb);
1159		return -ENOTCONN;
1160	}
1161
1162#ifdef M_BIT
1163#define ROSE_PACLEN (256-ROSE_MIN_LEN)
1164	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1165		unsigned char header[ROSE_MIN_LEN];
1166		struct sk_buff *skbn;
1167		int frontlen;
1168		int lg;
1169
1170		/* Save a copy of the Header */
1171		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1172		skb_pull(skb, ROSE_MIN_LEN);
1173
1174		frontlen = skb_headroom(skb);
1175
1176		while (skb->len > 0) {
1177			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1178				kfree_skb(skb);
1179				return err;
1180			}
1181
1182			skbn->sk   = sk;
1183			skbn->free = 1;
1184			skbn->arp  = 1;
1185
1186			skb_reserve(skbn, frontlen);
1187
1188			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1189
1190			/* Copy the user data */
1191			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1192			skb_pull(skb, lg);
1193
1194			/* Duplicate the Header */
1195			skb_push(skbn, ROSE_MIN_LEN);
1196			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1197
1198			if (skb->len > 0)
1199				skbn->data[2] |= M_BIT;
1200
1201			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1202		}
1203
1204		skb->free = 1;
1205		kfree_skb(skb);
1206	} else {
1207		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1208	}
1209#else
1210	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1211#endif
1212
1213	rose_kick(sk);
1214
1215	return len;
1216}
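/*
 *	Usage sketch (editorial addition): with ROSE_QBITINCL enabled the
 *	first byte of every message is interpreted as the Q bit rather than
 *	user data, so a qualified packet is sent as
 *
 *		char buf[1 + len];
 *
 *		buf[0] = 1;			(Q bit set)
 *		memcpy(buf + 1, payload, len);
 *		send(fd, buf, 1 + len, 0);
 *
 *	rose_recvmsg() below performs the reverse mapping on reception.
 *	(The #ifdef M_BIT fragmentation path above is only compiled when
 *	M_BIT is defined, which does not appear to be the case here.)
 */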
1217
1218
1219static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1220			struct msghdr *msg, size_t size, int flags)
1221{
1222	struct sock *sk = sock->sk;
1223	struct rose_sock *rose = rose_sk(sk);
1224	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
1225	size_t copied;
1226	unsigned char *asmptr;
1227	struct sk_buff *skb;
1228	int n, er, qbit;
1229
1230	/*
1231	 * This works for seqpacket too. The receiver has ordered the queue for
1232	 * us! We do one quick check first though
1233	 */
1234	if (sk->sk_state != TCP_ESTABLISHED)
1235		return -ENOTCONN;
1236
1237	/* Now we can treat all alike */
1238	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
1239		return er;
1240
1241	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1242
1243	skb_pull(skb, ROSE_MIN_LEN);
1244
1245	if (rose->qbitincl) {
1246		asmptr  = skb_push(skb, 1);
1247		*asmptr = qbit;
1248	}
1249
1250	skb_reset_transport_header(skb);
1251	copied     = skb->len;
1252
1253	if (copied > size) {
1254		copied = size;
1255		msg->msg_flags |= MSG_TRUNC;
1256	}
1257
1258	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1259
1260	if (srose != NULL) {
1261		srose->srose_family = AF_ROSE;
1262		srose->srose_addr   = rose->dest_addr;
1263		srose->srose_call   = rose->dest_call;
1264		srose->srose_ndigis = rose->dest_ndigis;
1265		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
1266			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
1267			for (n = 0 ; n < rose->dest_ndigis ; n++)
1268				full_srose->srose_digis[n] = rose->dest_digis[n];
1269			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1270		} else {
1271			if (rose->dest_ndigis >= 1) {
1272				srose->srose_ndigis = 1;
1273				srose->srose_digi = rose->dest_digis[0];
1274			}
1275			msg->msg_namelen = sizeof(struct sockaddr_rose);
1276		}
1277	}
1278
1279	skb_free_datagram(sk, skb);
1280
1281	return copied;
1282}
1283
1284
1285static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1286{
1287	struct sock *sk = sock->sk;
1288	struct rose_sock *rose = rose_sk(sk);
1289	void __user *argp = (void __user *)arg;
1290
1291	switch (cmd) {
1292	case TIOCOUTQ: {
1293		long amount;
1294
1295		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1296		if (amount < 0)
1297			amount = 0;
1298		return put_user(amount, (unsigned int __user *) argp);
1299	}
1300
1301	case TIOCINQ: {
1302		struct sk_buff *skb;
1303		long amount = 0L;
1304		/* These two are safe on a single CPU system as only user tasks fiddle here */
1305		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1306			amount = skb->len;
1307		return put_user(amount, (unsigned int __user *) argp);
1308	}
1309
1310	case SIOCGSTAMP:
1311		return sock_get_timestamp(sk, (struct timeval __user *) argp);
1312
1313	case SIOCGSTAMPNS:
1314		return sock_get_timestampns(sk, (struct timespec __user *) argp);
1315
1316	case SIOCGIFADDR:
1317	case SIOCSIFADDR:
1318	case SIOCGIFDSTADDR:
1319	case SIOCSIFDSTADDR:
1320	case SIOCGIFBRDADDR:
1321	case SIOCSIFBRDADDR:
1322	case SIOCGIFNETMASK:
1323	case SIOCSIFNETMASK:
1324	case SIOCGIFMETRIC:
1325	case SIOCSIFMETRIC:
1326		return -EINVAL;
1327
1328	case SIOCADDRT:
1329	case SIOCDELRT:
1330	case SIOCRSCLRRT:
1331		if (!capable(CAP_NET_ADMIN))
1332			return -EPERM;
1333		return rose_rt_ioctl(cmd, argp);
1334
1335	case SIOCRSGCAUSE: {
1336		struct rose_cause_struct rose_cause;
1337		rose_cause.cause      = rose->cause;
1338		rose_cause.diagnostic = rose->diagnostic;
1339		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1340	}
1341
1342	case SIOCRSSCAUSE: {
1343		struct rose_cause_struct rose_cause;
1344		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1345			return -EFAULT;
1346		rose->cause      = rose_cause.cause;
1347		rose->diagnostic = rose_cause.diagnostic;
1348		return 0;
1349	}
1350
1351	case SIOCRSSL2CALL:
1352		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1353		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1354			ax25_listen_release(&rose_callsign, NULL);
1355		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1356			return -EFAULT;
1357		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1358			return ax25_listen_register(&rose_callsign, NULL);
1359
1360		return 0;
1361
1362	case SIOCRSGL2CALL:
1363		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1364
1365	case SIOCRSACCEPT:
1366		if (rose->state == ROSE_STATE_5) {
1367			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1368			rose_start_idletimer(sk);
1369			rose->condition = 0x00;
1370			rose->vs        = 0;
1371			rose->va        = 0;
1372			rose->vr        = 0;
1373			rose->vl        = 0;
1374			rose->state     = ROSE_STATE_3;
1375		}
1376		return 0;
1377
1378	default:
1379		return -ENOIOCTLCMD;
1380	}
1381
1382	return 0;
1383}
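/*
 *	Usage sketch (editorial addition): the cause and diagnostic bytes of
 *	the last clearing are read back from user space with
 *
 *		struct rose_cause_struct rc;
 *
 *		ioctl(fd, SIOCRSGCAUSE, &rc);
 *
 *	and can be preset for an outgoing Clear Request with SIOCRSSCAUSE.
 */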
1384
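/*
 *	/proc/net/rose: one line per ROSE socket, in the column layout
 *	printed by rose_info_show() below.
 */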
1385#ifdef CONFIG_PROC_FS
1386static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1387	__acquires(rose_list_lock)
1388{
1389	spin_lock_bh(&rose_list_lock);
1390	return seq_hlist_start_head(&rose_list, *pos);
1391}
1392
1393static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1394{
1395	return seq_hlist_next(v, &rose_list, pos);
1396}
1397
1398static void rose_info_stop(struct seq_file *seq, void *v)
1399	__releases(rose_list_lock)
1400{
1401	spin_unlock_bh(&rose_list_lock);
1402}
1403
1404static int rose_info_show(struct seq_file *seq, void *v)
1405{
1406	char buf[11], rsbuf[11];
1407
1408	if (v == SEQ_START_TOKEN)
1409		seq_puts(seq,
1410			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1411
1412	else {
1413		struct sock *s = sk_entry(v);
1414		struct rose_sock *rose = rose_sk(s);
1415		const char *devname, *callsign;
1416		const struct net_device *dev = rose->device;
1417
1418		if (!dev)
1419			devname = "???";
1420		else
1421			devname = dev->name;
1422
1423		seq_printf(seq, "%-10s %-9s ",
1424			   rose2asc(rsbuf, &rose->dest_addr),
1425			   ax2asc(buf, &rose->dest_call));
1426
1427		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1428			callsign = "??????-?";
1429		else
1430			callsign = ax2asc(buf, &rose->source_call);
1431
1432		seq_printf(seq,
1433			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1434			rose2asc(rsbuf, &rose->source_addr),
1435			callsign,
1436			devname,
1437			rose->lci & 0x0FFF,
1438			(rose->neighbour) ? rose->neighbour->number : 0,
1439			rose->state,
1440			rose->vs,
1441			rose->vr,
1442			rose->va,
1443			ax25_display_timer(&rose->timer) / HZ,
1444			rose->t1 / HZ,
1445			rose->t2 / HZ,
1446			rose->t3 / HZ,
1447			rose->hb / HZ,
1448			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1449			rose->idle / (60 * HZ),
1450			sk_wmem_alloc_get(s),
1451			sk_rmem_alloc_get(s),
1452			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1453	}
1454
1455	return 0;
1456}
1457
1458static const struct seq_operations rose_info_seqops = {
1459	.start = rose_info_start,
1460	.next = rose_info_next,
1461	.stop = rose_info_stop,
1462	.show = rose_info_show,
1463};
1464
1465static int rose_info_open(struct inode *inode, struct file *file)
1466{
1467	return seq_open(file, &rose_info_seqops);
1468}
1469
1470static const struct file_operations rose_info_fops = {
1471	.owner = THIS_MODULE,
1472	.open = rose_info_open,
1473	.read = seq_read,
1474	.llseek = seq_lseek,
1475	.release = seq_release,
1476};
1477#endif	/* CONFIG_PROC_FS */
1478
1479static const struct net_proto_family rose_family_ops = {
1480	.family		=	PF_ROSE,
1481	.create		=	rose_create,
1482	.owner		=	THIS_MODULE,
1483};
1484
1485static const struct proto_ops rose_proto_ops = {
1486	.family		=	PF_ROSE,
1487	.owner		=	THIS_MODULE,
1488	.release	=	rose_release,
1489	.bind		=	rose_bind,
1490	.connect	=	rose_connect,
1491	.socketpair	=	sock_no_socketpair,
1492	.accept		=	rose_accept,
1493	.getname	=	rose_getname,
1494	.poll		=	datagram_poll,
1495	.ioctl		=	rose_ioctl,
1496	.listen		=	rose_listen,
1497	.shutdown	=	sock_no_shutdown,
1498	.setsockopt	=	rose_setsockopt,
1499	.getsockopt	=	rose_getsockopt,
1500	.sendmsg	=	rose_sendmsg,
1501	.recvmsg	=	rose_recvmsg,
1502	.mmap		=	sock_no_mmap,
1503	.sendpage	=	sock_no_sendpage,
1504};
1505
1506static struct notifier_block rose_dev_notifier = {
1507	.notifier_call	=	rose_device_event,
1508};
1509
1510static struct net_device **dev_rose;
1511
1512static struct ax25_protocol rose_pid = {
1513	.pid	= AX25_P_ROSE,
1514	.func	= rose_route_frame
1515};
1516
1517static struct ax25_linkfail rose_linkfail_notifier = {
1518	.func	= rose_link_failed
1519};
1520
1521static int __init rose_proto_init(void)
1522{
1523	int i;
1524	int rc;
1525
1526	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1527		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1528		rc = -EINVAL;
1529		goto out;
1530	}
1531
1532	rc = proto_register(&rose_proto, 0);
1533	if (rc != 0)
1534		goto out;
1535
1536	rose_callsign = null_ax25_address;
1537
1538	dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1539	if (dev_rose == NULL) {
1540		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1541		rc = -ENOMEM;
1542		goto out_proto_unregister;
1543	}
1544
1545	for (i = 0; i < rose_ndevs; i++) {
1546		struct net_device *dev;
1547		char name[IFNAMSIZ];
1548
1549		sprintf(name, "rose%d", i);
1550		dev = alloc_netdev(0, name, rose_setup);
1551		if (!dev) {
1552			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1553			rc = -ENOMEM;
1554			goto fail;
1555		}
1556		rc = register_netdev(dev);
1557		if (rc) {
1558			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1559			free_netdev(dev);
1560			goto fail;
1561		}
1562		rose_set_lockdep_key(dev);
1563		dev_rose[i] = dev;
1564	}
1565
1566	sock_register(&rose_family_ops);
1567	register_netdevice_notifier(&rose_dev_notifier);
1568
1569	ax25_register_pid(&rose_pid);
1570	ax25_linkfail_register(&rose_linkfail_notifier);
1571
1572#ifdef CONFIG_SYSCTL
1573	rose_register_sysctl();
1574#endif
1575	rose_loopback_init();
1576
1577	rose_add_loopback_neigh();
1578
1579	proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops);
1580	proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops);
1581	proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops);
1582	proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops);
1583out:
1584	return rc;
1585fail:
1586	while (--i >= 0) {
1587		unregister_netdev(dev_rose[i]);
1588		free_netdev(dev_rose[i]);
1589	}
1590	kfree(dev_rose);
1591out_proto_unregister:
1592	proto_unregister(&rose_proto);
1593	goto out;
1594}
1595module_init(rose_proto_init);
1596
1597module_param(rose_ndevs, int, 0);
1598MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
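/*
 *	Usage sketch (editorial addition): the number of rose%d network
 *	interfaces registered by rose_proto_init() can be chosen at module
 *	load time, e.g.
 *
 *		modprobe rose rose_ndevs=4
 */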
1599
1600MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1601MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1602MODULE_LICENSE("GPL");
1603MODULE_ALIAS_NETPROTO(PF_ROSE);
1604
1605static void __exit rose_exit(void)
1606{
1607	int i;
1608
1609	proc_net_remove(&init_net, "rose");
1610	proc_net_remove(&init_net, "rose_neigh");
1611	proc_net_remove(&init_net, "rose_nodes");
1612	proc_net_remove(&init_net, "rose_routes");
1613	rose_loopback_clear();
1614
1615	rose_rt_free();
1616
1617	ax25_protocol_release(AX25_P_ROSE);
1618	ax25_linkfail_release(&rose_linkfail_notifier);
1619
1620	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1621		ax25_listen_release(&rose_callsign, NULL);
1622
1623#ifdef CONFIG_SYSCTL
1624	rose_unregister_sysctl();
1625#endif
1626	unregister_netdevice_notifier(&rose_dev_notifier);
1627
1628	sock_unregister(PF_ROSE);
1629
1630	for (i = 0; i < rose_ndevs; i++) {
1631		struct net_device *dev = dev_rose[i];
1632
1633		if (dev) {
1634			unregister_netdev(dev);
1635			free_netdev(dev);
1636		}
1637	}
1638
1639	kfree(dev_rose);
1640	proto_unregister(&rose_proto);
1641}
1642
1643module_exit(rose_exit);