Linux v6.13.7 — net/rose/af_rose.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *
   4 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
   5 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
   6 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
   7 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
   8 */
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/moduleparam.h>
  13#include <linux/init.h>
  14#include <linux/errno.h>
  15#include <linux/types.h>
  16#include <linux/socket.h>
  17#include <linux/in.h>
  18#include <linux/slab.h>
  19#include <linux/kernel.h>
  20#include <linux/sched/signal.h>
  21#include <linux/spinlock.h>
  22#include <linux/timer.h>
  23#include <linux/string.h>
  24#include <linux/sockios.h>
  25#include <linux/net.h>
  26#include <linux/stat.h>
  27#include <net/net_namespace.h>
  28#include <net/ax25.h>
  29#include <linux/inet.h>
  30#include <linux/netdevice.h>
  31#include <linux/if_arp.h>
  32#include <linux/skbuff.h>
  33#include <net/sock.h>
  34#include <linux/uaccess.h>
  35#include <linux/fcntl.h>
  36#include <linux/termios.h>
  37#include <linux/mm.h>
  38#include <linux/interrupt.h>
  39#include <linux/notifier.h>
  40#include <net/rose.h>
  41#include <linux/proc_fs.h>
  42#include <linux/seq_file.h>
  43#include <net/tcp_states.h>
  44#include <net/ip.h>
  45#include <net/arp.h>
  46
  47static int rose_ndevs = 10;
  48
  49int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
  50int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
  51int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
  52int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
  53int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
  54int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
  55int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
  56int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
  57int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
  58int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
  59
  60static HLIST_HEAD(rose_list);
  61static DEFINE_SPINLOCK(rose_list_lock);
  62
  63static const struct proto_ops rose_proto_ops;
  64
  65ax25_address rose_callsign;
  66
  67/*
  68 * ROSE network devices are virtual network devices encapsulating ROSE
  69 * frames into AX.25 which will be sent through an AX.25 device, so form a
  70 * special "super class" of normal net devices; split their locks off into a
  71 * separate class since they always nest.
  72 */
  73static struct lock_class_key rose_netdev_xmit_lock_key;
  74static struct lock_class_key rose_netdev_addr_lock_key;
  75
  76static void rose_set_lockdep_one(struct net_device *dev,
  77				 struct netdev_queue *txq,
  78				 void *_unused)
  79{
  80	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
  81}
  82
  83static void rose_set_lockdep_key(struct net_device *dev)
  84{
  85	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
  86	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
  87}
  88
  89/*
  90 *	Convert a ROSE address into text.
  91 */
  92char *rose2asc(char *buf, const rose_address *addr)
  93{
  94	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
  95	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
  96	    addr->rose_addr[4] == 0x00) {
  97		strcpy(buf, "*");
  98	} else {
  99		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
 100						addr->rose_addr[1] & 0xFF,
 101						addr->rose_addr[2] & 0xFF,
 102						addr->rose_addr[3] & 0xFF,
 103						addr->rose_addr[4] & 0xFF);
 104	}
 105
 106	return buf;
 107}
 108
 109/*
 110 *	Compare two ROSE addresses, 0 == equal.
 111 */
 112int rosecmp(const rose_address *addr1, const rose_address *addr2)
 113{
 114	int i;
 115
 116	for (i = 0; i < 5; i++)
 117		if (addr1->rose_addr[i] != addr2->rose_addr[i])
 118			return 1;
 119
 120	return 0;
 121}
 122
 123/*
 124 *	Compare two ROSE addresses for only mask digits, 0 == equal.
 125 */
 126int rosecmpm(const rose_address *addr1, const rose_address *addr2,
 127	     unsigned short mask)
 128{
 129	unsigned int i, j;
 130
 131	if (mask > 10)
 132		return 1;
 133
 134	for (i = 0; i < mask; i++) {
 135		j = i / 2;
 136
 137		if ((i % 2) != 0) {
 138			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
 139				return 1;
 140		} else {
 141			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
 142				return 1;
 143		}
 144	}
 145
 146	return 0;
 147}
 148
 149/*
 150 *	Socket removal during an interrupt is now safe.
 151 */
 152static void rose_remove_socket(struct sock *sk)
 153{
 154	spin_lock_bh(&rose_list_lock);
 155	sk_del_node_init(sk);
 156	spin_unlock_bh(&rose_list_lock);
 157}
 158
 159/*
 160 *	Kill all bound sockets on a broken link layer connection to a
 161 *	particular neighbour.
 162 */
 163void rose_kill_by_neigh(struct rose_neigh *neigh)
 164{
 165	struct sock *s;
 166
 167	spin_lock_bh(&rose_list_lock);
 168	sk_for_each(s, &rose_list) {
 169		struct rose_sock *rose = rose_sk(s);
 170
 171		if (rose->neighbour == neigh) {
 172			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 173			rose->neighbour->use--;
 174			rose->neighbour = NULL;
 175		}
 176	}
 177	spin_unlock_bh(&rose_list_lock);
 178}
 179
 180/*
 181 *	Kill all bound sockets on a dropped device.
 182 */
 183static void rose_kill_by_device(struct net_device *dev)
 184{
 185	struct sock *sk, *array[16];
 186	struct rose_sock *rose;
 187	bool rescan;
 188	int i, cnt;
 189
 190start:
 191	rescan = false;
 192	cnt = 0;
 193	spin_lock_bh(&rose_list_lock);
 194	sk_for_each(sk, &rose_list) {
 195		rose = rose_sk(sk);
 196		if (rose->device == dev) {
 197			if (cnt == ARRAY_SIZE(array)) {
 198				rescan = true;
 199				break;
 200			}
 201			sock_hold(sk);
 202			array[cnt++] = sk;
 203		}
 204	}
 205	spin_unlock_bh(&rose_list_lock);
 206
 207	for (i = 0; i < cnt; i++) {
  208		sk = array[i];
 209		rose = rose_sk(sk);
 210		lock_sock(sk);
 211		spin_lock_bh(&rose_list_lock);
 212		if (rose->device == dev) {
 213			rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 214			if (rose->neighbour)
 215				rose->neighbour->use--;
 216			netdev_put(rose->device, &rose->dev_tracker);
 217			rose->device = NULL;
 218		}
 219		spin_unlock_bh(&rose_list_lock);
 220		release_sock(sk);
 221		sock_put(sk);
 222		cond_resched();
 223	}
 224	if (rescan)
 225		goto start;
 226}
 227
 228/*
 229 *	Handle device status changes.
 230 */
 231static int rose_device_event(struct notifier_block *this,
 232			     unsigned long event, void *ptr)
 233{
 234	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 235
 236	if (!net_eq(dev_net(dev), &init_net))
 237		return NOTIFY_DONE;
 238
 239	if (event != NETDEV_DOWN)
 240		return NOTIFY_DONE;
 241
 242	switch (dev->type) {
 243	case ARPHRD_ROSE:
 244		rose_kill_by_device(dev);
 245		break;
 246	case ARPHRD_AX25:
 247		rose_link_device_down(dev);
 248		rose_rt_device_down(dev);
 249		break;
 250	}
 251
 252	return NOTIFY_DONE;
 253}
 254
 255/*
 256 *	Add a socket to the bound sockets list.
 257 */
 258static void rose_insert_socket(struct sock *sk)
 259{
 260
 261	spin_lock_bh(&rose_list_lock);
 262	sk_add_node(sk, &rose_list);
 263	spin_unlock_bh(&rose_list_lock);
 264}
 265
 266/*
 267 *	Find a socket that wants to accept the Call Request we just
 268 *	received.
 269 */
 270static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 271{
 272	struct sock *s;
 273
 274	spin_lock_bh(&rose_list_lock);
 275	sk_for_each(s, &rose_list) {
 276		struct rose_sock *rose = rose_sk(s);
 277
 278		if (!rosecmp(&rose->source_addr, addr) &&
 279		    !ax25cmp(&rose->source_call, call) &&
 280		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
 281			goto found;
 282	}
 283
 284	sk_for_each(s, &rose_list) {
 285		struct rose_sock *rose = rose_sk(s);
 286
 287		if (!rosecmp(&rose->source_addr, addr) &&
 288		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
 289		    s->sk_state == TCP_LISTEN)
 290			goto found;
 291	}
 292	s = NULL;
 293found:
 294	spin_unlock_bh(&rose_list_lock);
 295	return s;
 296}
 297
 298/*
 299 *	Find a connected ROSE socket given my LCI and device.
 300 */
 301struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
 302{
 303	struct sock *s;
 304
 305	spin_lock_bh(&rose_list_lock);
 306	sk_for_each(s, &rose_list) {
 307		struct rose_sock *rose = rose_sk(s);
 308
 309		if (rose->lci == lci && rose->neighbour == neigh)
 310			goto found;
 311	}
 312	s = NULL;
 313found:
 314	spin_unlock_bh(&rose_list_lock);
 315	return s;
 316}
 317
 318/*
 319 *	Find a unique LCI for a given device.
 320 */
 321unsigned int rose_new_lci(struct rose_neigh *neigh)
 322{
 323	int lci;
 324
 325	if (neigh->dce_mode) {
 326		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
 327			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 328				return lci;
 329	} else {
 330		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
 331			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 332				return lci;
 333	}
 334
 335	return 0;
 336}
 337
 338/*
 339 *	Deferred destroy.
 340 */
 341void rose_destroy_socket(struct sock *);
 342
 343/*
 344 *	Handler for deferred kills.
 345 */
 346static void rose_destroy_timer(struct timer_list *t)
 347{
 348	struct sock *sk = from_timer(sk, t, sk_timer);
 349
 350	rose_destroy_socket(sk);
 351}
 352
 353/*
 354 *	This is called from user mode and the timers. Thus it protects itself
 355 *	against interrupt users but doesn't worry about being called during
 356 *	work.  Once it is removed from the queue no interrupt or bottom half
 357 *	will touch it and we are (fairly 8-) ) safe.
 358 */
 359void rose_destroy_socket(struct sock *sk)
 360{
 361	struct sk_buff *skb;
 362
 363	rose_remove_socket(sk);
 364	rose_stop_heartbeat(sk);
 365	rose_stop_idletimer(sk);
 366	rose_stop_timer(sk);
 367
 368	rose_clear_queues(sk);		/* Flush the queues */
 369
 370	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 371		if (skb->sk != sk) {	/* A pending connection */
 372			/* Queue the unaccepted socket for death */
 373			sock_set_flag(skb->sk, SOCK_DEAD);
 374			rose_start_heartbeat(skb->sk);
 375			rose_sk(skb->sk)->state = ROSE_STATE_0;
 376		}
 377
 378		kfree_skb(skb);
 379	}
 380
 381	if (sk_has_allocations(sk)) {
 382		/* Defer: outstanding buffers */
 383		timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
 384		sk->sk_timer.expires  = jiffies + 10 * HZ;
 385		add_timer(&sk->sk_timer);
 386	} else
 387		sock_put(sk);
 388}
 389
 390/*
 391 *	Handling for system calls applied via the various interfaces to a
 392 *	ROSE socket object.
 393 */
 394
 395static int rose_setsockopt(struct socket *sock, int level, int optname,
 396		sockptr_t optval, unsigned int optlen)
 397{
 398	struct sock *sk = sock->sk;
 399	struct rose_sock *rose = rose_sk(sk);
 400	unsigned int opt;
 401
 402	if (level != SOL_ROSE)
 403		return -ENOPROTOOPT;
 404
 405	if (optlen < sizeof(unsigned int))
 406		return -EINVAL;
 407
 408	if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
 409		return -EFAULT;
 410
 411	switch (optname) {
 412	case ROSE_DEFER:
 413		rose->defer = opt ? 1 : 0;
 414		return 0;
 415
 416	case ROSE_T1:
 417		if (opt < 1 || opt > UINT_MAX / HZ)
 418			return -EINVAL;
 419		rose->t1 = opt * HZ;
 420		return 0;
 421
 422	case ROSE_T2:
 423		if (opt < 1 || opt > UINT_MAX / HZ)
 424			return -EINVAL;
 425		rose->t2 = opt * HZ;
 426		return 0;
 427
 428	case ROSE_T3:
 429		if (opt < 1 || opt > UINT_MAX / HZ)
 430			return -EINVAL;
 431		rose->t3 = opt * HZ;
 432		return 0;
 433
 434	case ROSE_HOLDBACK:
 435		if (opt < 1 || opt > UINT_MAX / HZ)
 436			return -EINVAL;
 437		rose->hb = opt * HZ;
 438		return 0;
 439
 440	case ROSE_IDLE:
 441		if (opt > UINT_MAX / (60 * HZ))
 442			return -EINVAL;
 443		rose->idle = opt * 60 * HZ;
 444		return 0;
 445
 446	case ROSE_QBITINCL:
 447		rose->qbitincl = opt ? 1 : 0;
 448		return 0;
 449
 450	default:
 451		return -ENOPROTOOPT;
 452	}
 453}
 454
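Note (not part of the kernel source): rose_setsockopt() above takes its timer options in whole seconds and converts them to jiffies (opt * HZ), rejecting values that would overflow. A minimal userspace sketch of driving it follows; rose_tune() and its values are illustrative, AF_ROSE is assumed to be visible through <sys/socket.h>, the ROSE_* names come from <linux/rose.h>, and SOL_ROSE is defined locally in case the libc headers do not provide it.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/rose.h>		/* ROSE_T1, ROSE_QBITINCL, ... */

#ifndef SOL_ROSE
#define SOL_ROSE 260		/* kernel value of SOL_ROSE */
#endif

/* Illustrative: open an AF_ROSE socket, set the Call Request timer to
 * 200 seconds and turn on Q-bit passing. */
int rose_tune(void)
{
	unsigned int t1 = 200;	/* seconds; the kernel multiplies by HZ */
	unsigned int qbit = 1;
	int s = socket(AF_ROSE, SOCK_SEQPACKET, 0);

	if (s < 0)
		return -1;
	if (setsockopt(s, SOL_ROSE, ROSE_T1, &t1, sizeof(t1)) < 0)
		perror("ROSE_T1");
	if (setsockopt(s, SOL_ROSE, ROSE_QBITINCL, &qbit, sizeof(qbit)) < 0)
		perror("ROSE_QBITINCL");
	return s;
}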
 455static int rose_getsockopt(struct socket *sock, int level, int optname,
 456	char __user *optval, int __user *optlen)
 457{
 458	struct sock *sk = sock->sk;
 459	struct rose_sock *rose = rose_sk(sk);
 460	int val = 0;
 461	int len;
 462
 463	if (level != SOL_ROSE)
 464		return -ENOPROTOOPT;
 465
 466	if (get_user(len, optlen))
 467		return -EFAULT;
 468
 469	if (len < 0)
 470		return -EINVAL;
 471
 472	switch (optname) {
 473	case ROSE_DEFER:
 474		val = rose->defer;
 475		break;
 476
 477	case ROSE_T1:
 478		val = rose->t1 / HZ;
 479		break;
 480
 481	case ROSE_T2:
 482		val = rose->t2 / HZ;
 483		break;
 484
 485	case ROSE_T3:
 486		val = rose->t3 / HZ;
 487		break;
 488
 489	case ROSE_HOLDBACK:
 490		val = rose->hb / HZ;
 491		break;
 492
 493	case ROSE_IDLE:
 494		val = rose->idle / (60 * HZ);
 495		break;
 496
 497	case ROSE_QBITINCL:
 498		val = rose->qbitincl;
 499		break;
 500
 501	default:
 502		return -ENOPROTOOPT;
 503	}
 504
 505	len = min_t(unsigned int, len, sizeof(int));
 506
 507	if (put_user(len, optlen))
 508		return -EFAULT;
 509
 510	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
 511}
 512
 513static int rose_listen(struct socket *sock, int backlog)
 514{
 515	struct sock *sk = sock->sk;
 516
 517	lock_sock(sk);
 518	if (sock->state != SS_UNCONNECTED) {
 519		release_sock(sk);
 520		return -EINVAL;
 521	}
 522
 523	if (sk->sk_state != TCP_LISTEN) {
 524		struct rose_sock *rose = rose_sk(sk);
 525
 526		rose->dest_ndigis = 0;
 527		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
 528		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
 529		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
 530		sk->sk_max_ack_backlog = backlog;
 531		sk->sk_state           = TCP_LISTEN;
 532		release_sock(sk);
 533		return 0;
 534	}
 535	release_sock(sk);
 536
 537	return -EOPNOTSUPP;
 538}
 539
 540static struct proto rose_proto = {
 541	.name	  = "ROSE",
 542	.owner	  = THIS_MODULE,
 543	.obj_size = sizeof(struct rose_sock),
 544};
 545
 546static int rose_create(struct net *net, struct socket *sock, int protocol,
 547		       int kern)
 548{
 549	struct sock *sk;
 550	struct rose_sock *rose;
 551
 552	if (!net_eq(net, &init_net))
 553		return -EAFNOSUPPORT;
 554
 555	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 556		return -ESOCKTNOSUPPORT;
 557
 558	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
 559	if (sk == NULL)
 560		return -ENOMEM;
 561
 562	rose = rose_sk(sk);
 563
 564	sock_init_data(sock, sk);
 565
 566	skb_queue_head_init(&rose->ack_queue);
 567#ifdef M_BIT
 568	skb_queue_head_init(&rose->frag_queue);
 569	rose->fraglen    = 0;
 570#endif
 571
 572	sock->ops    = &rose_proto_ops;
 573	sk->sk_protocol = protocol;
 574
 575	timer_setup(&rose->timer, NULL, 0);
 576	timer_setup(&rose->idletimer, NULL, 0);
 577
 578	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
 579	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
 580	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
 581	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
 582	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
 583
 584	rose->state = ROSE_STATE_0;
 585
 586	return 0;
 587}
 588
 589static struct sock *rose_make_new(struct sock *osk)
 590{
 591	struct sock *sk;
 592	struct rose_sock *rose, *orose;
 593
 594	if (osk->sk_type != SOCK_SEQPACKET)
 595		return NULL;
 596
 597	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
 598	if (sk == NULL)
 599		return NULL;
 600
 601	rose = rose_sk(sk);
 602
 603	sock_init_data(NULL, sk);
 604
 605	skb_queue_head_init(&rose->ack_queue);
 606#ifdef M_BIT
 607	skb_queue_head_init(&rose->frag_queue);
 608	rose->fraglen  = 0;
 609#endif
 610
 611	sk->sk_type     = osk->sk_type;
 612	sk->sk_priority = READ_ONCE(osk->sk_priority);
 613	sk->sk_protocol = osk->sk_protocol;
 614	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 615	sk->sk_sndbuf   = osk->sk_sndbuf;
 616	sk->sk_state    = TCP_ESTABLISHED;
 617	sock_copy_flags(sk, osk);
 618
 619	timer_setup(&rose->timer, NULL, 0);
 620	timer_setup(&rose->idletimer, NULL, 0);
 621
 622	orose		= rose_sk(osk);
 623	rose->t1	= orose->t1;
 624	rose->t2	= orose->t2;
 625	rose->t3	= orose->t3;
 626	rose->hb	= orose->hb;
 627	rose->idle	= orose->idle;
 628	rose->defer	= orose->defer;
 629	rose->device	= orose->device;
 630	if (rose->device)
 631		netdev_hold(rose->device, &rose->dev_tracker, GFP_ATOMIC);
 632	rose->qbitincl	= orose->qbitincl;
 633
 634	return sk;
 635}
 636
 637static int rose_release(struct socket *sock)
 638{
 639	struct sock *sk = sock->sk;
 640	struct rose_sock *rose;
 641
 642	if (sk == NULL) return 0;
 643
 644	sock_hold(sk);
 645	sock_orphan(sk);
 646	lock_sock(sk);
 647	rose = rose_sk(sk);
 648
 649	switch (rose->state) {
 650	case ROSE_STATE_0:
 651		release_sock(sk);
 652		rose_disconnect(sk, 0, -1, -1);
 653		lock_sock(sk);
 654		rose_destroy_socket(sk);
 655		break;
 656
 657	case ROSE_STATE_2:
 658		rose->neighbour->use--;
 659		release_sock(sk);
 660		rose_disconnect(sk, 0, -1, -1);
 661		lock_sock(sk);
 662		rose_destroy_socket(sk);
 663		break;
 664
 665	case ROSE_STATE_1:
 666	case ROSE_STATE_3:
 667	case ROSE_STATE_4:
 668	case ROSE_STATE_5:
 669		rose_clear_queues(sk);
 670		rose_stop_idletimer(sk);
 671		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
 672		rose_start_t3timer(sk);
 673		rose->state  = ROSE_STATE_2;
 674		sk->sk_state    = TCP_CLOSE;
 675		sk->sk_shutdown |= SEND_SHUTDOWN;
 676		sk->sk_state_change(sk);
 677		sock_set_flag(sk, SOCK_DEAD);
 678		sock_set_flag(sk, SOCK_DESTROY);
 679		break;
 680
 681	default:
 682		break;
 683	}
 684
 685	spin_lock_bh(&rose_list_lock);
 686	netdev_put(rose->device, &rose->dev_tracker);
 687	rose->device = NULL;
 688	spin_unlock_bh(&rose_list_lock);
 689	sock->sk = NULL;
 690	release_sock(sk);
 691	sock_put(sk);
 692
 693	return 0;
 694}
 695
 696static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 697{
 698	struct sock *sk = sock->sk;
 699	struct rose_sock *rose = rose_sk(sk);
 700	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 701	struct net_device *dev;
 702	ax25_address *source;
 703	ax25_uid_assoc *user;
 704	int err = -EINVAL;
 705	int n;
 706
 707	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 708		return -EINVAL;
 709
 710	if (addr->srose_family != AF_ROSE)
 711		return -EINVAL;
 712
 713	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 714		return -EINVAL;
 715
 716	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 717		return -EINVAL;
 718
 719	lock_sock(sk);
 720
 721	if (!sock_flag(sk, SOCK_ZAPPED))
 722		goto out_release;
 723
 724	err = -EADDRNOTAVAIL;
 725	dev = rose_dev_get(&addr->srose_addr);
 726	if (!dev)
 727		goto out_release;
 728
 729	source = &addr->srose_call;
 730
 731	user = ax25_findbyuid(current_euid());
 732	if (user) {
 733		rose->source_call = user->call;
 734		ax25_uid_put(user);
 735	} else {
 736		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
 737			dev_put(dev);
 738			err = -EACCES;
 739			goto out_release;
 740		}
 741		rose->source_call   = *source;
 742	}
 743
 744	rose->source_addr   = addr->srose_addr;
 745	rose->device        = dev;
 746	netdev_tracker_alloc(rose->device, &rose->dev_tracker, GFP_KERNEL);
 747	rose->source_ndigis = addr->srose_ndigis;
 748
 749	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 750		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 751		for (n = 0 ; n < addr->srose_ndigis ; n++)
 752			rose->source_digis[n] = full_addr->srose_digis[n];
 753	} else {
 754		if (rose->source_ndigis == 1) {
 755			rose->source_digis[0] = addr->srose_digi;
 756		}
 757	}
 758
 759	rose_insert_socket(sk);
 760
 761	sock_reset_flag(sk, SOCK_ZAPPED);
 762	err = 0;
 763out_release:
 764	release_sock(sk);
 765	return err;
 766}
 767
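Note (not part of the kernel source): rose_bind() above requires the source address to match a local ROSE interface (rose_dev_get()) and the callsign to be either mapped to the calling user in the AX.25 UID table or covered by CAP_NET_BIND_SERVICE. A hedged sketch of a listener setup follows; rose_pack_addr() and rose_listener() are hypothetical helper names, and ax25_aton_entry() is assumed to come from libax25's <netax25/axlib.h>.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netax25/axlib.h>	/* ax25_aton_entry(), assumed from libax25 */
#include <linux/rose.h>		/* struct sockaddr_rose, rose_address */

/* Hypothetical helper: pack up to 10 ASCII digits into the 5-byte BCD
 * form used throughout this file, high nibble first, matching rosecmpm(). */
static void rose_pack_addr(const char *digits, rose_address *addr)
{
	int i;

	memset(addr, 0, sizeof(*addr));
	for (i = 0; i < 10 && digits[i]; i++) {
		unsigned char d = digits[i] - '0';

		addr->rose_addr[i / 2] |= (i % 2) ? d : d << 4;
	}
}

/* Illustrative listener: bind to a local ROSE address and callsign, then
 * listen for Call Requests.  The address must belong to an existing roseN
 * interface or bind() fails with EADDRNOTAVAIL. */
int rose_listener(const char *local_addr, const char *local_call)
{
	struct sockaddr_rose laddr;
	int s = socket(AF_ROSE, SOCK_SEQPACKET, 0);

	if (s < 0)
		return -1;

	memset(&laddr, 0, sizeof(laddr));
	laddr.srose_family = AF_ROSE;
	rose_pack_addr(local_addr, &laddr.srose_addr);
	ax25_aton_entry(local_call, laddr.srose_call.ax25_call);

	if (bind(s, (struct sockaddr *)&laddr, sizeof(laddr)) < 0 ||
	    listen(s, 5) < 0) {
		close(s);
		return -1;
	}
	return s;
}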
 768static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
 769{
 770	struct sock *sk = sock->sk;
 771	struct rose_sock *rose = rose_sk(sk);
 772	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 773	unsigned char cause, diagnostic;
 774	ax25_uid_assoc *user;
 775	int n, err = 0;
 776
 777	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 778		return -EINVAL;
 779
 780	if (addr->srose_family != AF_ROSE)
 781		return -EINVAL;
 782
 783	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 784		return -EINVAL;
 785
 786	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 787		return -EINVAL;
 788
 789	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
 790	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
 791		return -EINVAL;
 792
 793	lock_sock(sk);
 794
 795	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
 796		/* Connect completed during a ERESTARTSYS event */
 797		sock->state = SS_CONNECTED;
 798		goto out_release;
 799	}
 800
 801	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
 802		sock->state = SS_UNCONNECTED;
 803		err = -ECONNREFUSED;
 804		goto out_release;
 805	}
 806
 807	if (sk->sk_state == TCP_ESTABLISHED) {
 808		/* No reconnect on a seqpacket socket */
 809		err = -EISCONN;
 810		goto out_release;
 811	}
 812
 813	sk->sk_state   = TCP_CLOSE;
 814	sock->state = SS_UNCONNECTED;
 815
 816	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
 817					 &diagnostic, 0);
 818	if (!rose->neighbour) {
 819		err = -ENETUNREACH;
 820		goto out_release;
 821	}
 822
 823	rose->lci = rose_new_lci(rose->neighbour);
 824	if (!rose->lci) {
 825		err = -ENETUNREACH;
 826		goto out_release;
 827	}
 828
 829	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
 830		struct net_device *dev;
 831
 832		sock_reset_flag(sk, SOCK_ZAPPED);
 833
 834		dev = rose_dev_first();
 835		if (!dev) {
 836			err = -ENETUNREACH;
 837			goto out_release;
 838		}
 839
 840		user = ax25_findbyuid(current_euid());
 841		if (!user) {
 842			err = -EINVAL;
 843			dev_put(dev);
 844			goto out_release;
 845		}
 846
 847		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
 848		rose->source_call = user->call;
 849		rose->device      = dev;
 850		netdev_tracker_alloc(rose->device, &rose->dev_tracker,
 851				     GFP_KERNEL);
 852		ax25_uid_put(user);
 853
 854		rose_insert_socket(sk);		/* Finish the bind */
 855	}
 856	rose->dest_addr   = addr->srose_addr;
 857	rose->dest_call   = addr->srose_call;
 858	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
 859	rose->dest_ndigis = addr->srose_ndigis;
 860
 861	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 862		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 863		for (n = 0 ; n < addr->srose_ndigis ; n++)
 864			rose->dest_digis[n] = full_addr->srose_digis[n];
 865	} else {
 866		if (rose->dest_ndigis == 1) {
 867			rose->dest_digis[0] = addr->srose_digi;
 868		}
 869	}
 870
 871	/* Move to connecting socket, start sending Connect Requests */
 872	sock->state   = SS_CONNECTING;
 873	sk->sk_state     = TCP_SYN_SENT;
 874
 875	rose->state = ROSE_STATE_1;
 876
 877	rose->neighbour->use++;
 878
 879	rose_write_internal(sk, ROSE_CALL_REQUEST);
 880	rose_start_heartbeat(sk);
 881	rose_start_t1timer(sk);
 882
 883	/* Now the loop */
 884	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
 885		err = -EINPROGRESS;
 886		goto out_release;
 887	}
 888
 889	/*
 890	 * A Connect Ack with Choke or timeout or failed routing will go to
 891	 * closed.
 892	 */
 893	if (sk->sk_state == TCP_SYN_SENT) {
 894		DEFINE_WAIT(wait);
 895
 896		for (;;) {
 897			prepare_to_wait(sk_sleep(sk), &wait,
 898					TASK_INTERRUPTIBLE);
 899			if (sk->sk_state != TCP_SYN_SENT)
 900				break;
 901			if (!signal_pending(current)) {
 902				release_sock(sk);
 903				schedule();
 904				lock_sock(sk);
 905				continue;
 906			}
 907			err = -ERESTARTSYS;
 908			break;
 909		}
 910		finish_wait(sk_sleep(sk), &wait);
 911
 912		if (err)
 913			goto out_release;
 914	}
 915
 916	if (sk->sk_state != TCP_ESTABLISHED) {
 917		sock->state = SS_UNCONNECTED;
 918		err = sock_error(sk);	/* Always set at this point */
 919		goto out_release;
 920	}
 921
 922	sock->state = SS_CONNECTED;
 923
 924out_release:
 925	release_sock(sk);
 926
 927	return err;
 928}
 929
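Note (not part of the kernel source): the client side mirrors rose_connect() above, filling a struct sockaddr_rose with the remote 10-digit address and callsign; if the socket was never bound, the kernel autobinds it to the first ROSE device and the caller's AX.25 UID mapping. A hedged sketch, reusing the hypothetical rose_pack_addr() helper and headers from the rose_bind() note above:

/* Illustrative outgoing call on a SOCK_SEQPACKET AF_ROSE socket. */
int rose_dial(const char *dest_addr, const char *dest_call)
{
	struct sockaddr_rose raddr;
	int s = socket(AF_ROSE, SOCK_SEQPACKET, 0);

	if (s < 0)
		return -1;

	memset(&raddr, 0, sizeof(raddr));
	raddr.srose_family = AF_ROSE;
	rose_pack_addr(dest_addr, &raddr.srose_addr);
	ax25_aton_entry(dest_call, raddr.srose_call.ax25_call);

	/* Blocks until Call Accepted or Clear Request unless O_NONBLOCK is set. */
	if (connect(s, (struct sockaddr *)&raddr, sizeof(raddr)) < 0) {
		close(s);
		return -1;
	}
	return s;
}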
 930static int rose_accept(struct socket *sock, struct socket *newsock,
 931		       struct proto_accept_arg *arg)
 932{
 933	struct sk_buff *skb;
 934	struct sock *newsk;
 935	DEFINE_WAIT(wait);
 936	struct sock *sk;
 937	int err = 0;
 938
 939	if ((sk = sock->sk) == NULL)
 940		return -EINVAL;
 941
 942	lock_sock(sk);
 943	if (sk->sk_type != SOCK_SEQPACKET) {
 944		err = -EOPNOTSUPP;
 945		goto out_release;
 946	}
 947
 948	if (sk->sk_state != TCP_LISTEN) {
 949		err = -EINVAL;
 950		goto out_release;
 951	}
 952
 953	/*
 954	 *	The write queue this time is holding sockets ready to use
 955	 *	hooked into the SABM we saved
 956	 */
 957	for (;;) {
 958		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 959
 960		skb = skb_dequeue(&sk->sk_receive_queue);
 961		if (skb)
 962			break;
 963
 964		if (arg->flags & O_NONBLOCK) {
 965			err = -EWOULDBLOCK;
 966			break;
 967		}
 968		if (!signal_pending(current)) {
 969			release_sock(sk);
 970			schedule();
 971			lock_sock(sk);
 972			continue;
 973		}
 974		err = -ERESTARTSYS;
 975		break;
 976	}
 977	finish_wait(sk_sleep(sk), &wait);
 978	if (err)
 979		goto out_release;
 980
 981	newsk = skb->sk;
 982	sock_graft(newsk, newsock);
 983
 984	/* Now attach up the new socket */
 985	skb->sk = NULL;
 986	kfree_skb(skb);
 987	sk_acceptq_removed(sk);
 988
 989out_release:
 990	release_sock(sk);
 991
 992	return err;
 993}
 994
 995static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
 996	int peer)
 997{
 998	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
 999	struct sock *sk = sock->sk;
1000	struct rose_sock *rose = rose_sk(sk);
1001	int n;
1002
1003	memset(srose, 0, sizeof(*srose));
1004	if (peer != 0) {
1005		if (sk->sk_state != TCP_ESTABLISHED)
1006			return -ENOTCONN;
1007		srose->srose_family = AF_ROSE;
1008		srose->srose_addr   = rose->dest_addr;
1009		srose->srose_call   = rose->dest_call;
1010		srose->srose_ndigis = rose->dest_ndigis;
1011		for (n = 0; n < rose->dest_ndigis; n++)
1012			srose->srose_digis[n] = rose->dest_digis[n];
1013	} else {
1014		srose->srose_family = AF_ROSE;
1015		srose->srose_addr   = rose->source_addr;
1016		srose->srose_call   = rose->source_call;
1017		srose->srose_ndigis = rose->source_ndigis;
1018		for (n = 0; n < rose->source_ndigis; n++)
1019			srose->srose_digis[n] = rose->source_digis[n];
1020	}
1021
1022	return sizeof(struct full_sockaddr_rose);
1023}
1024
1025int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
1026{
1027	struct sock *sk;
1028	struct sock *make;
1029	struct rose_sock *make_rose;
1030	struct rose_facilities_struct facilities;
1031	int n;
1032
1033	skb->sk = NULL;		/* Initially we don't know who it's for */
1034
1035	/*
1036	 *	skb->data points to the rose frame start
1037	 */
1038	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
1039
1040	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
1041				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
1042				   &facilities)) {
1043		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
1044		return 0;
1045	}
1046
1047	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
1048
1049	/*
1050	 * We can't accept the Call Request.
1051	 */
1052	if (sk == NULL || sk_acceptq_is_full(sk) ||
1053	    (make = rose_make_new(sk)) == NULL) {
1054		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
1055		return 0;
1056	}
1057
1058	skb->sk     = make;
1059	make->sk_state = TCP_ESTABLISHED;
1060	make_rose = rose_sk(make);
1061
1062	make_rose->lci           = lci;
1063	make_rose->dest_addr     = facilities.dest_addr;
1064	make_rose->dest_call     = facilities.dest_call;
1065	make_rose->dest_ndigis   = facilities.dest_ndigis;
1066	for (n = 0 ; n < facilities.dest_ndigis ; n++)
1067		make_rose->dest_digis[n] = facilities.dest_digis[n];
1068	make_rose->source_addr   = facilities.source_addr;
1069	make_rose->source_call   = facilities.source_call;
1070	make_rose->source_ndigis = facilities.source_ndigis;
1071	for (n = 0 ; n < facilities.source_ndigis ; n++)
1072		make_rose->source_digis[n] = facilities.source_digis[n];
1073	make_rose->neighbour     = neigh;
1074	make_rose->device        = dev;
1075	/* Caller got a reference for us. */
1076	netdev_tracker_alloc(make_rose->device, &make_rose->dev_tracker,
1077			     GFP_ATOMIC);
1078	make_rose->facilities    = facilities;
1079
1080	make_rose->neighbour->use++;
1081
1082	if (rose_sk(sk)->defer) {
1083		make_rose->state = ROSE_STATE_5;
1084	} else {
1085		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1086		make_rose->state = ROSE_STATE_3;
1087		rose_start_idletimer(make);
1088	}
1089
1090	make_rose->condition = 0x00;
1091	make_rose->vs        = 0;
1092	make_rose->va        = 0;
1093	make_rose->vr        = 0;
1094	make_rose->vl        = 0;
1095	sk_acceptq_added(sk);
1096
1097	rose_insert_socket(make);
1098
1099	skb_queue_head(&sk->sk_receive_queue, skb);
1100
1101	rose_start_heartbeat(make);
1102
1103	if (!sock_flag(sk, SOCK_DEAD))
1104		sk->sk_data_ready(sk);
1105
1106	return 1;
1107}
1108
1109static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1110{
1111	struct sock *sk = sock->sk;
1112	struct rose_sock *rose = rose_sk(sk);
1113	DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
1114	int err;
1115	struct full_sockaddr_rose srose;
1116	struct sk_buff *skb;
1117	unsigned char *asmptr;
1118	int n, size, qbit = 0;
1119
1120	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1121		return -EINVAL;
1122
1123	if (sock_flag(sk, SOCK_ZAPPED))
1124		return -EADDRNOTAVAIL;
1125
1126	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1127		send_sig(SIGPIPE, current, 0);
1128		return -EPIPE;
1129	}
1130
1131	if (rose->neighbour == NULL || rose->device == NULL)
1132		return -ENETUNREACH;
1133
1134	if (usrose != NULL) {
1135		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1136			return -EINVAL;
1137		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1138		memcpy(&srose, usrose, msg->msg_namelen);
1139		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1140		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1141			return -EISCONN;
1142		if (srose.srose_ndigis != rose->dest_ndigis)
1143			return -EISCONN;
1144		if (srose.srose_ndigis == rose->dest_ndigis) {
1145			for (n = 0 ; n < srose.srose_ndigis ; n++)
1146				if (ax25cmp(&rose->dest_digis[n],
1147					    &srose.srose_digis[n]))
1148					return -EISCONN;
1149		}
1150		if (srose.srose_family != AF_ROSE)
1151			return -EINVAL;
1152	} else {
1153		if (sk->sk_state != TCP_ESTABLISHED)
1154			return -ENOTCONN;
1155
1156		srose.srose_family = AF_ROSE;
1157		srose.srose_addr   = rose->dest_addr;
1158		srose.srose_call   = rose->dest_call;
1159		srose.srose_ndigis = rose->dest_ndigis;
1160		for (n = 0 ; n < rose->dest_ndigis ; n++)
1161			srose.srose_digis[n] = rose->dest_digis[n];
1162	}
1163
1164	/* Build a packet */
1165	/* Sanity check the packet size */
1166	if (len > 65535)
1167		return -EMSGSIZE;
1168
1169	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1170
1171	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1172		return err;
1173
1174	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1175
1176	/*
1177	 *	Put the data on the end
1178	 */
1179
1180	skb_reset_transport_header(skb);
1181	skb_put(skb, len);
1182
1183	err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1184	if (err) {
1185		kfree_skb(skb);
1186		return err;
1187	}
1188
1189	/*
1190	 *	If the Q BIT Include socket option is in force, the first
1191	 *	byte of the user data is the logical value of the Q Bit.
1192	 */
1193	if (rose->qbitincl) {
1194		qbit = skb->data[0];
1195		skb_pull(skb, 1);
1196	}
1197
1198	/*
1199	 *	Push down the ROSE header
1200	 */
1201	asmptr = skb_push(skb, ROSE_MIN_LEN);
1202
1203	/* Build a ROSE Network header */
1204	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1205	asmptr[1] = (rose->lci >> 0) & 0xFF;
1206	asmptr[2] = ROSE_DATA;
1207
1208	if (qbit)
1209		asmptr[0] |= ROSE_Q_BIT;
1210
1211	if (sk->sk_state != TCP_ESTABLISHED) {
1212		kfree_skb(skb);
1213		return -ENOTCONN;
1214	}
1215
1216#ifdef M_BIT
1217#define ROSE_PACLEN (256-ROSE_MIN_LEN)
1218	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1219		unsigned char header[ROSE_MIN_LEN];
1220		struct sk_buff *skbn;
1221		int frontlen;
1222		int lg;
1223
1224		/* Save a copy of the Header */
1225		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1226		skb_pull(skb, ROSE_MIN_LEN);
1227
1228		frontlen = skb_headroom(skb);
1229
1230		while (skb->len > 0) {
1231			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1232				kfree_skb(skb);
1233				return err;
1234			}
1235
1236			skbn->sk   = sk;
1237			skbn->free = 1;
1238			skbn->arp  = 1;
1239
1240			skb_reserve(skbn, frontlen);
1241
1242			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1243
1244			/* Copy the user data */
1245			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1246			skb_pull(skb, lg);
1247
1248			/* Duplicate the Header */
1249			skb_push(skbn, ROSE_MIN_LEN);
1250			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1251
1252			if (skb->len > 0)
1253				skbn->data[2] |= M_BIT;
1254
1255			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1256		}
1257
1258		skb->free = 1;
1259		kfree_skb(skb);
1260	} else {
1261		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1262	}
1263#else
1264	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1265#endif
1266
1267	rose_kick(sk);
1268
1269	return len;
1270}
1271
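Note (not part of the kernel source): when ROSE_QBITINCL is enabled, rose_sendmsg() above strips the first byte of user data and uses it as the Q bit of the outgoing packet, and rose_recvmsg() prepends it again on receive. A small hedged sketch of sending one qualified packet on an already connected socket; rose_send_qualified() is an illustrative name and ROSE_MTU is assumed from <linux/rose.h>.

/* Illustrative send on a connected AF_ROSE socket with ROSE_QBITINCL set:
 * byte 0 carries the logical Q bit, the payload starts at byte 1. */
ssize_t rose_send_qualified(int s, const void *data, size_t len)
{
	unsigned char buf[ROSE_MTU + 1];

	if (len + 1 > sizeof(buf))
		return -1;
	buf[0] = 1;			/* set the Q bit */
	memcpy(buf + 1, data, len);
	return send(s, buf, len + 1, 0);
}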
1272
1273static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1274			int flags)
1275{
1276	struct sock *sk = sock->sk;
1277	struct rose_sock *rose = rose_sk(sk);
1278	size_t copied;
1279	unsigned char *asmptr;
1280	struct sk_buff *skb;
1281	int n, er, qbit;
1282
1283	/*
1284	 * This works for seqpacket too. The receiver has ordered the queue for
1285	 * us! We do one quick check first though
1286	 */
1287	if (sk->sk_state != TCP_ESTABLISHED)
1288		return -ENOTCONN;
1289
1290	/* Now we can treat all alike */
1291	skb = skb_recv_datagram(sk, flags, &er);
1292	if (!skb)
1293		return er;
1294
1295	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1296
1297	skb_pull(skb, ROSE_MIN_LEN);
1298
1299	if (rose->qbitincl) {
1300		asmptr  = skb_push(skb, 1);
1301		*asmptr = qbit;
1302	}
1303
1304	skb_reset_transport_header(skb);
1305	copied     = skb->len;
1306
1307	if (copied > size) {
1308		copied = size;
1309		msg->msg_flags |= MSG_TRUNC;
1310	}
1311
1312	skb_copy_datagram_msg(skb, 0, msg, copied);
1313
1314	if (msg->msg_name) {
1315		struct sockaddr_rose *srose;
1316		DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
1317				 msg->msg_name);
1318
1319		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1320		srose = msg->msg_name;
1321		srose->srose_family = AF_ROSE;
1322		srose->srose_addr   = rose->dest_addr;
1323		srose->srose_call   = rose->dest_call;
1324		srose->srose_ndigis = rose->dest_ndigis;
1325		for (n = 0 ; n < rose->dest_ndigis ; n++)
1326			full_srose->srose_digis[n] = rose->dest_digis[n];
1327		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1328	}
1329
1330	skb_free_datagram(sk, skb);
1331
1332	return copied;
1333}
1334
1335
1336static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1337{
1338	struct sock *sk = sock->sk;
1339	struct rose_sock *rose = rose_sk(sk);
1340	void __user *argp = (void __user *)arg;
1341
1342	switch (cmd) {
1343	case TIOCOUTQ: {
1344		long amount;
1345
1346		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1347		if (amount < 0)
1348			amount = 0;
1349		return put_user(amount, (unsigned int __user *) argp);
1350	}
1351
1352	case TIOCINQ: {
1353		struct sk_buff *skb;
1354		long amount = 0L;
1355
1356		spin_lock_irq(&sk->sk_receive_queue.lock);
1357		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1358			amount = skb->len;
1359		spin_unlock_irq(&sk->sk_receive_queue.lock);
1360		return put_user(amount, (unsigned int __user *) argp);
1361	}
1362
1363	case SIOCGIFADDR:
1364	case SIOCSIFADDR:
1365	case SIOCGIFDSTADDR:
1366	case SIOCSIFDSTADDR:
1367	case SIOCGIFBRDADDR:
1368	case SIOCSIFBRDADDR:
1369	case SIOCGIFNETMASK:
1370	case SIOCSIFNETMASK:
1371	case SIOCGIFMETRIC:
1372	case SIOCSIFMETRIC:
1373		return -EINVAL;
1374
1375	case SIOCADDRT:
1376	case SIOCDELRT:
1377	case SIOCRSCLRRT:
1378		if (!capable(CAP_NET_ADMIN))
1379			return -EPERM;
1380		return rose_rt_ioctl(cmd, argp);
1381
1382	case SIOCRSGCAUSE: {
1383		struct rose_cause_struct rose_cause;
1384		rose_cause.cause      = rose->cause;
1385		rose_cause.diagnostic = rose->diagnostic;
1386		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1387	}
1388
1389	case SIOCRSSCAUSE: {
1390		struct rose_cause_struct rose_cause;
1391		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1392			return -EFAULT;
1393		rose->cause      = rose_cause.cause;
1394		rose->diagnostic = rose_cause.diagnostic;
1395		return 0;
1396	}
1397
1398	case SIOCRSSL2CALL:
1399		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1400		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1401			ax25_listen_release(&rose_callsign, NULL);
1402		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1403			return -EFAULT;
1404		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1405			return ax25_listen_register(&rose_callsign, NULL);
1406
1407		return 0;
1408
1409	case SIOCRSGL2CALL:
1410		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1411
1412	case SIOCRSACCEPT:
1413		if (rose->state == ROSE_STATE_5) {
1414			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1415			rose_start_idletimer(sk);
1416			rose->condition = 0x00;
1417			rose->vs        = 0;
1418			rose->va        = 0;
1419			rose->vr        = 0;
1420			rose->vl        = 0;
1421			rose->state     = ROSE_STATE_3;
1422		}
1423		return 0;
1424
1425	default:
1426		return -ENOIOCTLCMD;
1427	}
1428
1429	return 0;
1430}
1431
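Note (not part of the kernel source): one non-obvious flow handled above is deferred call acceptance. With the ROSE_DEFER option set on the listener, rose_rx_call_request() leaves the incoming connection in ROSE_STATE_5 instead of answering at once, and the application confirms it with SIOCRSACCEPT (or clears the call by closing the socket). A hedged sketch; rose_accept_deferred() is an illustrative name and SIOCRSACCEPT is assumed from <linux/rose.h>.

#include <unistd.h>
#include <sys/ioctl.h>

/* Illustrative deferred accept: the listener has ROSE_DEFER set, so the
 * incoming call is parked until userspace answers via SIOCRSACCEPT. */
int rose_accept_deferred(int listener)
{
	int conn = accept(listener, NULL, NULL);

	if (conn < 0)
		return -1;

	/* ... inspect the caller, check access lists, etc. ... */

	if (ioctl(conn, SIOCRSACCEPT) < 0) {	/* sends Call Accepted */
		close(conn);			/* closing instead clears the call */
		return -1;
	}
	return conn;
}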
1432#ifdef CONFIG_PROC_FS
1433static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1434	__acquires(rose_list_lock)
1435{
1436	spin_lock_bh(&rose_list_lock);
1437	return seq_hlist_start_head(&rose_list, *pos);
1438}
1439
1440static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1441{
1442	return seq_hlist_next(v, &rose_list, pos);
1443}
1444
1445static void rose_info_stop(struct seq_file *seq, void *v)
1446	__releases(rose_list_lock)
1447{
1448	spin_unlock_bh(&rose_list_lock);
1449}
1450
1451static int rose_info_show(struct seq_file *seq, void *v)
1452{
1453	char buf[11], rsbuf[11];
1454
1455	if (v == SEQ_START_TOKEN)
1456		seq_puts(seq,
1457			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1458
1459	else {
1460		struct sock *s = sk_entry(v);
1461		struct rose_sock *rose = rose_sk(s);
1462		const char *devname, *callsign;
1463		const struct net_device *dev = rose->device;
1464
1465		if (!dev)
1466			devname = "???";
1467		else
1468			devname = dev->name;
1469
1470		seq_printf(seq, "%-10s %-9s ",
1471			   rose2asc(rsbuf, &rose->dest_addr),
1472			   ax2asc(buf, &rose->dest_call));
1473
1474		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1475			callsign = "??????-?";
1476		else
1477			callsign = ax2asc(buf, &rose->source_call);
1478
1479		seq_printf(seq,
1480			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1481			rose2asc(rsbuf, &rose->source_addr),
1482			callsign,
1483			devname,
1484			rose->lci & 0x0FFF,
1485			(rose->neighbour) ? rose->neighbour->number : 0,
1486			rose->state,
1487			rose->vs,
1488			rose->vr,
1489			rose->va,
1490			ax25_display_timer(&rose->timer) / HZ,
1491			rose->t1 / HZ,
1492			rose->t2 / HZ,
1493			rose->t3 / HZ,
1494			rose->hb / HZ,
1495			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1496			rose->idle / (60 * HZ),
1497			sk_wmem_alloc_get(s),
1498			sk_rmem_alloc_get(s),
1499			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1500	}
1501
1502	return 0;
1503}
1504
1505static const struct seq_operations rose_info_seqops = {
1506	.start = rose_info_start,
1507	.next = rose_info_next,
1508	.stop = rose_info_stop,
1509	.show = rose_info_show,
1510};
1511#endif	/* CONFIG_PROC_FS */
1512
1513static const struct net_proto_family rose_family_ops = {
1514	.family		=	PF_ROSE,
1515	.create		=	rose_create,
1516	.owner		=	THIS_MODULE,
1517};
1518
1519static const struct proto_ops rose_proto_ops = {
1520	.family		=	PF_ROSE,
1521	.owner		=	THIS_MODULE,
1522	.release	=	rose_release,
1523	.bind		=	rose_bind,
1524	.connect	=	rose_connect,
1525	.socketpair	=	sock_no_socketpair,
1526	.accept		=	rose_accept,
1527	.getname	=	rose_getname,
1528	.poll		=	datagram_poll,
1529	.ioctl		=	rose_ioctl,
1530	.gettstamp	=	sock_gettstamp,
1531	.listen		=	rose_listen,
1532	.shutdown	=	sock_no_shutdown,
1533	.setsockopt	=	rose_setsockopt,
1534	.getsockopt	=	rose_getsockopt,
1535	.sendmsg	=	rose_sendmsg,
1536	.recvmsg	=	rose_recvmsg,
1537	.mmap		=	sock_no_mmap,
1538};
1539
1540static struct notifier_block rose_dev_notifier = {
1541	.notifier_call	=	rose_device_event,
1542};
1543
1544static struct net_device **dev_rose;
1545
1546static struct ax25_protocol rose_pid = {
1547	.pid	= AX25_P_ROSE,
1548	.func	= rose_route_frame
1549};
1550
1551static struct ax25_linkfail rose_linkfail_notifier = {
1552	.func	= rose_link_failed
1553};
1554
1555static int __init rose_proto_init(void)
1556{
1557	int i;
1558	int rc;
1559
1560	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1561		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1562		rc = -EINVAL;
1563		goto out;
1564	}
1565
1566	rc = proto_register(&rose_proto, 0);
1567	if (rc != 0)
1568		goto out;
1569
1570	rose_callsign = null_ax25_address;
1571
1572	dev_rose = kcalloc(rose_ndevs, sizeof(struct net_device *),
1573			   GFP_KERNEL);
1574	if (dev_rose == NULL) {
1575		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1576		rc = -ENOMEM;
1577		goto out_proto_unregister;
1578	}
1579
1580	for (i = 0; i < rose_ndevs; i++) {
1581		struct net_device *dev;
1582		char name[IFNAMSIZ];
1583
1584		sprintf(name, "rose%d", i);
1585		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
1586		if (!dev) {
1587			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1588			rc = -ENOMEM;
1589			goto fail;
1590		}
1591		rc = register_netdev(dev);
1592		if (rc) {
1593			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1594			free_netdev(dev);
1595			goto fail;
1596		}
1597		rose_set_lockdep_key(dev);
1598		dev_rose[i] = dev;
1599	}
1600
1601	sock_register(&rose_family_ops);
1602	register_netdevice_notifier(&rose_dev_notifier);
1603
1604	ax25_register_pid(&rose_pid);
1605	ax25_linkfail_register(&rose_linkfail_notifier);
1606
1607#ifdef CONFIG_SYSCTL
1608	rose_register_sysctl();
1609#endif
1610	rose_loopback_init();
1611
1612	rose_add_loopback_neigh();
1613
1614	proc_create_seq("rose", 0444, init_net.proc_net, &rose_info_seqops);
1615	proc_create_seq("rose_neigh", 0444, init_net.proc_net,
1616		    &rose_neigh_seqops);
1617	proc_create_seq("rose_nodes", 0444, init_net.proc_net,
1618		    &rose_node_seqops);
1619	proc_create_seq("rose_routes", 0444, init_net.proc_net,
1620		    &rose_route_seqops);
1621out:
1622	return rc;
1623fail:
1624	while (--i >= 0) {
1625		unregister_netdev(dev_rose[i]);
1626		free_netdev(dev_rose[i]);
1627	}
1628	kfree(dev_rose);
1629out_proto_unregister:
1630	proto_unregister(&rose_proto);
1631	goto out;
1632}
1633module_init(rose_proto_init);
1634
1635module_param(rose_ndevs, int, 0);
1636MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
1637
1638MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1639MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1640MODULE_LICENSE("GPL");
1641MODULE_ALIAS_NETPROTO(PF_ROSE);
1642
1643static void __exit rose_exit(void)
1644{
1645	int i;
1646
1647	remove_proc_entry("rose", init_net.proc_net);
1648	remove_proc_entry("rose_neigh", init_net.proc_net);
1649	remove_proc_entry("rose_nodes", init_net.proc_net);
1650	remove_proc_entry("rose_routes", init_net.proc_net);
1651	rose_loopback_clear();
1652
1653	rose_rt_free();
1654
1655	ax25_protocol_release(AX25_P_ROSE);
1656	ax25_linkfail_release(&rose_linkfail_notifier);
1657
1658	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1659		ax25_listen_release(&rose_callsign, NULL);
1660
1661#ifdef CONFIG_SYSCTL
1662	rose_unregister_sysctl();
1663#endif
1664	unregister_netdevice_notifier(&rose_dev_notifier);
1665
1666	sock_unregister(PF_ROSE);
1667
1668	for (i = 0; i < rose_ndevs; i++) {
1669		struct net_device *dev = dev_rose[i];
1670
1671		if (dev) {
1672			unregister_netdev(dev);
1673			free_netdev(dev);
1674		}
1675	}
1676
1677	kfree(dev_rose);
1678	proto_unregister(&rose_proto);
1679}
1680
1681module_exit(rose_exit);
v3.5.6
 
   1/*
   2 * This program is free software; you can redistribute it and/or modify
   3 * it under the terms of the GNU General Public License as published by
   4 * the Free Software Foundation; either version 2 of the License, or
   5 * (at your option) any later version.
   6 *
   7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
   8 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
   9 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
  10 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
  11 */
  12
  13#include <linux/capability.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/init.h>
  17#include <linux/errno.h>
  18#include <linux/types.h>
  19#include <linux/socket.h>
  20#include <linux/in.h>
  21#include <linux/slab.h>
  22#include <linux/kernel.h>
  23#include <linux/sched.h>
  24#include <linux/spinlock.h>
  25#include <linux/timer.h>
  26#include <linux/string.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/stat.h>
  30#include <net/net_namespace.h>
  31#include <net/ax25.h>
  32#include <linux/inet.h>
  33#include <linux/netdevice.h>
  34#include <linux/if_arp.h>
  35#include <linux/skbuff.h>
  36#include <net/sock.h>
  37#include <asm/uaccess.h>
  38#include <linux/fcntl.h>
  39#include <linux/termios.h>
  40#include <linux/mm.h>
  41#include <linux/interrupt.h>
  42#include <linux/notifier.h>
  43#include <net/rose.h>
  44#include <linux/proc_fs.h>
  45#include <linux/seq_file.h>
  46#include <net/tcp_states.h>
  47#include <net/ip.h>
  48#include <net/arp.h>
  49
  50static int rose_ndevs = 10;
  51
  52int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
  53int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
  54int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
  55int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
  56int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
  57int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
  58int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
  59int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
  60int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
  61int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
  62
  63static HLIST_HEAD(rose_list);
  64static DEFINE_SPINLOCK(rose_list_lock);
  65
  66static const struct proto_ops rose_proto_ops;
  67
  68ax25_address rose_callsign;
  69
  70/*
  71 * ROSE network devices are virtual network devices encapsulating ROSE
  72 * frames into AX.25 which will be sent through an AX.25 device, so form a
  73 * special "super class" of normal net devices; split their locks off into a
  74 * separate class since they always nest.
  75 */
  76static struct lock_class_key rose_netdev_xmit_lock_key;
  77static struct lock_class_key rose_netdev_addr_lock_key;
  78
  79static void rose_set_lockdep_one(struct net_device *dev,
  80				 struct netdev_queue *txq,
  81				 void *_unused)
  82{
  83	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
  84}
  85
  86static void rose_set_lockdep_key(struct net_device *dev)
  87{
  88	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
  89	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
  90}
  91
  92/*
  93 *	Convert a ROSE address into text.
  94 */
  95char *rose2asc(char *buf, const rose_address *addr)
  96{
  97	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
  98	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
  99	    addr->rose_addr[4] == 0x00) {
 100		strcpy(buf, "*");
 101	} else {
 102		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
 103						addr->rose_addr[1] & 0xFF,
 104						addr->rose_addr[2] & 0xFF,
 105						addr->rose_addr[3] & 0xFF,
 106						addr->rose_addr[4] & 0xFF);
 107	}
 108
 109	return buf;
 110}
 111
 112/*
 113 *	Compare two ROSE addresses, 0 == equal.
 114 */
 115int rosecmp(rose_address *addr1, rose_address *addr2)
 116{
 117	int i;
 118
 119	for (i = 0; i < 5; i++)
 120		if (addr1->rose_addr[i] != addr2->rose_addr[i])
 121			return 1;
 122
 123	return 0;
 124}
 125
 126/*
 127 *	Compare two ROSE addresses for only mask digits, 0 == equal.
 128 */
 129int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
 
 130{
 131	unsigned int i, j;
 132
 133	if (mask > 10)
 134		return 1;
 135
 136	for (i = 0; i < mask; i++) {
 137		j = i / 2;
 138
 139		if ((i % 2) != 0) {
 140			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
 141				return 1;
 142		} else {
 143			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
 144				return 1;
 145		}
 146	}
 147
 148	return 0;
 149}
 150
 151/*
 152 *	Socket removal during an interrupt is now safe.
 153 */
 154static void rose_remove_socket(struct sock *sk)
 155{
 156	spin_lock_bh(&rose_list_lock);
 157	sk_del_node_init(sk);
 158	spin_unlock_bh(&rose_list_lock);
 159}
 160
 161/*
 162 *	Kill all bound sockets on a broken link layer connection to a
 163 *	particular neighbour.
 164 */
 165void rose_kill_by_neigh(struct rose_neigh *neigh)
 166{
 167	struct sock *s;
 168	struct hlist_node *node;
 169
 170	spin_lock_bh(&rose_list_lock);
 171	sk_for_each(s, node, &rose_list) {
 172		struct rose_sock *rose = rose_sk(s);
 173
 174		if (rose->neighbour == neigh) {
 175			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 176			rose->neighbour->use--;
 177			rose->neighbour = NULL;
 178		}
 179	}
 180	spin_unlock_bh(&rose_list_lock);
 181}
 182
 183/*
 184 *	Kill all bound sockets on a dropped device.
 185 */
 186static void rose_kill_by_device(struct net_device *dev)
 187{
 188	struct sock *s;
 189	struct hlist_node *node;
 
 
 190
 
 
 
 191	spin_lock_bh(&rose_list_lock);
 192	sk_for_each(s, node, &rose_list) {
 193		struct rose_sock *rose = rose_sk(s);
 
 
 
 
 
 
 
 
 
 
 194
 
 
 
 
 
 195		if (rose->device == dev) {
 196			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
 197			rose->neighbour->use--;
 
 
 198			rose->device = NULL;
 199		}
 
 
 
 
 200	}
 201	spin_unlock_bh(&rose_list_lock);
 
 202}
 203
 204/*
 205 *	Handle device status changes.
 206 */
 207static int rose_device_event(struct notifier_block *this, unsigned long event,
 208	void *ptr)
 209{
 210	struct net_device *dev = (struct net_device *)ptr;
 211
 212	if (!net_eq(dev_net(dev), &init_net))
 213		return NOTIFY_DONE;
 214
 215	if (event != NETDEV_DOWN)
 216		return NOTIFY_DONE;
 217
 218	switch (dev->type) {
 219	case ARPHRD_ROSE:
 220		rose_kill_by_device(dev);
 221		break;
 222	case ARPHRD_AX25:
 223		rose_link_device_down(dev);
 224		rose_rt_device_down(dev);
 225		break;
 226	}
 227
 228	return NOTIFY_DONE;
 229}
 230
 231/*
 232 *	Add a socket to the bound sockets list.
 233 */
 234static void rose_insert_socket(struct sock *sk)
 235{
 236
 237	spin_lock_bh(&rose_list_lock);
 238	sk_add_node(sk, &rose_list);
 239	spin_unlock_bh(&rose_list_lock);
 240}
 241
 242/*
 243 *	Find a socket that wants to accept the Call Request we just
 244 *	received.
 245 */
 246static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 247{
 248	struct sock *s;
 249	struct hlist_node *node;
 250
 251	spin_lock_bh(&rose_list_lock);
 252	sk_for_each(s, node, &rose_list) {
 253		struct rose_sock *rose = rose_sk(s);
 254
 255		if (!rosecmp(&rose->source_addr, addr) &&
 256		    !ax25cmp(&rose->source_call, call) &&
 257		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
 258			goto found;
 259	}
 260
 261	sk_for_each(s, node, &rose_list) {
 262		struct rose_sock *rose = rose_sk(s);
 263
 264		if (!rosecmp(&rose->source_addr, addr) &&
 265		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
 266		    s->sk_state == TCP_LISTEN)
 267			goto found;
 268	}
 269	s = NULL;
 270found:
 271	spin_unlock_bh(&rose_list_lock);
 272	return s;
 273}
 274
 275/*
 276 *	Find a connected ROSE socket given my LCI and device.
 277 */
 278struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
 279{
 280	struct sock *s;
 281	struct hlist_node *node;
 282
 283	spin_lock_bh(&rose_list_lock);
 284	sk_for_each(s, node, &rose_list) {
 285		struct rose_sock *rose = rose_sk(s);
 286
 287		if (rose->lci == lci && rose->neighbour == neigh)
 288			goto found;
 289	}
 290	s = NULL;
 291found:
 292	spin_unlock_bh(&rose_list_lock);
 293	return s;
 294}
 295
 296/*
 297 *	Find a unique LCI for a given device.
 298 */
 299unsigned int rose_new_lci(struct rose_neigh *neigh)
 300{
 301	int lci;
 302
 303	if (neigh->dce_mode) {
 304		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
 305			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 306				return lci;
 307	} else {
 308		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
 309			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
 310				return lci;
 311	}
 312
 313	return 0;
 314}
 315
 316/*
 317 *	Deferred destroy.
 318 */
 319void rose_destroy_socket(struct sock *);
 320
 321/*
 322 *	Handler for deferred kills.
 323 */
 324static void rose_destroy_timer(unsigned long data)
 325{
 326	rose_destroy_socket((struct sock *)data);
 
 
 327}
 328
 329/*
 330 *	This is called from user mode and the timers. Thus it protects itself
 331 *	against interrupt users but doesn't worry about being called during
 332 *	work.  Once it is removed from the queue no interrupt or bottom half
 333 *	will touch it and we are (fairly 8-) ) safe.
 334 */
 335void rose_destroy_socket(struct sock *sk)
 336{
 337	struct sk_buff *skb;
 338
 339	rose_remove_socket(sk);
 340	rose_stop_heartbeat(sk);
 341	rose_stop_idletimer(sk);
 342	rose_stop_timer(sk);
 343
 344	rose_clear_queues(sk);		/* Flush the queues */
 345
 346	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 347		if (skb->sk != sk) {	/* A pending connection */
 348			/* Queue the unaccepted socket for death */
 349			sock_set_flag(skb->sk, SOCK_DEAD);
 350			rose_start_heartbeat(skb->sk);
 351			rose_sk(skb->sk)->state = ROSE_STATE_0;
 352		}
 353
 354		kfree_skb(skb);
 355	}
 356
 357	if (sk_has_allocations(sk)) {
 358		/* Defer: outstanding buffers */
 359		setup_timer(&sk->sk_timer, rose_destroy_timer,
 360				(unsigned long)sk);
 361		sk->sk_timer.expires  = jiffies + 10 * HZ;
 362		add_timer(&sk->sk_timer);
 363	} else
 364		sock_put(sk);
 365}
 366
 367/*
 368 *	Handling for system calls applied via the various interfaces to a
 369 *	ROSE socket object.
 370 */
 371
 372static int rose_setsockopt(struct socket *sock, int level, int optname,
 373	char __user *optval, unsigned int optlen)
 374{
 375	struct sock *sk = sock->sk;
 376	struct rose_sock *rose = rose_sk(sk);
 377	int opt;
 378
 379	if (level != SOL_ROSE)
 380		return -ENOPROTOOPT;
 381
 382	if (optlen < sizeof(int))
 383		return -EINVAL;
 384
 385	if (get_user(opt, (int __user *)optval))
 386		return -EFAULT;
 387
 388	switch (optname) {
 389	case ROSE_DEFER:
 390		rose->defer = opt ? 1 : 0;
 391		return 0;
 392
 393	case ROSE_T1:
 394		if (opt < 1)
 395			return -EINVAL;
 396		rose->t1 = opt * HZ;
 397		return 0;
 398
 399	case ROSE_T2:
 400		if (opt < 1)
 401			return -EINVAL;
 402		rose->t2 = opt * HZ;
 403		return 0;
 404
 405	case ROSE_T3:
 406		if (opt < 1)
 407			return -EINVAL;
 408		rose->t3 = opt * HZ;
 409		return 0;
 410
 411	case ROSE_HOLDBACK:
 412		if (opt < 1)
 413			return -EINVAL;
 414		rose->hb = opt * HZ;
 415		return 0;
 416
 417	case ROSE_IDLE:
 418		if (opt < 0)
 419			return -EINVAL;
 420		rose->idle = opt * 60 * HZ;
 421		return 0;
 422
 423	case ROSE_QBITINCL:
 424		rose->qbitincl = opt ? 1 : 0;
 425		return 0;
 426
 427	default:
 428		return -ENOPROTOOPT;
 429	}
 430}
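
/*
 * Illustrative userspace usage (a sketch, not part of this file): every
 * option above takes a plain int, with the timers given in seconds and
 * ROSE_IDLE in minutes, e.g.
 *
 *	int fd = socket(AF_ROSE, SOCK_SEQPACKET, 0);
 *	int t1 = 200;
 *	setsockopt(fd, SOL_ROSE, ROSE_T1, &t1, sizeof(t1));
 */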
 431
 432static int rose_getsockopt(struct socket *sock, int level, int optname,
 433	char __user *optval, int __user *optlen)
 434{
 435	struct sock *sk = sock->sk;
 436	struct rose_sock *rose = rose_sk(sk);
 437	int val = 0;
 438	int len;
 439
 440	if (level != SOL_ROSE)
 441		return -ENOPROTOOPT;
 442
 443	if (get_user(len, optlen))
 444		return -EFAULT;
 445
 446	if (len < 0)
 447		return -EINVAL;
 448
 449	switch (optname) {
 450	case ROSE_DEFER:
 451		val = rose->defer;
 452		break;
 453
 454	case ROSE_T1:
 455		val = rose->t1 / HZ;
 456		break;
 457
 458	case ROSE_T2:
 459		val = rose->t2 / HZ;
 460		break;
 461
 462	case ROSE_T3:
 463		val = rose->t3 / HZ;
 464		break;
 465
 466	case ROSE_HOLDBACK:
 467		val = rose->hb / HZ;
 468		break;
 469
 470	case ROSE_IDLE:
 471		val = rose->idle / (60 * HZ);
 472		break;
 473
 474	case ROSE_QBITINCL:
 475		val = rose->qbitincl;
 476		break;
 477
 478	default:
 479		return -ENOPROTOOPT;
 480	}
 481
 482	len = min_t(unsigned int, len, sizeof(int));
 483
 484	if (put_user(len, optlen))
 485		return -EFAULT;
 486
 487	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
 488}
 489
 490static int rose_listen(struct socket *sock, int backlog)
 491{
 492	struct sock *sk = sock->sk;
 493
 494	if (sk->sk_state != TCP_LISTEN) {
 495		struct rose_sock *rose = rose_sk(sk);
 496
 497		rose->dest_ndigis = 0;
 498		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
 499		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
 500		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
 501		sk->sk_max_ack_backlog = backlog;
 502		sk->sk_state           = TCP_LISTEN;
 503		return 0;
 504	}
 505
 506	return -EOPNOTSUPP;
 507}
 508
 509static struct proto rose_proto = {
 510	.name	  = "ROSE",
 511	.owner	  = THIS_MODULE,
 512	.obj_size = sizeof(struct rose_sock),
 513};
 514
 515static int rose_create(struct net *net, struct socket *sock, int protocol,
 516		       int kern)
 517{
 518	struct sock *sk;
 519	struct rose_sock *rose;
 520
 521	if (!net_eq(net, &init_net))
 522		return -EAFNOSUPPORT;
 523
 524	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 525		return -ESOCKTNOSUPPORT;
 526
 527	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
 528	if (sk == NULL)
 529		return -ENOMEM;
 530
 531	rose = rose_sk(sk);
 532
 533	sock_init_data(sock, sk);
 534
 535	skb_queue_head_init(&rose->ack_queue);
 536#ifdef M_BIT
 537	skb_queue_head_init(&rose->frag_queue);
 538	rose->fraglen    = 0;
 539#endif
 540
 541	sock->ops    = &rose_proto_ops;
 542	sk->sk_protocol = protocol;
 543
 544	init_timer(&rose->timer);
 545	init_timer(&rose->idletimer);
 546
 547	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
 548	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
 549	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
 550	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
 551	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
 552
 553	rose->state = ROSE_STATE_0;
 554
 555	return 0;
 556}
 557
 558static struct sock *rose_make_new(struct sock *osk)
 559{
 560	struct sock *sk;
 561	struct rose_sock *rose, *orose;
 562
 563	if (osk->sk_type != SOCK_SEQPACKET)
 564		return NULL;
 565
 566	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
 567	if (sk == NULL)
 568		return NULL;
 569
 570	rose = rose_sk(sk);
 571
 572	sock_init_data(NULL, sk);
 573
 574	skb_queue_head_init(&rose->ack_queue);
 575#ifdef M_BIT
 576	skb_queue_head_init(&rose->frag_queue);
 577	rose->fraglen  = 0;
 578#endif
 579
 580	sk->sk_type     = osk->sk_type;
 581	sk->sk_priority = osk->sk_priority;
 582	sk->sk_protocol = osk->sk_protocol;
 583	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 584	sk->sk_sndbuf   = osk->sk_sndbuf;
 585	sk->sk_state    = TCP_ESTABLISHED;
 586	sock_copy_flags(sk, osk);
 587
 588	init_timer(&rose->timer);
 589	init_timer(&rose->idletimer);
 590
 591	orose		= rose_sk(osk);
 592	rose->t1	= orose->t1;
 593	rose->t2	= orose->t2;
 594	rose->t3	= orose->t3;
 595	rose->hb	= orose->hb;
 596	rose->idle	= orose->idle;
 597	rose->defer	= orose->defer;
 598	rose->device	= orose->device;
 599	rose->qbitincl	= orose->qbitincl;
 600
 601	return sk;
 602}
 603
 604static int rose_release(struct socket *sock)
 605{
 606	struct sock *sk = sock->sk;
 607	struct rose_sock *rose;
 608
 609	if (sk == NULL) return 0;
 610
 611	sock_hold(sk);
 612	sock_orphan(sk);
 613	lock_sock(sk);
 614	rose = rose_sk(sk);
 615
 616	switch (rose->state) {
 617	case ROSE_STATE_0:
 618		release_sock(sk);
 619		rose_disconnect(sk, 0, -1, -1);
 620		lock_sock(sk);
 621		rose_destroy_socket(sk);
 622		break;
 623
 624	case ROSE_STATE_2:
 625		rose->neighbour->use--;
 626		release_sock(sk);
 627		rose_disconnect(sk, 0, -1, -1);
 628		lock_sock(sk);
 629		rose_destroy_socket(sk);
 630		break;
 631
 632	case ROSE_STATE_1:
 633	case ROSE_STATE_3:
 634	case ROSE_STATE_4:
 635	case ROSE_STATE_5:
 636		rose_clear_queues(sk);
 637		rose_stop_idletimer(sk);
 638		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
 639		rose_start_t3timer(sk);
 640		rose->state  = ROSE_STATE_2;
 641		sk->sk_state    = TCP_CLOSE;
 642		sk->sk_shutdown |= SEND_SHUTDOWN;
 643		sk->sk_state_change(sk);
 644		sock_set_flag(sk, SOCK_DEAD);
 645		sock_set_flag(sk, SOCK_DESTROY);
 646		break;
 647
 648	default:
 649		break;
 650	}
 651
 652	sock->sk = NULL;
 653	release_sock(sk);
 654	sock_put(sk);
 655
 656	return 0;
 657}
 658
 659static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 660{
 661	struct sock *sk = sock->sk;
 662	struct rose_sock *rose = rose_sk(sk);
 663	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 664	struct net_device *dev;
 665	ax25_address *source;
 666	ax25_uid_assoc *user;
 667	int n;
 668
 669	if (!sock_flag(sk, SOCK_ZAPPED))
 670		return -EINVAL;
 671
 672	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 673		return -EINVAL;
 674
 675	if (addr->srose_family != AF_ROSE)
 676		return -EINVAL;
 677
 678	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 679		return -EINVAL;
 680
 681	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 682		return -EINVAL;
 683
 684	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
 685		return -EADDRNOTAVAIL;
 686
 687	source = &addr->srose_call;
 688
 689	user = ax25_findbyuid(current_euid());
 690	if (user) {
 691		rose->source_call = user->call;
 692		ax25_uid_put(user);
 693	} else {
 694		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
 695			return -EACCES;
 696		rose->source_call   = *source;
 697	}
 698
 699	rose->source_addr   = addr->srose_addr;
 700	rose->device        = dev;
 701	rose->source_ndigis = addr->srose_ndigis;
 702
 703	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 704		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 705		for (n = 0 ; n < addr->srose_ndigis ; n++)
 706			rose->source_digis[n] = full_addr->srose_digis[n];
 707	} else {
 708		if (rose->source_ndigis == 1) {
 709			rose->source_digis[0] = addr->srose_digi;
 710		}
 711	}
 712
 713	rose_insert_socket(sk);
 714
 715	sock_reset_flag(sk, SOCK_ZAPPED);
 716
 717	return 0;
 718}
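
/*
 * A minimal bind() sketch from userspace; the address must belong to one
 * of the local roseN interfaces (rose_dev_get() above).  rose_aton() and
 * ax25_aton_entry() are assumed here to come from libax25 and pack the
 * ten-digit ROSE address and the AX.25 callsign respectively:
 *
 *	struct sockaddr_rose laddr;
 *
 *	memset(&laddr, 0, sizeof(laddr));
 *	laddr.srose_family = AF_ROSE;
 *	rose_aton("2080192303", laddr.srose_addr.rose_addr);
 *	ax25_aton_entry("G4KLX-0", laddr.srose_call.ax25_call);
 *	bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));
 */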
 719
 720static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
 721{
 722	struct sock *sk = sock->sk;
 723	struct rose_sock *rose = rose_sk(sk);
 724	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 725	unsigned char cause, diagnostic;
 726	struct net_device *dev;
 727	ax25_uid_assoc *user;
 728	int n, err = 0;
 729
 730	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
 731		return -EINVAL;
 732
 733	if (addr->srose_family != AF_ROSE)
 734		return -EINVAL;
 735
 736	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 737		return -EINVAL;
 738
 739	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 740		return -EINVAL;
 741
 742	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
 743	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
 744		return -EINVAL;
 745
 746	lock_sock(sk);
 747
 748	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
 749		/* Connect completed during an ERESTARTSYS event */
 750		sock->state = SS_CONNECTED;
 751		goto out_release;
 752	}
 753
 754	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
 755		sock->state = SS_UNCONNECTED;
 756		err = -ECONNREFUSED;
 757		goto out_release;
 758	}
 759
 760	if (sk->sk_state == TCP_ESTABLISHED) {
 761		/* No reconnect on a seqpacket socket */
 762		err = -EISCONN;
 763		goto out_release;
 764	}
 765
 766	sk->sk_state   = TCP_CLOSE;
 767	sock->state = SS_UNCONNECTED;
 768
 769	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
 770					 &diagnostic, 0);
 771	if (!rose->neighbour) {
 772		err = -ENETUNREACH;
 773		goto out_release;
 774	}
 775
 776	rose->lci = rose_new_lci(rose->neighbour);
 777	if (!rose->lci) {
 778		err = -ENETUNREACH;
 779		goto out_release;
 780	}
 781
 782	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
 783		sock_reset_flag(sk, SOCK_ZAPPED);
 784
 785		if ((dev = rose_dev_first()) == NULL) {
 786			err = -ENETUNREACH;
 787			goto out_release;
 788		}
 789
 790		user = ax25_findbyuid(current_euid());
 791		if (!user) {
 792			err = -EINVAL;
 793			goto out_release;
 794		}
 795
 796		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
 797		rose->source_call = user->call;
 798		rose->device      = dev;
 799		ax25_uid_put(user);
 800
 801		rose_insert_socket(sk);		/* Finish the bind */
 802	}
 803	rose->dest_addr   = addr->srose_addr;
 804	rose->dest_call   = addr->srose_call;
 805	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
 806	rose->dest_ndigis = addr->srose_ndigis;
 807
 808	if (addr_len == sizeof(struct full_sockaddr_rose)) {
 809		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
 810		for (n = 0 ; n < addr->srose_ndigis ; n++)
 811			rose->dest_digis[n] = full_addr->srose_digis[n];
 812	} else {
 813		if (rose->dest_ndigis == 1) {
 814			rose->dest_digis[0] = addr->srose_digi;
 815		}
 816	}
 817
 818	/* Move to connecting socket, start sending Connect Requests */
 819	sock->state   = SS_CONNECTING;
 820	sk->sk_state     = TCP_SYN_SENT;
 821
 822	rose->state = ROSE_STATE_1;
 823
 824	rose->neighbour->use++;
 825
 826	rose_write_internal(sk, ROSE_CALL_REQUEST);
 827	rose_start_heartbeat(sk);
 828	rose_start_t1timer(sk);
 829
 830	/* Now the loop */
 831	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
 832		err = -EINPROGRESS;
 833		goto out_release;
 834	}
 835
 836	/*
 837	 * A Connect Ack with Choke or timeout or failed routing will go to
 838	 * closed.
 839	 */
 840	if (sk->sk_state == TCP_SYN_SENT) {
 841		DEFINE_WAIT(wait);
 842
 843		for (;;) {
 844			prepare_to_wait(sk_sleep(sk), &wait,
 845					TASK_INTERRUPTIBLE);
 846			if (sk->sk_state != TCP_SYN_SENT)
 847				break;
 848			if (!signal_pending(current)) {
 849				release_sock(sk);
 850				schedule();
 851				lock_sock(sk);
 852				continue;
 853			}
 854			err = -ERESTARTSYS;
 855			break;
 856		}
 857		finish_wait(sk_sleep(sk), &wait);
 858
 859		if (err)
 860			goto out_release;
 861	}
 862
 863	if (sk->sk_state != TCP_ESTABLISHED) {
 864		sock->state = SS_UNCONNECTED;
 865		err = sock_error(sk);	/* Always set at this point */
 866		goto out_release;
 867	}
 868
 869	sock->state = SS_CONNECTED;
 870
 871out_release:
 872	release_sock(sk);
 873
 874	return err;
 875}
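
/*
 * Connecting through a digipeater needs the larger address structure; a
 * sketch, again assuming the libax25 conversion helpers:
 *
 *	struct full_sockaddr_rose daddr;
 *
 *	memset(&daddr, 0, sizeof(daddr));
 *	daddr.srose_family = AF_ROSE;
 *	rose_aton("2080194001", daddr.srose_addr.rose_addr);
 *	ax25_aton_entry("GB7XX-0", daddr.srose_call.ax25_call);
 *	daddr.srose_ndigis = 1;
 *	ax25_aton_entry("GB7YY-0", daddr.srose_digis[0].ax25_call);
 *	connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));
 */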
 876
 877static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
 878{
 879	struct sk_buff *skb;
 880	struct sock *newsk;
 881	DEFINE_WAIT(wait);
 882	struct sock *sk;
 883	int err = 0;
 884
 885	if ((sk = sock->sk) == NULL)
 886		return -EINVAL;
 887
 888	lock_sock(sk);
 889	if (sk->sk_type != SOCK_SEQPACKET) {
 890		err = -EOPNOTSUPP;
 891		goto out_release;
 892	}
 893
 894	if (sk->sk_state != TCP_LISTEN) {
 895		err = -EINVAL;
 896		goto out_release;
 897	}
 898
 899	/*
 900	 *	The receive queue at this point holds the Call Requests we saved;
 901	 *	each carries a new socket ready to be accepted
 902	 */
 903	for (;;) {
 904		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 905
 906		skb = skb_dequeue(&sk->sk_receive_queue);
 907		if (skb)
 908			break;
 909
 910		if (flags & O_NONBLOCK) {
 911			err = -EWOULDBLOCK;
 912			break;
 913		}
 914		if (!signal_pending(current)) {
 915			release_sock(sk);
 916			schedule();
 917			lock_sock(sk);
 918			continue;
 919		}
 920		err = -ERESTARTSYS;
 921		break;
 922	}
 923	finish_wait(sk_sleep(sk), &wait);
 924	if (err)
 925		goto out_release;
 926
 927	newsk = skb->sk;
 928	sock_graft(newsk, newsock);
 929
 930	/* Now attach up the new socket */
 931	skb->sk = NULL;
 932	kfree_skb(skb);
 933	sk->sk_ack_backlog--;
 934
 935out_release:
 936	release_sock(sk);
 937
 938	return err;
 939}
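
/*
 * Sketch of the listening side: after bind() a server simply does
 *
 *	listen(fd, 5);
 *	newfd = accept(fd, NULL, NULL);
 *
 * each file descriptor returned here wraps a socket that was created and
 * queued by rose_rx_call_request() below when the Call Request arrived.
 */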
 940
 941static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
 942	int *uaddr_len, int peer)
 943{
 944	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
 945	struct sock *sk = sock->sk;
 946	struct rose_sock *rose = rose_sk(sk);
 947	int n;
 948
 949	memset(srose, 0, sizeof(*srose));
 950	if (peer != 0) {
 951		if (sk->sk_state != TCP_ESTABLISHED)
 952			return -ENOTCONN;
 953		srose->srose_family = AF_ROSE;
 954		srose->srose_addr   = rose->dest_addr;
 955		srose->srose_call   = rose->dest_call;
 956		srose->srose_ndigis = rose->dest_ndigis;
 957		for (n = 0; n < rose->dest_ndigis; n++)
 958			srose->srose_digis[n] = rose->dest_digis[n];
 959	} else {
 960		srose->srose_family = AF_ROSE;
 961		srose->srose_addr   = rose->source_addr;
 962		srose->srose_call   = rose->source_call;
 963		srose->srose_ndigis = rose->source_ndigis;
 964		for (n = 0; n < rose->source_ndigis; n++)
 965			srose->srose_digis[n] = rose->source_digis[n];
 966	}
 967
 968	*uaddr_len = sizeof(struct full_sockaddr_rose);
 969	return 0;
 970}
 971
 972int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
 973{
 974	struct sock *sk;
 975	struct sock *make;
 976	struct rose_sock *make_rose;
 977	struct rose_facilities_struct facilities;
 978	int n;
 979
 980	skb->sk = NULL;		/* Initially we don't know who it's for */
 981
 982	/*
 983	 *	skb->data points to the rose frame start
 984	 */
 985	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
 986
 987	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
 988				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
 989				   &facilities)) {
 990		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
 991		return 0;
 992	}
 993
 994	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
 995
 996	/*
 997	 * We can't accept the Call Request.
 998	 */
 999	if (sk == NULL || sk_acceptq_is_full(sk) ||
1000	    (make = rose_make_new(sk)) == NULL) {
1001		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
1002		return 0;
1003	}
1004
1005	skb->sk     = make;
1006	make->sk_state = TCP_ESTABLISHED;
1007	make_rose = rose_sk(make);
1008
1009	make_rose->lci           = lci;
1010	make_rose->dest_addr     = facilities.dest_addr;
1011	make_rose->dest_call     = facilities.dest_call;
1012	make_rose->dest_ndigis   = facilities.dest_ndigis;
1013	for (n = 0 ; n < facilities.dest_ndigis ; n++)
1014		make_rose->dest_digis[n] = facilities.dest_digis[n];
1015	make_rose->source_addr   = facilities.source_addr;
1016	make_rose->source_call   = facilities.source_call;
1017	make_rose->source_ndigis = facilities.source_ndigis;
1018	for (n = 0 ; n < facilities.source_ndigis ; n++)
1019		make_rose->source_digis[n]= facilities.source_digis[n];
1020	make_rose->neighbour     = neigh;
1021	make_rose->device        = dev;
1022	make_rose->facilities    = facilities;
1023
1024	make_rose->neighbour->use++;
1025
1026	if (rose_sk(sk)->defer) {
1027		make_rose->state = ROSE_STATE_5;
1028	} else {
1029		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1030		make_rose->state = ROSE_STATE_3;
1031		rose_start_idletimer(make);
1032	}
1033
1034	make_rose->condition = 0x00;
1035	make_rose->vs        = 0;
1036	make_rose->va        = 0;
1037	make_rose->vr        = 0;
1038	make_rose->vl        = 0;
1039	sk->sk_ack_backlog++;
1040
1041	rose_insert_socket(make);
1042
1043	skb_queue_head(&sk->sk_receive_queue, skb);
1044
1045	rose_start_heartbeat(make);
1046
1047	if (!sock_flag(sk, SOCK_DEAD))
1048		sk->sk_data_ready(sk, skb->len);
1049
1050	return 1;
1051}
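
/*
 * Note on the defer path above: a deferred call parks in ROSE_STATE_5 and
 * no Call Accepted is sent until SIOCRSACCEPT is issued on the accepted
 * socket (see rose_ioctl() below), which lets the application inspect the
 * caller before committing to the connection.
 */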
1052
1053static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1054			struct msghdr *msg, size_t len)
1055{
1056	struct sock *sk = sock->sk;
1057	struct rose_sock *rose = rose_sk(sk);
1058	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
1059	int err;
1060	struct full_sockaddr_rose srose;
1061	struct sk_buff *skb;
1062	unsigned char *asmptr;
1063	int n, size, qbit = 0;
1064
1065	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1066		return -EINVAL;
1067
1068	if (sock_flag(sk, SOCK_ZAPPED))
1069		return -EADDRNOTAVAIL;
1070
1071	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1072		send_sig(SIGPIPE, current, 0);
1073		return -EPIPE;
1074	}
1075
1076	if (rose->neighbour == NULL || rose->device == NULL)
1077		return -ENETUNREACH;
1078
1079	if (usrose != NULL) {
1080		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1081			return -EINVAL;
1082		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1083		memcpy(&srose, usrose, msg->msg_namelen);
1084		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1085		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1086			return -EISCONN;
1087		if (srose.srose_ndigis != rose->dest_ndigis)
1088			return -EISCONN;
1089		if (srose.srose_ndigis == rose->dest_ndigis) {
1090			for (n = 0 ; n < srose.srose_ndigis ; n++)
1091				if (ax25cmp(&rose->dest_digis[n],
1092					    &srose.srose_digis[n]))
1093					return -EISCONN;
1094		}
1095		if (srose.srose_family != AF_ROSE)
1096			return -EINVAL;
1097	} else {
1098		if (sk->sk_state != TCP_ESTABLISHED)
1099			return -ENOTCONN;
1100
1101		srose.srose_family = AF_ROSE;
1102		srose.srose_addr   = rose->dest_addr;
1103		srose.srose_call   = rose->dest_call;
1104		srose.srose_ndigis = rose->dest_ndigis;
1105		for (n = 0 ; n < rose->dest_ndigis ; n++)
1106			srose.srose_digis[n] = rose->dest_digis[n];
1107	}
1108
1109	/* Build a packet */
1110	/* Sanity check the packet size */
1111	if (len > 65535)
1112		return -EMSGSIZE;
1113
1114	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1115
1116	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1117		return err;
1118
1119	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1120
1121	/*
1122	 *	Put the data on the end
1123	 */
1124
1125	skb_reset_transport_header(skb);
1126	skb_put(skb, len);
1127
1128	err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1129	if (err) {
1130		kfree_skb(skb);
1131		return err;
1132	}
1133
1134	/*
1135	 *	If the Q BIT Include socket option is in force, the first
1136	 *	byte of the user data is the logical value of the Q Bit.
1137	 */
1138	if (rose->qbitincl) {
1139		qbit = skb->data[0];
1140		skb_pull(skb, 1);
1141	}
1142
1143	/*
1144	 *	Push down the ROSE header
1145	 */
1146	asmptr = skb_push(skb, ROSE_MIN_LEN);
1147
1148	/* Build a ROSE Network header */
1149	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1150	asmptr[1] = (rose->lci >> 0) & 0xFF;
1151	asmptr[2] = ROSE_DATA;
1152
1153	if (qbit)
1154		asmptr[0] |= ROSE_Q_BIT;
1155
1156	if (sk->sk_state != TCP_ESTABLISHED) {
1157		kfree_skb(skb);
1158		return -ENOTCONN;
1159	}
1160
1161#ifdef M_BIT
1162#define ROSE_PACLEN (256-ROSE_MIN_LEN)
1163	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1164		unsigned char header[ROSE_MIN_LEN];
1165		struct sk_buff *skbn;
1166		int frontlen;
1167		int lg;
1168
1169		/* Save a copy of the Header */
1170		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1171		skb_pull(skb, ROSE_MIN_LEN);
1172
1173		frontlen = skb_headroom(skb);
1174
1175		while (skb->len > 0) {
1176			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1177				kfree_skb(skb);
1178				return err;
1179			}
1180
1181			skbn->sk   = sk;
1182			skbn->free = 1;
1183			skbn->arp  = 1;
1184
1185			skb_reserve(skbn, frontlen);
1186
1187			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1188
1189			/* Copy the user data */
1190			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1191			skb_pull(skb, lg);
1192
1193			/* Duplicate the Header */
1194			skb_push(skbn, ROSE_MIN_LEN);
1195			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1196
1197			if (skb->len > 0)
1198				skbn->data[2] |= M_BIT;
1199
1200			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1201		}
1202
1203		skb->free = 1;
1204		kfree_skb(skb);
1205	} else {
1206		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1207	}
1208#else
1209	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1210#endif
1211
1212	rose_kick(sk);
1213
1214	return len;
1215}
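
/*
 * When the ROSE_QBITINCL socket option is set, the first byte handed to
 * send() is consumed as the logical value of the Q bit rather than being
 * transmitted as data; a sketch:
 *
 *	unsigned char out[2] = { 1, 0x42 };
 *	send(fd, out, sizeof(out), 0);
 *
 * sends the single data byte 0x42 in a packet with the Q bit set.
 */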
1216
1217
1218static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1219			struct msghdr *msg, size_t size, int flags)
1220{
1221	struct sock *sk = sock->sk;
1222	struct rose_sock *rose = rose_sk(sk);
1223	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
1224	size_t copied;
1225	unsigned char *asmptr;
1226	struct sk_buff *skb;
1227	int n, er, qbit;
1228
1229	/*
1230	 * This works for seqpacket too. The receiver has ordered the queue for
1231	 * us! We do one quick check first though
1232	 */
1233	if (sk->sk_state != TCP_ESTABLISHED)
1234		return -ENOTCONN;
1235
1236	/* Now we can treat all alike */
1237	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
1238		return er;
1239
1240	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1241
1242	skb_pull(skb, ROSE_MIN_LEN);
1243
1244	if (rose->qbitincl) {
1245		asmptr  = skb_push(skb, 1);
1246		*asmptr = qbit;
1247	}
1248
1249	skb_reset_transport_header(skb);
1250	copied     = skb->len;
1251
1252	if (copied > size) {
1253		copied = size;
1254		msg->msg_flags |= MSG_TRUNC;
1255	}
1256
1257	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1258
1259	if (srose != NULL) {
1260		srose->srose_family = AF_ROSE;
1261		srose->srose_addr   = rose->dest_addr;
1262		srose->srose_call   = rose->dest_call;
1263		srose->srose_ndigis = rose->dest_ndigis;
1264		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
1265			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
1266			for (n = 0 ; n < rose->dest_ndigis ; n++)
1267				full_srose->srose_digis[n] = rose->dest_digis[n];
1268			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1269		} else {
1270			if (rose->dest_ndigis >= 1) {
1271				srose->srose_ndigis = 1;
1272				srose->srose_digi = rose->dest_digis[0];
1273			}
1274			msg->msg_namelen = sizeof(struct sockaddr_rose);
1275		}
1276	}
1277
1278	skb_free_datagram(sk, skb);
1279
1280	return copied;
1281}
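
/*
 * Recovering the full peer address (including digipeaters) needs recvmsg()
 * with msg_namelen primed to the size of the large structure, as the test
 * above shows; a sketch:
 *
 *	unsigned char buf[512];
 *	struct full_sockaddr_rose from;
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_name	= &from,
 *		.msg_namelen	= sizeof(from),
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *	};
 *	ssize_t n = recvmsg(fd, &msg, 0);
 */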
1282
1283
1284static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1285{
1286	struct sock *sk = sock->sk;
1287	struct rose_sock *rose = rose_sk(sk);
1288	void __user *argp = (void __user *)arg;
1289
1290	switch (cmd) {
1291	case TIOCOUTQ: {
1292		long amount;
1293
1294		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1295		if (amount < 0)
1296			amount = 0;
1297		return put_user(amount, (unsigned int __user *) argp);
1298	}
1299
1300	case TIOCINQ: {
1301		struct sk_buff *skb;
1302		long amount = 0L;
1303		/* These two are safe on a single CPU system as only user tasks fiddle here */
1304		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1305			amount = skb->len;
1306		return put_user(amount, (unsigned int __user *) argp);
1307	}
1308
1309	case SIOCGSTAMP:
1310		return sock_get_timestamp(sk, (struct timeval __user *) argp);
1311
1312	case SIOCGSTAMPNS:
1313		return sock_get_timestampns(sk, (struct timespec __user *) argp);
1314
1315	case SIOCGIFADDR:
1316	case SIOCSIFADDR:
1317	case SIOCGIFDSTADDR:
1318	case SIOCSIFDSTADDR:
1319	case SIOCGIFBRDADDR:
1320	case SIOCSIFBRDADDR:
1321	case SIOCGIFNETMASK:
1322	case SIOCSIFNETMASK:
1323	case SIOCGIFMETRIC:
1324	case SIOCSIFMETRIC:
1325		return -EINVAL;
1326
1327	case SIOCADDRT:
1328	case SIOCDELRT:
1329	case SIOCRSCLRRT:
1330		if (!capable(CAP_NET_ADMIN))
1331			return -EPERM;
1332		return rose_rt_ioctl(cmd, argp);
1333
1334	case SIOCRSGCAUSE: {
1335		struct rose_cause_struct rose_cause;
1336		rose_cause.cause      = rose->cause;
1337		rose_cause.diagnostic = rose->diagnostic;
1338		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1339	}
1340
1341	case SIOCRSSCAUSE: {
1342		struct rose_cause_struct rose_cause;
1343		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1344			return -EFAULT;
1345		rose->cause      = rose_cause.cause;
1346		rose->diagnostic = rose_cause.diagnostic;
1347		return 0;
1348	}
1349
1350	case SIOCRSSL2CALL:
1351		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1352		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1353			ax25_listen_release(&rose_callsign, NULL);
1354		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1355			return -EFAULT;
1356		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1357			return ax25_listen_register(&rose_callsign, NULL);
1358
1359		return 0;
1360
1361	case SIOCRSGL2CALL:
1362		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1363
1364	case SIOCRSACCEPT:
1365		if (rose->state == ROSE_STATE_5) {
1366			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1367			rose_start_idletimer(sk);
1368			rose->condition = 0x00;
1369			rose->vs        = 0;
1370			rose->va        = 0;
1371			rose->vr        = 0;
1372			rose->vl        = 0;
1373			rose->state     = ROSE_STATE_3;
1374		}
1375		return 0;
1376
1377	default:
1378		return -ENOIOCTLCMD;
1379	}
1380
1381	return 0;
1382}
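
/*
 * The cause and diagnostic of a cleared call can be read back from
 * userspace with the private ioctl above; a sketch:
 *
 *	struct rose_cause_struct cause;
 *
 *	if (ioctl(fd, SIOCRSGCAUSE, &cause) == 0)
 *		printf("cleared: cause %02X diag %02X\n",
 *		       cause.cause, cause.diagnostic);
 */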
1383
1384#ifdef CONFIG_PROC_FS
1385static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1386	__acquires(rose_list_lock)
1387{
1388	spin_lock_bh(&rose_list_lock);
1389	return seq_hlist_start_head(&rose_list, *pos);
1390}
1391
1392static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1393{
1394	return seq_hlist_next(v, &rose_list, pos);
1395}
1396
1397static void rose_info_stop(struct seq_file *seq, void *v)
1398	__releases(rose_list_lock)
1399{
1400	spin_unlock_bh(&rose_list_lock);
1401}
1402
1403static int rose_info_show(struct seq_file *seq, void *v)
1404{
1405	char buf[11], rsbuf[11];
1406
1407	if (v == SEQ_START_TOKEN)
1408		seq_puts(seq,
1409			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1410
1411	else {
1412		struct sock *s = sk_entry(v);
1413		struct rose_sock *rose = rose_sk(s);
1414		const char *devname, *callsign;
1415		const struct net_device *dev = rose->device;
1416
1417		if (!dev)
1418			devname = "???";
1419		else
1420			devname = dev->name;
1421
1422		seq_printf(seq, "%-10s %-9s ",
1423			   rose2asc(rsbuf, &rose->dest_addr),
1424			   ax2asc(buf, &rose->dest_call));
1425
1426		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1427			callsign = "??????-?";
1428		else
1429			callsign = ax2asc(buf, &rose->source_call);
1430
1431		seq_printf(seq,
1432			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1433			rose2asc(rsbuf, &rose->source_addr),
1434			callsign,
1435			devname,
1436			rose->lci & 0x0FFF,
1437			(rose->neighbour) ? rose->neighbour->number : 0,
1438			rose->state,
1439			rose->vs,
1440			rose->vr,
1441			rose->va,
1442			ax25_display_timer(&rose->timer) / HZ,
1443			rose->t1 / HZ,
1444			rose->t2 / HZ,
1445			rose->t3 / HZ,
1446			rose->hb / HZ,
1447			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1448			rose->idle / (60 * HZ),
1449			sk_wmem_alloc_get(s),
1450			sk_rmem_alloc_get(s),
1451			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1452	}
1453
1454	return 0;
1455}
1456
1457static const struct seq_operations rose_info_seqops = {
1458	.start = rose_info_start,
1459	.next = rose_info_next,
1460	.stop = rose_info_stop,
1461	.show = rose_info_show,
1462};
1463
1464static int rose_info_open(struct inode *inode, struct file *file)
1465{
1466	return seq_open(file, &rose_info_seqops);
1467}
1468
1469static const struct file_operations rose_info_fops = {
1470	.owner = THIS_MODULE,
1471	.open = rose_info_open,
1472	.read = seq_read,
1473	.llseek = seq_lseek,
1474	.release = seq_release,
1475};
1476#endif	/* CONFIG_PROC_FS */
1477
1478static const struct net_proto_family rose_family_ops = {
1479	.family		=	PF_ROSE,
1480	.create		=	rose_create,
1481	.owner		=	THIS_MODULE,
1482};
1483
1484static const struct proto_ops rose_proto_ops = {
1485	.family		=	PF_ROSE,
1486	.owner		=	THIS_MODULE,
1487	.release	=	rose_release,
1488	.bind		=	rose_bind,
1489	.connect	=	rose_connect,
1490	.socketpair	=	sock_no_socketpair,
1491	.accept		=	rose_accept,
1492	.getname	=	rose_getname,
1493	.poll		=	datagram_poll,
1494	.ioctl		=	rose_ioctl,
1495	.listen		=	rose_listen,
1496	.shutdown	=	sock_no_shutdown,
1497	.setsockopt	=	rose_setsockopt,
1498	.getsockopt	=	rose_getsockopt,
1499	.sendmsg	=	rose_sendmsg,
1500	.recvmsg	=	rose_recvmsg,
1501	.mmap		=	sock_no_mmap,
1502	.sendpage	=	sock_no_sendpage,
1503};
1504
1505static struct notifier_block rose_dev_notifier = {
1506	.notifier_call	=	rose_device_event,
1507};
1508
1509static struct net_device **dev_rose;
1510
1511static struct ax25_protocol rose_pid = {
1512	.pid	= AX25_P_ROSE,
1513	.func	= rose_route_frame
1514};
1515
1516static struct ax25_linkfail rose_linkfail_notifier = {
1517	.func	= rose_link_failed
1518};
1519
1520static int __init rose_proto_init(void)
1521{
1522	int i;
1523	int rc;
1524
1525	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1526		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1527		rc = -EINVAL;
1528		goto out;
1529	}
1530
1531	rc = proto_register(&rose_proto, 0);
1532	if (rc != 0)
1533		goto out;
1534
1535	rose_callsign = null_ax25_address;
1536
1537	dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1538	if (dev_rose == NULL) {
1539		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1540		rc = -ENOMEM;
1541		goto out_proto_unregister;
1542	}
1543
1544	for (i = 0; i < rose_ndevs; i++) {
1545		struct net_device *dev;
1546		char name[IFNAMSIZ];
1547
1548		sprintf(name, "rose%d", i);
1549		dev = alloc_netdev(0, name, rose_setup);
1550		if (!dev) {
1551			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1552			rc = -ENOMEM;
1553			goto fail;
1554		}
1555		rc = register_netdev(dev);
1556		if (rc) {
1557			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1558			free_netdev(dev);
1559			goto fail;
1560		}
1561		rose_set_lockdep_key(dev);
1562		dev_rose[i] = dev;
1563	}
1564
1565	sock_register(&rose_family_ops);
1566	register_netdevice_notifier(&rose_dev_notifier);
1567
1568	ax25_register_pid(&rose_pid);
1569	ax25_linkfail_register(&rose_linkfail_notifier);
1570
1571#ifdef CONFIG_SYSCTL
1572	rose_register_sysctl();
1573#endif
1574	rose_loopback_init();
1575
1576	rose_add_loopback_neigh();
1577
1578	proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops);
1579	proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops);
1580	proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops);
1581	proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops);
1582out:
1583	return rc;
1584fail:
1585	while (--i >= 0) {
1586		unregister_netdev(dev_rose[i]);
1587		free_netdev(dev_rose[i]);
1588	}
1589	kfree(dev_rose);
1590out_proto_unregister:
1591	proto_unregister(&rose_proto);
1592	goto out;
1593}
1594module_init(rose_proto_init);
1595
1596module_param(rose_ndevs, int, 0);
1597MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
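
/*
 * The device count is fixed when the module is loaded, e.g. (assuming the
 * usual module name rose.ko):
 *
 *	modprobe rose rose_ndevs=4
 *
 * creates the virtual interfaces rose0 ... rose3.
 */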
1598
1599MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1600MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1601MODULE_LICENSE("GPL");
1602MODULE_ALIAS_NETPROTO(PF_ROSE);
1603
1604static void __exit rose_exit(void)
1605{
1606	int i;
1607
1608	proc_net_remove(&init_net, "rose");
1609	proc_net_remove(&init_net, "rose_neigh");
1610	proc_net_remove(&init_net, "rose_nodes");
1611	proc_net_remove(&init_net, "rose_routes");
1612	rose_loopback_clear();
1613
1614	rose_rt_free();
1615
1616	ax25_protocol_release(AX25_P_ROSE);
1617	ax25_linkfail_release(&rose_linkfail_notifier);
1618
1619	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1620		ax25_listen_release(&rose_callsign, NULL);
1621
1622#ifdef CONFIG_SYSCTL
1623	rose_unregister_sysctl();
1624#endif
1625	unregister_netdevice_notifier(&rose_dev_notifier);
1626
1627	sock_unregister(PF_ROSE);
1628
1629	for (i = 0; i < rose_ndevs; i++) {
1630		struct net_device *dev = dev_rose[i];
1631
1632		if (dev) {
1633			unregister_netdev(dev);
1634			free_netdev(dev);
1635		}
1636	}
1637
1638	kfree(dev_rose);
1639	proto_unregister(&rose_proto);
1640}
1641
1642module_exit(rose_exit);