   1/*********************************************************************
   2 *
   3 * Filename:      irlap.c
   4 * Version:       1.0
   5 * Description:   IrLAP implementation for Linux
   6 * Status:        Stable
   7 * Author:        Dag Brattli <dagb@cs.uit.no>
   8 * Created at:    Mon Aug  4 20:40:53 1997
   9 * Modified at:   Tue Dec 14 09:26:44 1999
  10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
  11 *
  12 *     Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
  13 *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
  14 *
  15 *     This program is free software; you can redistribute it and/or
  16 *     modify it under the terms of the GNU General Public License as
  17 *     published by the Free Software Foundation; either version 2 of
  18 *     the License, or (at your option) any later version.
  19 *
  20 *     This program is distributed in the hope that it will be useful,
  21 *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  22 *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  23 *     GNU General Public License for more details.
  24 *
  25 *     You should have received a copy of the GNU General Public License
  26 *     along with this program; if not, write to the Free Software
  27 *     Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  28 *     MA 02111-1307 USA
  29 *
  30 ********************************************************************/
  31
  32#include <linux/slab.h>
  33#include <linux/string.h>
  34#include <linux/skbuff.h>
  35#include <linux/delay.h>
  36#include <linux/proc_fs.h>
  37#include <linux/init.h>
  38#include <linux/random.h>
  39#include <linux/module.h>
  40#include <linux/seq_file.h>
  41
  42#include <net/irda/irda.h>
  43#include <net/irda/irda_device.h>
  44#include <net/irda/irqueue.h>
  45#include <net/irda/irlmp.h>
  46#include <net/irda/irlmp_frame.h>
  47#include <net/irda/irlap_frame.h>
  48#include <net/irda/irlap.h>
  49#include <net/irda/timer.h>
  50#include <net/irda/qos.h>
  51
  52static hashbin_t *irlap = NULL;
  53int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
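/* Illustration (editor's note, hypothetical values): sysctl_slot_timeout is
 * exported in milliseconds here and converted back to jiffies before use in
 * irlap_discovery_request(). For example, assuming HZ = 250 and a
 * SLOT_TIMEOUT of 25 jiffies, the sysctl default would be
 * 25 * 1000 / 250 = 100 ms, and writing 90 ms back through the sysctl would
 * yield 90 * 250 / 1000 = 22 jiffies for self->slot_timeout. */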
  54
   55/* This is the delay of missed pf periods before generating an event
   56 * to the application. The spec mandates 3 seconds, but in some cases
   57 * it's way too long. - Jean II */
  58int sysctl_warn_noreply_time = 3;
  59
  60extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
  61static void __irlap_close(struct irlap_cb *self);
  62static void irlap_init_qos_capabilities(struct irlap_cb *self,
  63					struct qos_info *qos_user);
  64
  65#ifdef CONFIG_IRDA_DEBUG
  66static const char *const lap_reasons[] = {
  67	"ERROR, NOT USED",
  68	"LAP_DISC_INDICATION",
  69	"LAP_NO_RESPONSE",
  70	"LAP_RESET_INDICATION",
  71	"LAP_FOUND_NONE",
  72	"LAP_MEDIA_BUSY",
  73	"LAP_PRIMARY_CONFLICT",
  74	"ERROR, NOT USED",
  75};
  76#endif	/* CONFIG_IRDA_DEBUG */
  77
  78int __init irlap_init(void)
  79{
   80	/* Check if the compiler did its job properly (struct packing).
   81	 * This may fail on some ARM configurations - check with Russell King. */
  82	IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
  83	IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
  84	IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
  85	IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);
  86
  87	/* Allocate master array */
  88	irlap = hashbin_new(HB_LOCK);
  89	if (irlap == NULL) {
  90		IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
  91			   __func__);
  92		return -ENOMEM;
  93	}
  94
  95	return 0;
  96}
  97
  98void irlap_cleanup(void)
  99{
 100	IRDA_ASSERT(irlap != NULL, return;);
 101
 102	hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
 103}
 104
 105/*
 106 * Function irlap_open (driver)
 107 *
 108 *    Initialize IrLAP layer
 109 *
 110 */
 111struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
 112			    const char *hw_name)
 113{
 114	struct irlap_cb *self;
 115
 116	IRDA_DEBUG(4, "%s()\n", __func__);
 117
 118	/* Initialize the irlap structure. */
 119	self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
 120	if (self == NULL)
 121		return NULL;
 122
 123	self->magic = LAP_MAGIC;
 124
 125	/* Make a binding between the layers */
 126	self->netdev = dev;
 127	self->qos_dev = qos;
 128	/* Copy hardware name */
 129	if(hw_name != NULL) {
 130		strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
 131	} else {
 132		self->hw_name[0] = '\0';
 133	}
 134
 135	/* FIXME: should we get our own field? */
 136	dev->atalk_ptr = self;
 137
 138	self->state = LAP_OFFLINE;
 139
 140	/* Initialize transmit queue */
 141	skb_queue_head_init(&self->txq);
 142	skb_queue_head_init(&self->txq_ultra);
 143	skb_queue_head_init(&self->wx_list);
 144
 145	/* My unique IrLAP device address! */
  146	/* We don't want the broadcast address, nor the NULL address
 147	 * (most often used to signify "invalid"), and we don't want an
 148	 * address already in use (otherwise connect won't be able
 149	 * to select the proper link). - Jean II */
 150	do {
 151		get_random_bytes(&self->saddr, sizeof(self->saddr));
 152	} while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
 153		 (hashbin_lock_find(irlap, self->saddr, NULL)) );
 154	/* Copy to the driver */
 155	memcpy(dev->dev_addr, &self->saddr, 4);
 156
 157	init_timer(&self->slot_timer);
 158	init_timer(&self->query_timer);
 159	init_timer(&self->discovery_timer);
 160	init_timer(&self->final_timer);
 161	init_timer(&self->poll_timer);
 162	init_timer(&self->wd_timer);
 163	init_timer(&self->backoff_timer);
 164	init_timer(&self->media_busy_timer);
 165
 166	irlap_apply_default_connection_parameters(self);
 167
  168	self->N3 = 3; /* # connection attempts to try before giving up */
 169
 170	self->state = LAP_NDM;
 171
 172	hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);
 173
 174	irlmp_register_link(self, self->saddr, &self->notify);
 175
 176	return self;
 177}
 178EXPORT_SYMBOL(irlap_open);
 179
 180/*
 181 * Function __irlap_close (self)
 182 *
 183 *    Remove IrLAP and all allocated memory. Stop any pending timers.
 184 *
 185 */
 186static void __irlap_close(struct irlap_cb *self)
 187{
 188	IRDA_ASSERT(self != NULL, return;);
 189	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 190
 191	/* Stop timers */
 192	del_timer(&self->slot_timer);
 193	del_timer(&self->query_timer);
 194	del_timer(&self->discovery_timer);
 195	del_timer(&self->final_timer);
 196	del_timer(&self->poll_timer);
 197	del_timer(&self->wd_timer);
 198	del_timer(&self->backoff_timer);
 199	del_timer(&self->media_busy_timer);
 200
 201	irlap_flush_all_queues(self);
 202
 203	self->magic = 0;
 204
 205	kfree(self);
 206}
 207
 208/*
 209 * Function irlap_close (self)
 210 *
 211 *    Remove IrLAP instance
 212 *
 213 */
 214void irlap_close(struct irlap_cb *self)
 215{
 216	struct irlap_cb *lap;
 217
 218	IRDA_DEBUG(4, "%s()\n", __func__);
 219
 220	IRDA_ASSERT(self != NULL, return;);
 221	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 222
 223	/* We used to send a LAP_DISC_INDICATION here, but this was
  224	 * racy. This has been moved into irlmp_unregister_link()
 225	 * itself. Jean II */
 226
 227	/* Kill the LAP and all LSAPs on top of it */
 228	irlmp_unregister_link(self->saddr);
 229	self->notify.instance = NULL;
 230
  231	/* Be sure that we manage to remove ourselves from the hash */
 232	lap = hashbin_remove(irlap, self->saddr, NULL);
 233	if (!lap) {
 234		IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__);
 235		return;
 236	}
 237	__irlap_close(lap);
 238}
 239EXPORT_SYMBOL(irlap_close);
 240
 241/*
 242 * Function irlap_connect_indication (self, skb)
 243 *
 244 *    Another device is attempting to make a connection
 245 *
 246 */
 247void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
 248{
 249	IRDA_DEBUG(4, "%s()\n", __func__);
 250
 251	IRDA_ASSERT(self != NULL, return;);
 252	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 253
 254	irlap_init_qos_capabilities(self, NULL); /* No user QoS! */
 255
 256	irlmp_link_connect_indication(self->notify.instance, self->saddr,
 257				      self->daddr, &self->qos_tx, skb);
 258}
 259
 260/*
 261 * Function irlap_connect_response (self, skb)
 262 *
 263 *    Service user has accepted incoming connection
 264 *
 265 */
 266void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
 267{
 268	IRDA_DEBUG(4, "%s()\n", __func__);
 269
 270	irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
 271}
 272
 273/*
 274 * Function irlap_connect_request (self, daddr, qos_user, sniff)
 275 *
  276 *    Request connection with another device. Sniffing is not implemented
 277 *    yet.
 278 *
 279 */
 280void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
 281			   struct qos_info *qos_user, int sniff)
 282{
 283	IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr);
 284
 285	IRDA_ASSERT(self != NULL, return;);
 286	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 287
 288	self->daddr = daddr;
 289
 290	/*
 291	 *  If the service user specifies QoS values for this connection,
 292	 *  then use them
 293	 */
 294	irlap_init_qos_capabilities(self, qos_user);
 295
 296	if ((self->state == LAP_NDM) && !self->media_busy)
 297		irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
 298	else
 299		self->connect_pending = TRUE;
 300}
 301
 302/*
 303 * Function irlap_connect_confirm (self, skb)
 304 *
 305 *    Connection request has been accepted
 306 *
 307 */
 308void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
 309{
 310	IRDA_DEBUG(4, "%s()\n", __func__);
 311
 312	IRDA_ASSERT(self != NULL, return;);
 313	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 314
 315	irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
 316}
 317
 318/*
 319 * Function irlap_data_indication (self, skb)
 320 *
  321 *    We received data frames from the IR port, so we just pass them up to
 322 *    IrLMP for further processing
 323 *
 324 */
 325void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
 326			   int unreliable)
 327{
 328	/* Hide LAP header from IrLMP layer */
 329	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
 330
 331	irlmp_link_data_indication(self->notify.instance, skb, unreliable);
 332}
 333
 334
 335/*
 336 * Function irlap_data_request (self, skb)
 337 *
  338 *    Queue data for transmission; we must wait until we are in XMIT state
 339 *
 340 */
 341void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
 342			int unreliable)
 343{
 344	IRDA_ASSERT(self != NULL, return;);
 345	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 346
 347	IRDA_DEBUG(3, "%s()\n", __func__);
 348
 349	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
 350		    return;);
 351	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
 352
 353	/*
 354	 *  Must set frame format now so that the rest of the code knows
  355	 *  if it's dealing with an I or a UI frame
 356	 */
 357	if (unreliable)
 358		skb->data[1] = UI_FRAME;
 359	else
 360		skb->data[1] = I_FRAME;
 361
 362	/* Don't forget to refcount it - see irlmp_connect_request(). */
 363	skb_get(skb);
 364
 365	/* Add at the end of the queue (keep ordering) - Jean II */
 366	skb_queue_tail(&self->txq, skb);
 367
 368	/*
  369	 *  Send an event for this frame only if we are in the right state
 370	 *  FIXME: udata should be sent first! (skb_queue_head?)
 371	 */
 372	if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
 373		/* If we are not already processing the Tx queue, trigger
 374		 * transmission immediately - Jean II */
 375		if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
 376			irlap_do_event(self, DATA_REQUEST, skb, NULL);
 377		/* Otherwise, the packets will be sent normally at the
 378		 * next pf-poll - Jean II */
 379	}
 380}
 381
 382/*
 383 * Function irlap_unitdata_request (self, skb)
 384 *
 385 *    Send Ultra data. This is data that must be sent outside any connection
 386 *
 387 */
 388#ifdef CONFIG_IRDA_ULTRA
 389void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
 390{
 391	IRDA_ASSERT(self != NULL, return;);
 392	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 393
 394	IRDA_DEBUG(3, "%s()\n", __func__);
 395
 396	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
 397	       return;);
 398	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
 399
 400	skb->data[0] = CBROADCAST;
 401	skb->data[1] = UI_FRAME;
 402
 403	/* Don't need to refcount, see irlmp_connless_data_request() */
 404
 405	skb_queue_tail(&self->txq_ultra, skb);
 406
 407	irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
 408}
 409#endif /*CONFIG_IRDA_ULTRA */
 410
 411/*
  412 * Function irlap_unitdata_indication (self, skb)
 413 *
 414 *    Receive Ultra data. This is data that is received outside any connection
 415 *
 416 */
 417#ifdef CONFIG_IRDA_ULTRA
 418void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
 419{
 420	IRDA_DEBUG(1, "%s()\n", __func__);
 421
 422	IRDA_ASSERT(self != NULL, return;);
 423	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 424	IRDA_ASSERT(skb != NULL, return;);
 425
 426	/* Hide LAP header from IrLMP layer */
 427	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
 428
 429	irlmp_link_unitdata_indication(self->notify.instance, skb);
 430}
 431#endif /* CONFIG_IRDA_ULTRA */
 432
 433/*
 434 * Function irlap_disconnect_request (void)
 435 *
 436 *    Request to disconnect connection by service user
 437 */
 438void irlap_disconnect_request(struct irlap_cb *self)
 439{
 440	IRDA_DEBUG(3, "%s()\n", __func__);
 441
 442	IRDA_ASSERT(self != NULL, return;);
 443	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 444
 445	/* Don't disconnect until all data frames are successfully sent */
 446	if (!skb_queue_empty(&self->txq)) {
 447		self->disconnect_pending = TRUE;
 448		return;
 449	}
 450
 451	/* Check if we are in the right state for disconnecting */
 452	switch (self->state) {
 453	case LAP_XMIT_P:        /* FALLTHROUGH */
 454	case LAP_XMIT_S:        /* FALLTHROUGH */
 455	case LAP_CONN:          /* FALLTHROUGH */
 456	case LAP_RESET_WAIT:    /* FALLTHROUGH */
 457	case LAP_RESET_CHECK:
 458		irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
 459		break;
 460	default:
 461		IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__);
 462		self->disconnect_pending = TRUE;
 463		break;
 464	}
 465}
 466
 467/*
 468 * Function irlap_disconnect_indication (void)
 469 *
 470 *    Disconnect request from other device
 471 *
 472 */
 473void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
 474{
 475	IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]);
 476
 477	IRDA_ASSERT(self != NULL, return;);
 478	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 479
 480	/* Flush queues */
 481	irlap_flush_all_queues(self);
 482
 483	switch (reason) {
 484	case LAP_RESET_INDICATION:
 485		IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
 486		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
 487		break;
 488	case LAP_NO_RESPONSE:	   /* FALLTHROUGH */
 489	case LAP_DISC_INDICATION:  /* FALLTHROUGH */
 490	case LAP_FOUND_NONE:       /* FALLTHROUGH */
 491	case LAP_MEDIA_BUSY:
 492		irlmp_link_disconnect_indication(self->notify.instance, self,
 493						 reason, NULL);
 494		break;
 495	default:
 496		IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason);
 497	}
 498}
 499
 500/*
 501 * Function irlap_discovery_request (gen_addr_bit)
 502 *
 503 *    Start one single discovery operation.
 504 *
 505 */
 506void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
 507{
 508	struct irlap_info info;
 509
 510	IRDA_ASSERT(self != NULL, return;);
 511	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 512	IRDA_ASSERT(discovery != NULL, return;);
 513
 514	IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots);
 515
 516	IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
 517		    (discovery->nslots == 8) || (discovery->nslots == 16),
 518		    return;);
 519
 520	/* Discovery is only possible in NDM mode */
 521	if (self->state != LAP_NDM) {
 522		IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
 523			   __func__);
 524		irlap_discovery_confirm(self, NULL);
 525		/* Note : in theory, if we are not in NDM, we could postpone
 526		 * the discovery like we do for connection request.
 527		 * In practice, it's not worth it. If the media was busy,
 528		 * it's likely next time around it won't be busy. If we are
 529		 * in REPLY state, we will get passive discovery info & event.
 530		 * Jean II */
 531		return;
 532	}
 533
 534	/* Check if last discovery request finished in time, or if
 535	 * it was aborted due to the media busy flag. */
 536	if (self->discovery_log != NULL) {
 537		hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
 538		self->discovery_log = NULL;
 539	}
 540
 541	/* All operations will occur at predictable time, no need to lock */
 542	self->discovery_log = hashbin_new(HB_NOLOCK);
 543
 544	if (self->discovery_log == NULL) {
 545		IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
 546			     __func__);
 547		return;
 548	}
 549
 550	info.S = discovery->nslots; /* Number of slots */
 551	info.s = 0; /* Current slot */
 552
 553	self->discovery_cmd = discovery;
 554	info.discovery = discovery;
 555
 556	/* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
 557	self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
 558
 559	irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
 560}
 561
 562/*
 563 * Function irlap_discovery_confirm (log)
 564 *
  565 *    A device has been discovered in front of this station; we
 566 *    report directly to LMP.
 567 */
 568void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
 569{
 570	IRDA_ASSERT(self != NULL, return;);
 571	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 572
 573	IRDA_ASSERT(self->notify.instance != NULL, return;);
 574
 575	/*
 576	 * Check for successful discovery, since we are then allowed to clear
 577	 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
 578	 * us to make connection attempts much faster and easier (i.e. no
 579	 * collisions).
  580	 * Setting media busy to false will also generate an event allowing
  581	 * us to process pending events in the NDM state machine.
  582	 * Note : the spec doesn't define what a successful discovery is.
 583	 * If we want Ultra to work, it's successful even if there is
 584	 * nobody discovered - Jean II
 585	 */
 586	if (discovery_log)
 587		irda_device_set_media_busy(self->netdev, FALSE);
 588
 589	/* Inform IrLMP */
 590	irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
 591}
 592
 593/*
 594 * Function irlap_discovery_indication (log)
 595 *
 596 *    Somebody is trying to discover us!
 597 *
 598 */
 599void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
 600{
 601	IRDA_DEBUG(4, "%s()\n", __func__);
 602
 603	IRDA_ASSERT(self != NULL, return;);
 604	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 605	IRDA_ASSERT(discovery != NULL, return;);
 606
 607	IRDA_ASSERT(self->notify.instance != NULL, return;);
 608
 609	/* A device is very likely to connect immediately after it performs
 610	 * a successful discovery. This means that in our case, we are much
 611	 * more likely to receive a connection request over the medium.
  612	 * So, we back off to avoid collisions.
  613	 * IrLAP spec 6.13.4 suggests 100ms...
  614	 * Note : this little trick actually makes a *BIG* difference. If I set
  615	 * up my Linux box with discovery enabled and one Ultra frame sent every
  616	 * second, my Palm has no trouble connecting to it every time!
 617	 * Jean II */
 618	irda_device_set_media_busy(self->netdev, SMALL);
 619
 620	irlmp_link_discovery_indication(self->notify.instance, discovery);
 621}
 622
 623/*
 624 * Function irlap_status_indication (quality_of_link)
 625 */
 626void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
 627{
 628	switch (quality_of_link) {
 629	case STATUS_NO_ACTIVITY:
 630		IRDA_MESSAGE("IrLAP, no activity on link!\n");
 631		break;
 632	case STATUS_NOISY:
 633		IRDA_MESSAGE("IrLAP, noisy link!\n");
 634		break;
 635	default:
 636		break;
 637	}
 638	irlmp_status_indication(self->notify.instance,
 639				quality_of_link, LOCK_NO_CHANGE);
 640}
 641
 642/*
 643 * Function irlap_reset_indication (void)
 644 */
 645void irlap_reset_indication(struct irlap_cb *self)
 646{
 647	IRDA_DEBUG(1, "%s()\n", __func__);
 648
 649	IRDA_ASSERT(self != NULL, return;);
 650	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 651
 652	if (self->state == LAP_RESET_WAIT)
 653		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
 654	else
 655		irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
 656}
 657
 658/*
 659 * Function irlap_reset_confirm (void)
 660 */
 661void irlap_reset_confirm(void)
 662{
 663	IRDA_DEBUG(1, "%s()\n", __func__);
 664}
 665
 666/*
 667 * Function irlap_generate_rand_time_slot (S, s)
 668 *
 669 *    Generate a random time slot between s and S-1 where
 670 *    S = Number of slots (0 -> S-1)
 671 *    s = Current slot
 672 */
 673int irlap_generate_rand_time_slot(int S, int s)
 674{
 675	static int rand;
 676	int slot;
 677
 678	IRDA_ASSERT((S - s) > 0, return 0;);
 679
 680	rand += jiffies;
 681	rand ^= (rand << 12);
 682	rand ^= (rand >> 20);
 683
 684	slot = s + rand % (S-s);
 685
  686	IRDA_ASSERT((slot >= s) && (slot < S), return 0;);
 687
 688	return slot;
 689}
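/* Illustration (editor's note): assuming the accumulated rand value is
 * non-negative, with S = 8 discovery slots and a current slot s = 3 the
 * expression s + rand % (S - s) draws a slot in the range 3..7, i.e. one
 * of the remaining slots. With S = 1 and s = 0 the only possible result
 * is slot 0. */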
 690
 691/*
 692 * Function irlap_update_nr_received (nr)
 693 *
 694 *    Remove all acknowledged frames in current window queue. This code is
 695 *    not intuitive and you should not try to change it. If you think it
 696 *    contains bugs, please mail a patch to the author instead.
 697 */
 698void irlap_update_nr_received(struct irlap_cb *self, int nr)
 699{
 700	struct sk_buff *skb = NULL;
 701	int count = 0;
 702
 703	/*
 704	 * Remove all the ack-ed frames from the window queue.
 705	 */
 706
 707	/*
 708	 *  Optimize for the common case. It is most likely that the receiver
 709	 *  will acknowledge all the frames we have sent! So in that case we
 710	 *  delete all frames stored in window.
 711	 */
 712	if (nr == self->vs) {
 713		while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
 714			dev_kfree_skb(skb);
 715		}
 716		/* The last acked frame is the next to send minus one */
 717		self->va = nr - 1;
 718	} else {
 719		/* Remove all acknowledged frames in current window */
 720		while ((skb_peek(&self->wx_list) != NULL) &&
 721		       (((self->va+1) % 8) != nr))
 722		{
 723			skb = skb_dequeue(&self->wx_list);
 724			dev_kfree_skb(skb);
 725
 726			self->va = (self->va + 1) % 8;
 727			count++;
 728		}
 729	}
 730
 731	/* Advance window */
 732	self->window = self->window_size - skb_queue_len(&self->wx_list);
 733}
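/* Worked example (editor's illustration, using the modulo-8 sequence
 * numbers above): with vs = 5 and va = 2, an incoming nr = 5 takes the
 * fast path, empties wx_list and leaves va = 4. An incoming nr = 4 takes
 * the slow path instead: one frame (ns = 3) is dequeued and va advances
 * to 3, at which point ((va + 1) % 8) equals nr and the loop stops. */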
 734
 735/*
 736 * Function irlap_validate_ns_received (ns)
 737 *
 738 *    Validate the next to send (ns) field from received frame.
 739 */
 740int irlap_validate_ns_received(struct irlap_cb *self, int ns)
 741{
 742	/*  ns as expected?  */
 743	if (ns == self->vr)
 744		return NS_EXPECTED;
 745	/*
 746	 *  Stations are allowed to treat invalid NS as unexpected NS
 747	 *  IrLAP, Recv ... with-invalid-Ns. p. 84
 748	 */
 749	return NS_UNEXPECTED;
 750
 751	/* return NR_INVALID; */
 752}
 753/*
 754 * Function irlap_validate_nr_received (nr)
 755 *
 756 *    Validate the next to receive (nr) field from received frame.
 757 *
 758 */
 759int irlap_validate_nr_received(struct irlap_cb *self, int nr)
 760{
 761	/*  nr as expected?  */
 762	if (nr == self->vs) {
 763		IRDA_DEBUG(4, "%s(), expected!\n", __func__);
 764		return NR_EXPECTED;
 765	}
 766
 767	/*
  768	 *  Unexpected nr? (but within the current window). First we check if the
 769	 *  ns numbers of the frames in the current window wrap.
 770	 */
 771	if (self->va < self->vs) {
 772		if ((nr >= self->va) && (nr <= self->vs))
 773			return NR_UNEXPECTED;
 774	} else {
 775		if ((nr >= self->va) || (nr <= self->vs))
 776			return NR_UNEXPECTED;
 777	}
 778
 779	/* Invalid nr!  */
 780	return NR_INVALID;
 781}
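/* Worked example (editor's illustration): suppose va = 6 and vs = 1, i.e.
 * the window has wrapped past 7. Then nr = 1 is NR_EXPECTED, nr = 7 or
 * nr = 0 acknowledge part of the outstanding window and are NR_UNEXPECTED,
 * while nr = 3 points outside the window and is NR_INVALID. */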
 782
 783/*
 784 * Function irlap_initiate_connection_state ()
 785 *
 786 *    Initialize the connection state parameters
 787 *
 788 */
 789void irlap_initiate_connection_state(struct irlap_cb *self)
 790{
 791	IRDA_DEBUG(4, "%s()\n", __func__);
 792
 793	IRDA_ASSERT(self != NULL, return;);
 794	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 795
 796	/* Next to send and next to receive */
 797	self->vs = self->vr = 0;
 798
 799	/* Last frame which got acked (0 - 1) % 8 */
 800	self->va = 7;
 801
 802	self->window = 1;
 803
 804	self->remote_busy = FALSE;
 805	self->retry_count = 0;
 806}
 807
 808/*
 809 * Function irlap_wait_min_turn_around (self, qos)
 810 *
  811 *    Wait the negotiated minimum turn around time. This function actually
  812 *    sets the number of BOFs that must be sent before the next transmitted
  813 *    frame in order to delay for the specified amount of time. This is
  814 *    done to avoid using timers, and the forbidden udelay!
 815 */
 816void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
 817{
 818	__u32 min_turn_time;
 819	__u32 speed;
 820
 821	/* Get QoS values.  */
 822	speed = qos->baud_rate.value;
 823	min_turn_time = qos->min_turn_time.value;
 824
 825	/* No need to calculate XBOFs for speeds over 115200 bps */
 826	if (speed > 115200) {
 827		self->mtt_required = min_turn_time;
 828		return;
 829	}
 830
 831	/*
 832	 *  Send additional BOF's for the next frame for the requested
 833	 *  min turn time, so now we must calculate how many chars (XBOF's) we
 834	 *  must send for the requested time period (min turn time)
 835	 */
 836	self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
 837}
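/* Worked example (editor's illustration, assuming roughly 10 bits on the
 * wire per XBOF character at SIR rates): at 9600 bps about 960 characters
 * go out per second, so a min turn time of 10000 us translates to about
 * 10 XBOFs of padding before the next frame. Above 115200 bps the code
 * above skips the calculation and simply records the delay in
 * mtt_required for the driver. */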
 838
 839/*
 840 * Function irlap_flush_all_queues (void)
 841 *
 842 *    Flush all queues
 843 *
 844 */
 845void irlap_flush_all_queues(struct irlap_cb *self)
 846{
 847	struct sk_buff* skb;
 848
 849	IRDA_ASSERT(self != NULL, return;);
 850	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 851
 852	/* Free transmission queue */
 853	while ((skb = skb_dequeue(&self->txq)) != NULL)
 854		dev_kfree_skb(skb);
 855
 856	while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
 857		dev_kfree_skb(skb);
 858
 859	/* Free sliding window buffered packets */
 860	while ((skb = skb_dequeue(&self->wx_list)) != NULL)
 861		dev_kfree_skb(skb);
 862}
 863
 864/*
  865 * Function irlap_change_speed (self, speed, now)
 866 *
 867 *    Change the speed of the IrDA port
 868 *
 869 */
 870static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
 871{
 872	struct sk_buff *skb;
 873
 874	IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed);
 875
 876	IRDA_ASSERT(self != NULL, return;);
 877	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 878
 879	self->speed = speed;
 880
 881	/* Change speed now, or just piggyback speed on frames */
 882	if (now) {
 883		/* Send down empty frame to trigger speed change */
 884		skb = alloc_skb(0, GFP_ATOMIC);
 885		if (skb)
 886			irlap_queue_xmit(self, skb);
 887	}
 888}
 889
 890/*
 891 * Function irlap_init_qos_capabilities (self, qos)
 892 *
  893 *    Initialize QoS for this IrLAP session. What we do is compute the
  894 *    intersection of the QoS capabilities of the user, the driver and
  895 *    IrLAP itself. Normally, IrLAP will not specify any values, but it can
  896 *    be used to restrict certain values.
 897 */
 898static void irlap_init_qos_capabilities(struct irlap_cb *self,
 899					struct qos_info *qos_user)
 900{
 901	IRDA_ASSERT(self != NULL, return;);
 902	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 903	IRDA_ASSERT(self->netdev != NULL, return;);
 904
 905	/* Start out with the maximum QoS support possible */
 906	irda_init_max_qos_capabilies(&self->qos_rx);
 907
  908	/* Apply the driver's QoS capabilities */
 909	irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);
 910
 911	/*
 912	 *  Check for user supplied QoS parameters. The service user is only
 913	 *  allowed to supply these values. We check each parameter since the
 914	 *  user may not have set all of them.
 915	 */
 916	if (qos_user) {
 917		IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__);
 918
 919		if (qos_user->baud_rate.bits)
 920			self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
 921
 922		if (qos_user->max_turn_time.bits)
 923			self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
 924		if (qos_user->data_size.bits)
 925			self->qos_rx.data_size.bits &= qos_user->data_size.bits;
 926
 927		if (qos_user->link_disc_time.bits)
 928			self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
 929	}
 930
 931	/* Use 500ms in IrLAP for now */
 932	self->qos_rx.max_turn_time.bits &= 0x01;
 933
 934	/* Set data size */
 935	/*self->qos_rx.data_size.bits &= 0x03;*/
 936
 937	irda_qos_bits_to_value(&self->qos_rx);
 938}
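/* Illustration (editor's note, hypothetical bit patterns): each negotiable
 * QoS parameter is carried as a bit field in which every set bit stands
 * for one supported value, so computing the intersection is a plain
 * bitwise AND. If the driver advertises baud_rate.bits = 0x3e and the
 * user restricts it to 0x06, only the common bits 0x3e & 0x06 = 0x06
 * survive; irda_qos_bits_to_value() then derives the concrete .value
 * fields from the surviving bits. */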
 939
 940/*
  941 * Function irlap_apply_default_connection_parameters (self)
 942 *
 943 *    Use the default connection and transmission parameters
 944 */
 945void irlap_apply_default_connection_parameters(struct irlap_cb *self)
 946{
 947	IRDA_DEBUG(4, "%s()\n", __func__);
 948
 949	IRDA_ASSERT(self != NULL, return;);
 950	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 951
 952	/* xbofs : Default value in NDM */
 953	self->next_bofs   = 12;
 954	self->bofs_count  = 12;
 955
 956	/* NDM Speed is 9600 */
 957	irlap_change_speed(self, 9600, TRUE);
 958
 959	/* Set mbusy when going to NDM state */
 960	irda_device_set_media_busy(self->netdev, TRUE);
 961
 962	/*
 963	 * Generate random connection address for this session, which must
 964	 * be 7 bits wide and different from 0x00 and 0xfe
 965	 */
 966	while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
 967		get_random_bytes(&self->caddr, sizeof(self->caddr));
 968		self->caddr &= 0xfe;
 969	}
 970
  971	/* Use default values until the connection has been negotiated */
 972	self->slot_timeout = sysctl_slot_timeout;
 973	self->final_timeout = FINAL_TIMEOUT;
 974	self->poll_timeout = POLL_TIMEOUT;
 975	self->wd_timeout = WD_TIMEOUT;
 976
 977	/* Set some default values */
 978	self->qos_tx.baud_rate.value = 9600;
 979	self->qos_rx.baud_rate.value = 9600;
 980	self->qos_tx.max_turn_time.value = 0;
 981	self->qos_rx.max_turn_time.value = 0;
 982	self->qos_tx.min_turn_time.value = 0;
 983	self->qos_rx.min_turn_time.value = 0;
 984	self->qos_tx.data_size.value = 64;
 985	self->qos_rx.data_size.value = 64;
 986	self->qos_tx.window_size.value = 1;
 987	self->qos_rx.window_size.value = 1;
 988	self->qos_tx.additional_bofs.value = 12;
 989	self->qos_rx.additional_bofs.value = 12;
 990	self->qos_tx.link_disc_time.value = 0;
 991	self->qos_rx.link_disc_time.value = 0;
 992
 993	irlap_flush_all_queues(self);
 994
 995	self->disconnect_pending = FALSE;
 996	self->connect_pending = FALSE;
 997}
 998
 999/*
1000 * Function irlap_apply_connection_parameters (qos, now)
1001 *
1002 *    Initialize IrLAP with the negotiated QoS values
1003 *
1004 * If 'now' is false, the speed and xbofs will be changed after the next
1005 * frame is sent.
 1006 * If 'now' is true, the speed and xbofs are changed immediately
1007 */
1008void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
1009{
1010	IRDA_DEBUG(4, "%s()\n", __func__);
1011
1012	IRDA_ASSERT(self != NULL, return;);
1013	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
1014
1015	/* Set the negotiated xbofs value */
1016	self->next_bofs   = self->qos_tx.additional_bofs.value;
1017	if (now)
1018		self->bofs_count = self->next_bofs;
1019
1020	/* Set the negotiated link speed (may need the new xbofs value) */
1021	irlap_change_speed(self, self->qos_tx.baud_rate.value, now);
1022
1023	self->window_size = self->qos_tx.window_size.value;
1024	self->window      = self->qos_tx.window_size.value;
1025
1026#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1027	/*
1028	 *  Calculate how many bytes it is possible to transmit before the
1029	 *  link must be turned around
1030	 */
1031	self->line_capacity =
1032		irlap_max_line_capacity(self->qos_tx.baud_rate.value,
1033					self->qos_tx.max_turn_time.value);
1034	self->bytes_left = self->line_capacity;
1035#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1036
1037
1038	/*
 1039	 *  Initialize timeout values; some of the rules are listed on
1040	 *  page 92 in IrLAP.
1041	 */
1042	IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
1043	IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
 1044	/* The poll timeout applies only to the primary station.
 1045	 * It defines the maximum time the primary stays in XMIT mode
 1046	 * before timing out and turning the link around (sending an RR).
 1047	 * In other words, this is how long we can keep the pf bit in primary mode.
 1048	 * Therefore, it must be lower than or equal to our *OWN* max turn around.
 1049	 * Jean II */
1050	self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
 1051	/* The Final timeout applies only to the primary station.
 1052	 * It defines the maximum time the primary waits (mostly in RECV mode)
 1053	 * for an answer from the secondary station before polling it again.
 1054	 * Therefore, it must be greater than or equal to our *PARTNER's*
 1055	 * max turn around time - Jean II */
1056	self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
 1057	/* The Watchdog Bit timeout applies only to the secondary station.
 1058	 * It defines the maximum time the secondary waits (mostly in RECV mode)
 1059	 * for a poll from the primary station before getting annoyed.
 1060	 * Therefore, it must be greater than or equal to our *PARTNER's*
 1061	 * max turn around time - Jean II */
1062	self->wd_timeout = self->final_timeout * 2;
1063
1064	/*
 1065	 * N1 and N2 are the maximum retry counts for *both* the final timer
 1066	 * and the wd timer (with a factor of 2) as defined above.
 1067	 * After N1 retries of a timer, we give a warning to the user.
 1068	 * After N2 retries, we consider the link dead and disconnect it.
1069	 * Jean II
1070	 */
1071
1072	/*
1073	 *  Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
1074	 *  3 seconds otherwise. See page 71 in IrLAP for more details.
 1075	 *  Actually, it's not always 3 seconds, as we allow setting
 1076	 *  it via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple
 1077	 *  of 2, so 1 second is the minimum we can allow. - Jean II
1078	 */
1079	if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
1080		/*
1081		 * If we set N1 to 0, it will trigger immediately, which is
1082		 * not what we want. What we really want is to disable it,
1083		 * Jean II
1084		 */
 1085		self->N1 = -2; /* Disable - needs to be a multiple of 2 */
1086	else
1087		self->N1 = sysctl_warn_noreply_time * 1000 /
1088		  self->qos_rx.max_turn_time.value;
1089
1090	IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);
1091
1092	/* Set N2 to match our own disconnect time */
1093	self->N2 = self->qos_tx.link_disc_time.value * 1000 /
1094		self->qos_rx.max_turn_time.value;
1095	IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
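	/* Worked example (editor's illustration, hypothetical negotiated values):
	 * with a partner max turn time of 500 ms and sysctl_warn_noreply_time at
	 * its default of 3 s, N1 = 3 * 1000 / 500 = 6 final-timer expiries before
	 * the no-reply warning; with a negotiated link disconnect time of 12 s,
	 * N2 = 12 * 1000 / 500 = 24 expiries before the link is declared dead. */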
1096}
1097
1098#ifdef CONFIG_PROC_FS
1099struct irlap_iter_state {
1100	int id;
1101};
1102
1103static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
1104{
1105	struct irlap_iter_state *iter = seq->private;
1106	struct irlap_cb *self;
1107
 1108	/* Protect our access to the irlap list */
1109	spin_lock_irq(&irlap->hb_spinlock);
1110	iter->id = 0;
1111
1112	for (self = (struct irlap_cb *) hashbin_get_first(irlap);
1113	     self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
1114		if (iter->id == *pos)
1115			break;
1116		++iter->id;
1117	}
1118
1119	return self;
1120}
1121
1122static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1123{
1124	struct irlap_iter_state *iter = seq->private;
1125
1126	++*pos;
1127	++iter->id;
1128	return (void *) hashbin_get_next(irlap);
1129}
1130
1131static void irlap_seq_stop(struct seq_file *seq, void *v)
1132{
1133	spin_unlock_irq(&irlap->hb_spinlock);
1134}
1135
1136static int irlap_seq_show(struct seq_file *seq, void *v)
1137{
1138	const struct irlap_iter_state *iter = seq->private;
1139	const struct irlap_cb *self = v;
1140
1141	IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);
1142
1143	seq_printf(seq, "irlap%d ", iter->id);
1144	seq_printf(seq, "state: %s\n",
1145		   irlap_state[self->state]);
1146
1147	seq_printf(seq, "  device name: %s, ",
1148		   (self->netdev) ? self->netdev->name : "bug");
1149	seq_printf(seq, "hardware name: %s\n", self->hw_name);
1150
1151	seq_printf(seq, "  caddr: %#02x, ", self->caddr);
1152	seq_printf(seq, "saddr: %#08x, ", self->saddr);
1153	seq_printf(seq, "daddr: %#08x\n", self->daddr);
1154
1155	seq_printf(seq, "  win size: %d, ",
1156		   self->window_size);
1157	seq_printf(seq, "win: %d, ", self->window);
1158#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1159	seq_printf(seq, "line capacity: %d, ",
1160		   self->line_capacity);
1161	seq_printf(seq, "bytes left: %d\n", self->bytes_left);
1162#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1163	seq_printf(seq, "  tx queue len: %d ",
1164		   skb_queue_len(&self->txq));
1165	seq_printf(seq, "win queue len: %d ",
1166		   skb_queue_len(&self->wx_list));
1167	seq_printf(seq, "rbusy: %s", self->remote_busy ?
1168		   "TRUE" : "FALSE");
1169	seq_printf(seq, " mbusy: %s\n", self->media_busy ?
1170		   "TRUE" : "FALSE");
1171
1172	seq_printf(seq, "  retrans: %d ", self->retry_count);
1173	seq_printf(seq, "vs: %d ", self->vs);
1174	seq_printf(seq, "vr: %d ", self->vr);
1175	seq_printf(seq, "va: %d\n", self->va);
1176
1177	seq_printf(seq, "  qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
1178
1179	seq_printf(seq, "  tx\t%d\t",
1180		   self->qos_tx.baud_rate.value);
1181	seq_printf(seq, "%d\t",
1182		   self->qos_tx.max_turn_time.value);
1183	seq_printf(seq, "%d\t",
1184		   self->qos_tx.data_size.value);
1185	seq_printf(seq, "%d\t",
1186		   self->qos_tx.window_size.value);
1187	seq_printf(seq, "%d\t",
1188		   self->qos_tx.additional_bofs.value);
1189	seq_printf(seq, "%d\t",
1190		   self->qos_tx.min_turn_time.value);
1191	seq_printf(seq, "%d\t",
1192		   self->qos_tx.link_disc_time.value);
1193	seq_printf(seq, "\n");
1194
1195	seq_printf(seq, "  rx\t%d\t",
1196		   self->qos_rx.baud_rate.value);
1197	seq_printf(seq, "%d\t",
1198		   self->qos_rx.max_turn_time.value);
1199	seq_printf(seq, "%d\t",
1200		   self->qos_rx.data_size.value);
1201	seq_printf(seq, "%d\t",
1202		   self->qos_rx.window_size.value);
1203	seq_printf(seq, "%d\t",
1204		   self->qos_rx.additional_bofs.value);
1205	seq_printf(seq, "%d\t",
1206		   self->qos_rx.min_turn_time.value);
1207	seq_printf(seq, "%d\n",
1208		   self->qos_rx.link_disc_time.value);
1209
1210	return 0;
1211}
1212
1213static const struct seq_operations irlap_seq_ops = {
1214	.start  = irlap_seq_start,
1215	.next   = irlap_seq_next,
1216	.stop   = irlap_seq_stop,
1217	.show   = irlap_seq_show,
1218};
1219
1220static int irlap_seq_open(struct inode *inode, struct file *file)
1221{
1222	if (irlap == NULL)
1223		return -EINVAL;
1224
1225	return seq_open_private(file, &irlap_seq_ops,
1226			sizeof(struct irlap_iter_state));
1227}
1228
1229const struct file_operations irlap_seq_fops = {
1230	.owner		= THIS_MODULE,
1231	.open           = irlap_seq_open,
1232	.read           = seq_read,
1233	.llseek         = seq_lseek,
1234	.release	= seq_release_private,
1235};
1236
1237#endif /* CONFIG_PROC_FS */