   1/*********************************************************************
   2 *
   3 * Filename:      irlap.c
   4 * Version:       1.0
   5 * Description:   IrLAP implementation for Linux
   6 * Status:        Stable
   7 * Author:        Dag Brattli <dagb@cs.uit.no>
   8 * Created at:    Mon Aug  4 20:40:53 1997
   9 * Modified at:   Tue Dec 14 09:26:44 1999
  10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
  11 *
  12 *     Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
  13 *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
  14 *
  15 *     This program is free software; you can redistribute it and/or
  16 *     modify it under the terms of the GNU General Public License as
  17 *     published by the Free Software Foundation; either version 2 of
  18 *     the License, or (at your option) any later version.
  19 *
  20 *     This program is distributed in the hope that it will be useful,
  21 *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  22 *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  23 *     GNU General Public License for more details.
  24 *
  25 *     You should have received a copy of the GNU General Public License
  26 *     along with this program; if not, see <http://www.gnu.org/licenses/>.
  27 *
  28 ********************************************************************/
  29
  30#include <linux/slab.h>
  31#include <linux/string.h>
  32#include <linux/skbuff.h>
  33#include <linux/delay.h>
  34#include <linux/proc_fs.h>
  35#include <linux/init.h>
  36#include <linux/random.h>
  37#include <linux/module.h>
  38#include <linux/seq_file.h>
  39
  40#include <net/irda/irda.h>
  41#include <net/irda/irda_device.h>
  42#include <net/irda/irqueue.h>
  43#include <net/irda/irlmp.h>
  44#include <net/irda/irlmp_frame.h>
  45#include <net/irda/irlap_frame.h>
  46#include <net/irda/irlap.h>
  47#include <net/irda/timer.h>
  48#include <net/irda/qos.h>
  49
  50static hashbin_t *irlap = NULL;
  51int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
  52
  53/* This is how long we tolerate missed pf periods before generating an
  54 * event to the application. The spec mandates 3 seconds, but in some
  55 * cases that is way too long. - Jean II */
  56int sysctl_warn_noreply_time = 3;
  57
  58extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
  59static void __irlap_close(struct irlap_cb *self);
  60static void irlap_init_qos_capabilities(struct irlap_cb *self,
  61					struct qos_info *qos_user);
  62
  63#ifdef CONFIG_IRDA_DEBUG
  64static const char *const lap_reasons[] = {
  65	"ERROR, NOT USED",
  66	"LAP_DISC_INDICATION",
  67	"LAP_NO_RESPONSE",
  68	"LAP_RESET_INDICATION",
  69	"LAP_FOUND_NONE",
  70	"LAP_MEDIA_BUSY",
  71	"LAP_PRIMARY_CONFLICT",
  72	"ERROR, NOT USED",
  73};
  74#endif	/* CONFIG_IRDA_DEBUG */
  75
  76int __init irlap_init(void)
  77{
  78	/* Check if the compiler did its job properly.
  79	 * This may go wrong on some ARM configurations - check with Russell King. */
  80	IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
  81	IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
  82	IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
  83	IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);
  84
  85	/* Allocate master array */
  86	irlap = hashbin_new(HB_LOCK);
  87	if (irlap == NULL) {
  88		IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
  89			   __func__);
  90		return -ENOMEM;
  91	}
  92
  93	return 0;
  94}
  95
  96void irlap_cleanup(void)
  97{
  98	IRDA_ASSERT(irlap != NULL, return;);
  99
 100	hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
 101}
 102
 103/*
 104 * Function irlap_open (driver)
 105 *
 106 *    Initialize IrLAP layer
 107 *
 108 */
 109struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
 110			    const char *hw_name)
 111{
 112	struct irlap_cb *self;
 113
 114	IRDA_DEBUG(4, "%s()\n", __func__);
 115
 116	/* Initialize the irlap structure. */
 117	self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
 118	if (self == NULL)
 119		return NULL;
 120
 121	self->magic = LAP_MAGIC;
 122
 123	/* Make a binding between the layers */
 124	self->netdev = dev;
 125	self->qos_dev = qos;
 126	/* Copy hardware name */
 127	if(hw_name != NULL) {
 128		strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
 129	} else {
 130		self->hw_name[0] = '\0';
 131	}
 132
 133	/* FIXME: should we get our own field? */
 134	dev->atalk_ptr = self;
 135
 136	self->state = LAP_OFFLINE;
 137
 138	/* Initialize transmit queue */
 139	skb_queue_head_init(&self->txq);
 140	skb_queue_head_init(&self->txq_ultra);
 141	skb_queue_head_init(&self->wx_list);
 142
 143	/* My unique IrLAP device address! */
 144	/* We don't want the broadcast address, neither the NULL address
 145	 * (most often used to signify "invalid"), and we don't want an
 146	 * address already in use (otherwise connect won't be able
 147	 * to select the proper link). - Jean II */
 148	do {
 149		get_random_bytes(&self->saddr, sizeof(self->saddr));
 150	} while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
 151		 (hashbin_lock_find(irlap, self->saddr, NULL)) );
 152	/* Copy to the driver */
 153	memcpy(dev->dev_addr, &self->saddr, 4);
 154
 155	init_timer(&self->slot_timer);
 156	init_timer(&self->query_timer);
 157	init_timer(&self->discovery_timer);
 158	init_timer(&self->final_timer);
 159	init_timer(&self->poll_timer);
 160	init_timer(&self->wd_timer);
 161	init_timer(&self->backoff_timer);
 162	init_timer(&self->media_busy_timer);
 163
 164	irlap_apply_default_connection_parameters(self);
 165
 166	self->N3 = 3; /* # connection attempts to try before giving up */
 167
 168	self->state = LAP_NDM;
 169
 170	hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);
 171
 172	irlmp_register_link(self, self->saddr, &self->notify);
 173
 174	return self;
 175}
 176EXPORT_SYMBOL(irlap_open);
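/*
 * A rough usage sketch, not taken from any particular driver: a LAP instance
 * is typically created when the underlying device is brought up and released
 * when it goes down. "priv" and the "mydongle" name are placeholders only.
 *
 *	priv->irlap = irlap_open(dev, &priv->qos, "mydongle");
 *	if (!priv->irlap)
 *		return -ENOMEM;
 *	...
 *	irlap_close(priv->irlap);
 *	priv->irlap = NULL;
 */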
 177
 178/*
 179 * Function __irlap_close (self)
 180 *
 181 *    Remove IrLAP and all allocated memory. Stop any pending timers.
 182 *
 183 */
 184static void __irlap_close(struct irlap_cb *self)
 185{
 186	IRDA_ASSERT(self != NULL, return;);
 187	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 188
 189	/* Stop timers */
 190	del_timer(&self->slot_timer);
 191	del_timer(&self->query_timer);
 192	del_timer(&self->discovery_timer);
 193	del_timer(&self->final_timer);
 194	del_timer(&self->poll_timer);
 195	del_timer(&self->wd_timer);
 196	del_timer(&self->backoff_timer);
 197	del_timer(&self->media_busy_timer);
 198
 199	irlap_flush_all_queues(self);
 200
 201	self->magic = 0;
 202
 203	kfree(self);
 204}
 205
 206/*
 207 * Function irlap_close (self)
 208 *
 209 *    Remove IrLAP instance
 210 *
 211 */
 212void irlap_close(struct irlap_cb *self)
 213{
 214	struct irlap_cb *lap;
 215
 216	IRDA_DEBUG(4, "%s()\n", __func__);
 217
 218	IRDA_ASSERT(self != NULL, return;);
 219	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 220
 221	/* We used to send a LAP_DISC_INDICATION here, but this was
 222	 * racy. This has been moved into irlmp_unregister_link()
 223	 * itself. Jean II */
 224
 225	/* Kill the LAP and all LSAPs on top of it */
 226	irlmp_unregister_link(self->saddr);
 227	self->notify.instance = NULL;
 228
 229	/* Be sure that we manage to remove ourselves from the hash */
 230	lap = hashbin_remove(irlap, self->saddr, NULL);
 231	if (!lap) {
 232		IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__);
 233		return;
 234	}
 235	__irlap_close(lap);
 236}
 237EXPORT_SYMBOL(irlap_close);
 238
 239/*
 240 * Function irlap_connect_indication (self, skb)
 241 *
 242 *    Another device is attempting to make a connection
 243 *
 244 */
 245void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
 246{
 247	IRDA_DEBUG(4, "%s()\n", __func__);
 248
 249	IRDA_ASSERT(self != NULL, return;);
 250	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 251
 252	irlap_init_qos_capabilities(self, NULL); /* No user QoS! */
 253
 254	irlmp_link_connect_indication(self->notify.instance, self->saddr,
 255				      self->daddr, &self->qos_tx, skb);
 256}
 257
 258/*
 259 * Function irlap_connect_response (self, skb)
 260 *
 261 *    Service user has accepted incoming connection
 262 *
 263 */
 264void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
 265{
 266	IRDA_DEBUG(4, "%s()\n", __func__);
 267
 268	irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
 269}
 270
 271/*
 272 * Function irlap_connect_request (self, daddr, qos_user, sniff)
 273 *
 274 *    Request a connection with another device. Sniffing is not
 275 *    implemented yet.
 276 *
 277 */
 278void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
 279			   struct qos_info *qos_user, int sniff)
 280{
 281	IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr);
 282
 283	IRDA_ASSERT(self != NULL, return;);
 284	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 285
 286	self->daddr = daddr;
 287
 288	/*
 289	 *  If the service user specifies QoS values for this connection,
 290	 *  then use them
 291	 */
 292	irlap_init_qos_capabilities(self, qos_user);
 293
 294	if ((self->state == LAP_NDM) && !self->media_busy)
 295		irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
 296	else
 297		self->connect_pending = TRUE;
 298}
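/*
 * Illustrative note on the flow above: a connect normally follows a
 * discovery, and daddr is one of the discovered device addresses. When the
 * medium is busy we only latch the request in connect_pending; the NDM
 * state machine (outside this file) is what eventually replays it once
 * media_busy clears.
 */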
 299
 300/*
 301 * Function irlap_connect_confirm (self, skb)
 302 *
 303 *    Connection request has been accepted
 304 *
 305 */
 306void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
 307{
 308	IRDA_DEBUG(4, "%s()\n", __func__);
 309
 310	IRDA_ASSERT(self != NULL, return;);
 311	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 312
 313	irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
 314}
 315
 316/*
 317 * Function irlap_data_indication (self, skb)
 318 *
 319 *    Received data frames from IR-port, so we just pass them up to
 320 *    IrLMP for further processing
 321 *
 322 */
 323void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
 324			   int unreliable)
 325{
 326	/* Hide LAP header from IrLMP layer */
 327	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
 328
 329	irlmp_link_data_indication(self->notify.instance, skb, unreliable);
 330}
 331
 332
 333/*
 334 * Function irlap_data_request (self, skb)
 335 *
 336 *    Queue data for transmission, must wait until XMIT state
 337 *
 338 */
 339void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
 340			int unreliable)
 341{
 342	IRDA_ASSERT(self != NULL, return;);
 343	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 344
 345	IRDA_DEBUG(3, "%s()\n", __func__);
 346
 347	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
 348		    return;);
 349	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
 350
 351	/*
 352	 *  Must set frame format now so that the rest of the code knows
 353	 *  whether it is dealing with an I or a UI frame
 354	 */
 355	if (unreliable)
 356		skb->data[1] = UI_FRAME;
 357	else
 358		skb->data[1] = I_FRAME;
 359
 360	/* Don't forget to refcount it - see irlmp_connect_request(). */
 361	skb_get(skb);
 362
 363	/* Add at the end of the queue (keep ordering) - Jean II */
 364	skb_queue_tail(&self->txq, skb);
 365
 366	/*
 367	 *  Send an event for this frame only if we are in the right state
 368	 *  FIXME: udata should be sent first! (skb_queue_head?)
 369	 */
 370	if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
 371		/* If we are not already processing the Tx queue, trigger
 372		 * transmission immediately - Jean II */
 373		if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
 374			irlap_do_event(self, DATA_REQUEST, skb, NULL);
 375		/* Otherwise, the packets will be sent normally at the
 376		 * next pf-poll - Jean II */
 377	}
 378}
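/*
 * Minimal sketch of how a caller might hand a frame to irlap_data_request();
 * "lap", "payload" and "len" are placeholders. The unreliable argument picks
 * the frame type: FALSE queues an I frame, TRUE a UI frame. The only hard
 * requirement shown is the headroom checked by the assert above.
 *
 *	skb = alloc_skb(LAP_ADDR_HEADER + LAP_CTRL_HEADER + len, GFP_ATOMIC);
 *	if (skb) {
 *		skb_reserve(skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER);
 *		memcpy(skb_put(skb, len), payload, len);
 *		irlap_data_request(lap, skb, FALSE);
 *	}
 */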
 379
 380/*
 381 * Function irlap_unitdata_request (self, skb)
 382 *
 383 *    Send Ultra data. This is data that must be sent outside any connection
 384 *
 385 */
 386#ifdef CONFIG_IRDA_ULTRA
 387void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
 388{
 389	IRDA_ASSERT(self != NULL, return;);
 390	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 391
 392	IRDA_DEBUG(3, "%s()\n", __func__);
 393
 394	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
 395	       return;);
 396	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
 397
 398	skb->data[0] = CBROADCAST;
 399	skb->data[1] = UI_FRAME;
 400
 401	/* Don't need to refcount, see irlmp_connless_data_request() */
 402
 403	skb_queue_tail(&self->txq_ultra, skb);
 404
 405	irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
 406}
 407#endif /*CONFIG_IRDA_ULTRA */
 408
 409/*
 410 * Function irlap_unitdata_indication (self, skb)
 411 *
 412 *    Receive Ultra data. This is data that is received outside any connection
 413 *
 414 */
 415#ifdef CONFIG_IRDA_ULTRA
 416void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
 417{
 418	IRDA_DEBUG(1, "%s()\n", __func__);
 419
 420	IRDA_ASSERT(self != NULL, return;);
 421	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 422	IRDA_ASSERT(skb != NULL, return;);
 423
 424	/* Hide LAP header from IrLMP layer */
 425	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
 426
 427	irlmp_link_unitdata_indication(self->notify.instance, skb);
 428}
 429#endif /* CONFIG_IRDA_ULTRA */
 430
 431/*
 432 * Function irlap_disconnect_request (void)
 433 *
 434 *    Request to disconnect connection by service user
 435 */
 436void irlap_disconnect_request(struct irlap_cb *self)
 437{
 438	IRDA_DEBUG(3, "%s()\n", __func__);
 439
 440	IRDA_ASSERT(self != NULL, return;);
 441	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 442
 443	/* Don't disconnect until all data frames are successfully sent */
 444	if (!skb_queue_empty(&self->txq)) {
 445		self->disconnect_pending = TRUE;
 446		return;
 447	}
 448
 449	/* Check if we are in the right state for disconnecting */
 450	switch (self->state) {
 451	case LAP_XMIT_P:        /* FALLTHROUGH */
 452	case LAP_XMIT_S:        /* FALLTHROUGH */
 453	case LAP_CONN:          /* FALLTHROUGH */
 454	case LAP_RESET_WAIT:    /* FALLTHROUGH */
 455	case LAP_RESET_CHECK:
 456		irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
 457		break;
 458	default:
 459		IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__);
 460		self->disconnect_pending = TRUE;
 461		break;
 462	}
 463}
 464
 465/*
 466 * Function irlap_disconnect_indication (void)
 467 *
 468 *    Disconnect request from other device
 469 *
 470 */
 471void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
 472{
 473	IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]);
 474
 475	IRDA_ASSERT(self != NULL, return;);
 476	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 477
 478	/* Flush queues */
 479	irlap_flush_all_queues(self);
 480
 481	switch (reason) {
 482	case LAP_RESET_INDICATION:
 483		IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
 484		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
 485		break;
 486	case LAP_NO_RESPONSE:	   /* FALLTHROUGH */
 487	case LAP_DISC_INDICATION:  /* FALLTHROUGH */
 488	case LAP_FOUND_NONE:       /* FALLTHROUGH */
 489	case LAP_MEDIA_BUSY:
 490		irlmp_link_disconnect_indication(self->notify.instance, self,
 491						 reason, NULL);
 492		break;
 493	default:
 494		IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason);
 495	}
 496}
 497
 498/*
 499 * Function irlap_discovery_request (gen_addr_bit)
 500 *
 501 *    Start one single discovery operation.
 502 *
 503 */
 504void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
 505{
 506	struct irlap_info info;
 507
 508	IRDA_ASSERT(self != NULL, return;);
 509	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 510	IRDA_ASSERT(discovery != NULL, return;);
 511
 512	IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots);
 513
 514	IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
 515		    (discovery->nslots == 8) || (discovery->nslots == 16),
 516		    return;);
 517
 518	/* Discovery is only possible in NDM mode */
 519	if (self->state != LAP_NDM) {
 520		IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
 521			   __func__);
 522		irlap_discovery_confirm(self, NULL);
 523		/* Note : in theory, if we are not in NDM, we could postpone
 524		 * the discovery like we do for connection request.
 525		 * In practice, it's not worth it. If the media was busy,
 526		 * it's likely next time around it won't be busy. If we are
 527		 * in REPLY state, we will get passive discovery info & event.
 528		 * Jean II */
 529		return;
 530	}
 531
 532	/* Check if last discovery request finished in time, or if
 533	 * it was aborted due to the media busy flag. */
 534	if (self->discovery_log != NULL) {
 535		hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
 536		self->discovery_log = NULL;
 537	}
 538
 539	/* All operations will occur at predictable time, no need to lock */
 540	self->discovery_log = hashbin_new(HB_NOLOCK);
 541
 542	if (self->discovery_log == NULL) {
 543		IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
 544			     __func__);
 545		return;
 546	}
 547
 548	info.S = discovery->nslots; /* Number of slots */
 549	info.s = 0; /* Current slot */
 550
 551	self->discovery_cmd = discovery;
 552	info.discovery = discovery;
 553
 554	/* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
 555	self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
 556
 557	irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
 558}
 559
 560/*
 561 * Function irlap_discovery_confirm (log)
 562 *
 563 *    A device has been discovered in front of this station; we report
 564 *    it directly to LMP.
 565 */
 566void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
 567{
 568	IRDA_ASSERT(self != NULL, return;);
 569	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 570
 571	IRDA_ASSERT(self->notify.instance != NULL, return;);
 572
 573	/*
 574	 * Check for successful discovery, since we are then allowed to clear
 575	 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
 576	 * us to make connection attempts much faster and easier (i.e. no
 577	 * collisions).
 578	 * Setting media busy to false will also generate an event allowing
 579	 * us to process pending events in the NDM state machine.
 580	 * Note : the spec doesn't define what a successful discovery is.
 581	 * If we want Ultra to work, it's successful even if there is
 582	 * nobody discovered - Jean II
 583	 */
 584	if (discovery_log)
 585		irda_device_set_media_busy(self->netdev, FALSE);
 586
 587	/* Inform IrLMP */
 588	irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
 589}
 590
 591/*
 592 * Function irlap_discovery_indication (log)
 593 *
 594 *    Somebody is trying to discover us!
 595 *
 596 */
 597void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
 598{
 599	IRDA_DEBUG(4, "%s()\n", __func__);
 600
 601	IRDA_ASSERT(self != NULL, return;);
 602	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 603	IRDA_ASSERT(discovery != NULL, return;);
 604
 605	IRDA_ASSERT(self->notify.instance != NULL, return;);
 606
 607	/* A device is very likely to connect immediately after it performs
 608	 * a successful discovery. This means that in our case, we are much
 609	 * more likely to receive a connection request over the medium.
 610	 * So, we back off to avoid collisions.
 611	 * IrLAP spec 6.13.4 suggests 100ms...
 612	 * Note : this little trick actually makes a *BIG* difference. If I set
 613	 * my Linux box with discovery enabled and one Ultra frame sent every
 614	 * second, my Palm has no trouble connecting to it every time!
 615	 * Jean II */
 616	irda_device_set_media_busy(self->netdev, SMALL);
 617
 618	irlmp_link_discovery_indication(self->notify.instance, discovery);
 619}
 620
 621/*
 622 * Function irlap_status_indication (quality_of_link)
 623 */
 624void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
 625{
 626	switch (quality_of_link) {
 627	case STATUS_NO_ACTIVITY:
 628		IRDA_MESSAGE("IrLAP, no activity on link!\n");
 629		break;
 630	case STATUS_NOISY:
 631		IRDA_MESSAGE("IrLAP, noisy link!\n");
 632		break;
 633	default:
 634		break;
 635	}
 636	irlmp_status_indication(self->notify.instance,
 637				quality_of_link, LOCK_NO_CHANGE);
 638}
 639
 640/*
 641 * Function irlap_reset_indication (void)
 642 */
 643void irlap_reset_indication(struct irlap_cb *self)
 644{
 645	IRDA_DEBUG(1, "%s()\n", __func__);
 646
 647	IRDA_ASSERT(self != NULL, return;);
 648	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 649
 650	if (self->state == LAP_RESET_WAIT)
 651		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
 652	else
 653		irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
 654}
 655
 656/*
 657 * Function irlap_reset_confirm (void)
 658 */
 659void irlap_reset_confirm(void)
 660{
 661	IRDA_DEBUG(1, "%s()\n", __func__);
 662}
 663
 664/*
 665 * Function irlap_generate_rand_time_slot (S, s)
 666 *
 667 *    Generate a random time slot between s and S-1 where
 668 *    S = Number of slots (0 -> S-1)
 669 *    s = Current slot
 670 */
 671int irlap_generate_rand_time_slot(int S, int s)
 672{
 673	static int rand;
 674	int slot;
 675
 676	IRDA_ASSERT((S - s) > 0, return 0;);
 677
 678	rand += jiffies;
 679	rand ^= (rand << 12);
 680	rand ^= (rand >> 20);
 681
 682	slot = s + rand % (S-s);
 683
 684	IRDA_ASSERT((slot >= s) && (slot < S), return 0;);
 685
 686	return slot;
 687}
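/*
 * Worked example (illustrative): with S = 8 discovery slots and current
 * slot s = 3, the expression above reduces to 3 + rand % 5, i.e. a slot in
 * the range 3..7 (assuming rand is non-negative at that point).
 */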
 688
 689/*
 690 * Function irlap_update_nr_received (nr)
 691 *
 692 *    Remove all acknowledged frames in current window queue. This code is
 693 *    not intuitive and you should not try to change it. If you think it
 694 *    contains bugs, please mail a patch to the author instead.
 695 */
 696void irlap_update_nr_received(struct irlap_cb *self, int nr)
 697{
 698	struct sk_buff *skb = NULL;
 699	int count = 0;
 700
 701	/*
 702	 * Remove all the ack-ed frames from the window queue.
 703	 */
 704
 705	/*
 706	 *  Optimize for the common case. It is most likely that the receiver
 707	 *  will acknowledge all the frames we have sent! So in that case we
 708	 *  delete all frames stored in window.
 709	 */
 710	if (nr == self->vs) {
 711		while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
 712			dev_kfree_skb(skb);
 713		}
 714		/* The last acked frame is the next to send minus one */
 715		self->va = nr - 1;
 716	} else {
 717		/* Remove all acknowledged frames in current window */
 718		while ((skb_peek(&self->wx_list) != NULL) &&
 719		       (((self->va+1) % 8) != nr))
 720		{
 721			skb = skb_dequeue(&self->wx_list);
 722			dev_kfree_skb(skb);
 723
 724			self->va = (self->va + 1) % 8;
 725			count++;
 726		}
 727	}
 728
 729	/* Advance window */
 730	self->window = self->window_size - skb_queue_len(&self->wx_list);
 731}
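/*
 * Worked example (illustrative): suppose va = 2 (last frame acked), vs = 6
 * (next to send) and a frame arrives with nr = 5. The loop above dequeues
 * the frames with ns = 3 and ns = 4 from wx_list and leaves va = 4; the
 * frame with ns = 5 stays queued because it has not been acknowledged yet.
 */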
 732
 733/*
 734 * Function irlap_validate_ns_received (ns)
 735 *
 736 *    Validate the next to send (ns) field from received frame.
 737 */
 738int irlap_validate_ns_received(struct irlap_cb *self, int ns)
 739{
 740	/*  ns as expected?  */
 741	if (ns == self->vr)
 742		return NS_EXPECTED;
 743	/*
 744	 *  Stations are allowed to treat invalid NS as unexpected NS
 745	 *  IrLAP, Recv ... with-invalid-Ns. p. 84
 746	 */
 747	return NS_UNEXPECTED;
 748
 749	/* return NR_INVALID; */
 750}
 751/*
 752 * Function irlap_validate_nr_received (nr)
 753 *
 754 *    Validate the next to receive (nr) field from received frame.
 755 *
 756 */
 757int irlap_validate_nr_received(struct irlap_cb *self, int nr)
 758{
 759	/*  nr as expected?  */
 760	if (nr == self->vs) {
 761		IRDA_DEBUG(4, "%s(), expected!\n", __func__);
 762		return NR_EXPECTED;
 763	}
 764
 765	/*
 766	 *  unexpected nr? (but within current window), first we check if the
 767	 *  ns numbers of the frames in the current window wrap.
 768	 */
 769	if (self->va < self->vs) {
 770		if ((nr >= self->va) && (nr <= self->vs))
 771			return NR_UNEXPECTED;
 772	} else {
 773		if ((nr >= self->va) || (nr <= self->vs))
 774			return NR_UNEXPECTED;
 775	}
 776
 777	/* Invalid nr!  */
 778	return NR_INVALID;
 779}
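/*
 * Worked example (illustrative) for the wrapped-window branch above: with
 * va = 6 and vs = 1, an nr of 7 or 0 lies inside the current window and is
 * reported as NR_UNEXPECTED, while an nr of 3 matches neither test and is
 * flagged NR_INVALID. An nr equal to vs (here 1) is caught earlier as
 * NR_EXPECTED.
 */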
 780
 781/*
 782 * Function irlap_initiate_connection_state ()
 783 *
 784 *    Initialize the connection state parameters
 785 *
 786 */
 787void irlap_initiate_connection_state(struct irlap_cb *self)
 788{
 789	IRDA_DEBUG(4, "%s()\n", __func__);
 790
 791	IRDA_ASSERT(self != NULL, return;);
 792	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 793
 794	/* Next to send and next to receive */
 795	self->vs = self->vr = 0;
 796
 797	/* Last frame which got acked (0 - 1) % 8 */
 798	self->va = 7;
 799
 800	self->window = 1;
 801
 802	self->remote_busy = FALSE;
 803	self->retry_count = 0;
 804}
 805
 806/*
 807 * Function irlap_wait_min_turn_around (self, qos)
 808 *
 809 *    Wait the negotiated minimum turn around time. This function actually
 810 *    sets the number of XBOFs that must be sent before the next transmitted
 811 *    frame in order to delay for the specified amount of time. This is
 812 *    done to avoid using timers, and the forbidden udelay!
 813 */
 814void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
 815{
 816	__u32 min_turn_time;
 817	__u32 speed;
 818
 819	/* Get QoS values.  */
 820	speed = qos->baud_rate.value;
 821	min_turn_time = qos->min_turn_time.value;
 822
 823	/* No need to calculate XBOFs for speeds over 115200 bps */
 824	if (speed > 115200) {
 825		self->mtt_required = min_turn_time;
 826		return;
 827	}
 828
 829	/*
 830	 *  Send additional BOF's for the next frame for the requested
 831	 *  min turn time, so now we must calculate how many chars (XBOF's) we
 832	 *  must send for the requested time period (min turn time)
 833	 */
 834	self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
 835}
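/*
 * Rough numbers only - the exact count depends on the rounding inside
 * irlap_min_turn_time_in_bytes(): at 9600 bps one character takes roughly
 * 1 ms on the wire, so a 5000 us min turn time comes out to about 5 XBOFs,
 * while the same delay at 115200 bps needs on the order of 60 XBOFs.
 */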
 836
 837/*
 838 * Function irlap_flush_all_queues (void)
 839 *
 840 *    Flush all queues
 841 *
 842 */
 843void irlap_flush_all_queues(struct irlap_cb *self)
 844{
 845	struct sk_buff* skb;
 846
 847	IRDA_ASSERT(self != NULL, return;);
 848	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 849
 850	/* Free transmission queue */
 851	while ((skb = skb_dequeue(&self->txq)) != NULL)
 852		dev_kfree_skb(skb);
 853
 854	while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
 855		dev_kfree_skb(skb);
 856
 857	/* Free sliding window buffered packets */
 858	while ((skb = skb_dequeue(&self->wx_list)) != NULL)
 859		dev_kfree_skb(skb);
 860}
 861
 862/*
 863 * Function irlap_change_speed (self, speed, now)
 864 *
 865 *    Change the speed of the IrDA port
 866 *
 867 */
 868static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
 869{
 870	struct sk_buff *skb;
 871
 872	IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed);
 873
 874	IRDA_ASSERT(self != NULL, return;);
 875	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 876
 877	self->speed = speed;
 878
 879	/* Change speed now, or just piggyback speed on frames */
 880	if (now) {
 881		/* Send down empty frame to trigger speed change */
 882		skb = alloc_skb(0, GFP_ATOMIC);
 883		if (skb)
 884			irlap_queue_xmit(self, skb);
 885	}
 886}
 887
 888/*
 889 * Function irlap_init_qos_capabilities (self, qos)
 890 *
 891 *    Initialize QoS for this IrLAP session. What we do is compute the
 892 *    intersection of the QoS capabilities of the user, the driver and
 893 *    IrLAP itself. Normally, IrLAP will not specify any values, but it can
 894 *    be used to restrict certain values.
 895 */
 896static void irlap_init_qos_capabilities(struct irlap_cb *self,
 897					struct qos_info *qos_user)
 898{
 899	IRDA_ASSERT(self != NULL, return;);
 900	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 901	IRDA_ASSERT(self->netdev != NULL, return;);
 902
 903	/* Start out with the maximum QoS support possible */
 904	irda_init_max_qos_capabilies(&self->qos_rx);
 905
 906	/* Apply the driver's QoS capabilities */
 907	irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);
 908
 909	/*
 910	 *  Check for user supplied QoS parameters. The service user is only
 911	 *  allowed to supply these values. We check each parameter since the
 912	 *  user may not have set all of them.
 913	 */
 914	if (qos_user) {
 915		IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__);
 916
 917		if (qos_user->baud_rate.bits)
 918			self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
 919
 920		if (qos_user->max_turn_time.bits)
 921			self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
 922		if (qos_user->data_size.bits)
 923			self->qos_rx.data_size.bits &= qos_user->data_size.bits;
 924
 925		if (qos_user->link_disc_time.bits)
 926			self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
 927	}
 928
 929	/* Use 500ms in IrLAP for now */
 930	self->qos_rx.max_turn_time.bits &= 0x01;
 931
 932	/* Set data size */
 933	/*self->qos_rx.data_size.bits &= 0x03;*/
 934
 935	irda_qos_bits_to_value(&self->qos_rx);
 936}
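/*
 * Illustrative only (no particular bit assignment implied): if the driver's
 * qos_dev advertises baud_rate.bits = 0x3f and the service user restricts
 * baud_rate.bits to 0x02, the AND above leaves 0x02, and
 * irda_qos_bits_to_value() then derives the numeric baud_rate.value from
 * that surviving bit.
 */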
 937
 938/*
 939 * Function irlap_apply_default_connection_parameters (void, now)
 940 *
 941 *    Use the default connection and transmission parameters
 942 */
 943void irlap_apply_default_connection_parameters(struct irlap_cb *self)
 944{
 945	IRDA_DEBUG(4, "%s()\n", __func__);
 946
 947	IRDA_ASSERT(self != NULL, return;);
 948	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 949
 950	/* xbofs : Default value in NDM */
 951	self->next_bofs   = 12;
 952	self->bofs_count  = 12;
 953
 954	/* NDM Speed is 9600 */
 955	irlap_change_speed(self, 9600, TRUE);
 956
 957	/* Set mbusy when going to NDM state */
 958	irda_device_set_media_busy(self->netdev, TRUE);
 959
 960	/*
 961	 * Generate random connection address for this session, which must
 962	 * be 7 bits wide and different from 0x00 and 0xfe
 963	 */
 964	while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
 965		get_random_bytes(&self->caddr, sizeof(self->caddr));
 966		self->caddr &= 0xfe;
 967	}
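	/* Descriptive note: the mask with 0xfe clears bit 0, so caddr sits in
	 * the upper seven bits of the address octet (bit 0 carries the
	 * command/response bit on the wire), while the loop itself rejects the
	 * NULL (0x00) and broadcast (0xfe, CBROADCAST) connection addresses. */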
 968
 969	/* Use default values until the connection has been negotiated */
 970	self->slot_timeout = sysctl_slot_timeout;
 971	self->final_timeout = FINAL_TIMEOUT;
 972	self->poll_timeout = POLL_TIMEOUT;
 973	self->wd_timeout = WD_TIMEOUT;
 974
 975	/* Set some default values */
 976	self->qos_tx.baud_rate.value = 9600;
 977	self->qos_rx.baud_rate.value = 9600;
 978	self->qos_tx.max_turn_time.value = 0;
 979	self->qos_rx.max_turn_time.value = 0;
 980	self->qos_tx.min_turn_time.value = 0;
 981	self->qos_rx.min_turn_time.value = 0;
 982	self->qos_tx.data_size.value = 64;
 983	self->qos_rx.data_size.value = 64;
 984	self->qos_tx.window_size.value = 1;
 985	self->qos_rx.window_size.value = 1;
 986	self->qos_tx.additional_bofs.value = 12;
 987	self->qos_rx.additional_bofs.value = 12;
 988	self->qos_tx.link_disc_time.value = 0;
 989	self->qos_rx.link_disc_time.value = 0;
 990
 991	irlap_flush_all_queues(self);
 992
 993	self->disconnect_pending = FALSE;
 994	self->connect_pending = FALSE;
 995}
 996
 997/*
 998 * Function irlap_apply_connection_parameters (qos, now)
 999 *
1000 *    Initialize IrLAP with the negotiated QoS values
1001 *
1002 * If 'now' is false, the speed and xbofs will be changed after the next
1003 * frame is sent.
1004 * If 'now' is true, the speed and xbofs is changed immediately
1005 */
1006void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
1007{
1008	IRDA_DEBUG(4, "%s()\n", __func__);
1009
1010	IRDA_ASSERT(self != NULL, return;);
1011	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
1012
1013	/* Set the negotiated xbofs value */
1014	self->next_bofs   = self->qos_tx.additional_bofs.value;
1015	if (now)
1016		self->bofs_count = self->next_bofs;
1017
1018	/* Set the negotiated link speed (may need the new xbofs value) */
1019	irlap_change_speed(self, self->qos_tx.baud_rate.value, now);
1020
1021	self->window_size = self->qos_tx.window_size.value;
1022	self->window      = self->qos_tx.window_size.value;
1023
1024#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1025	/*
1026	 *  Calculate how many bytes it is possible to transmit before the
1027	 *  link must be turned around
1028	 */
1029	self->line_capacity =
1030		irlap_max_line_capacity(self->qos_tx.baud_rate.value,
1031					self->qos_tx.max_turn_time.value);
1032	self->bytes_left = self->line_capacity;
1033#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1034
1035
1036	/*
1037	 *  Initialize timeout values, some of the rules are listed on
1038	 *  page 92 in IrLAP.
1039	 */
1040	IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
1041	IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
1042	/* The poll timeout applies only to the primary station.
1043	 * It defines the maximum time the primary stays in XMIT mode
1044	 * before timing out and turning the link around (sending an RR).
1045	 * In other words, this is how long we can keep the pf bit in
1046	 * primary mode. Therefore, it must be lower than or equal to our
1047	 * *OWN* max turn around time. Jean II */
1048	self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
1049	/* The Final timeout applies only to the primary station.
1050	 * It defines the maximum time the primary waits (mostly in RECV mode)
1051	 * for an answer from the secondary station before polling it again.
1052	 * Therefore, it must be greater than or equal to our *PARTNER's*
1053	 * max turn around time - Jean II */
1054	self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
1055	/* The Watchdog Bit timeout applies only to the secondary station.
1056	 * It defines the maximum time the secondary waits (mostly in RECV mode)
1057	 * for a poll from the primary station before getting annoyed.
1058	 * Therefore, it must be greater than or equal to our *PARTNER's*
1059	 * max turn around time - Jean II */
1060	self->wd_timeout = self->final_timeout * 2;
1061
1062	/*
1063	 * N1 and N2 are the maximum retry counts for *both* the final timer
1064	 * and the wd timer (with a factor of 2) as defined above.
1065	 * After N1 retries of a timer, we give a warning to the user.
1066	 * After N2 retries, we consider the link dead and disconnect it.
1067	 * Jean II
1068	 */
1069
1070	/*
1071	 *  Set N1 to 0 if the Link Disconnect/Threshold Time is 3 seconds,
1072	 *  and to 3 seconds otherwise. See page 71 in IrLAP for more details.
1073	 *  Actually, it's not always 3 seconds, as we allow it to be set via
1074	 *  sysctl... Max maxtt is 500ms, and N1 needs to be a multiple of 2,
1075	 *  so 1 second is the minimum we can allow. - Jean II
1076	 */
1077	if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
1078		/*
1079		 * If we set N1 to 0, it will trigger immediately, which is
1080		 * not what we want. What we really want is to disable it,
1081		 * Jean II
1082		 */
1083		self->N1 = -2; /* Disable - Need to be multiple of 2*/
1084	else
1085		self->N1 = sysctl_warn_noreply_time * 1000 /
1086		  self->qos_rx.max_turn_time.value;
1087
1088	IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);
1089
1090	/* Set N2 to match our own disconnect time */
1091	self->N2 = self->qos_tx.link_disc_time.value * 1000 /
1092		self->qos_rx.max_turn_time.value;
1093	IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
1094}
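/*
 * Worked example with illustrative figures: with a negotiated partner
 * max_turn_time of 500 ms and a link_disc_time of 12 s, the code above
 * yields final_timeout = 500 ms, wd_timeout = 1 s, N1 = 3000 / 500 = 6
 * retries before warning the user, and N2 = 12000 / 500 = 24 retries before
 * the link is declared dead (assuming the default sysctl_warn_noreply_time
 * of 3 seconds).
 */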
1095
1096#ifdef CONFIG_PROC_FS
1097struct irlap_iter_state {
1098	int id;
1099};
1100
1101static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
1102{
1103	struct irlap_iter_state *iter = seq->private;
1104	struct irlap_cb *self;
1105
1106	/* Protect our access to the lap list */
1107	spin_lock_irq(&irlap->hb_spinlock);
1108	iter->id = 0;
1109
1110	for (self = (struct irlap_cb *) hashbin_get_first(irlap);
1111	     self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
1112		if (iter->id == *pos)
1113			break;
1114		++iter->id;
1115	}
1116
1117	return self;
1118}
1119
1120static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1121{
1122	struct irlap_iter_state *iter = seq->private;
1123
1124	++*pos;
1125	++iter->id;
1126	return (void *) hashbin_get_next(irlap);
1127}
1128
1129static void irlap_seq_stop(struct seq_file *seq, void *v)
1130{
1131	spin_unlock_irq(&irlap->hb_spinlock);
1132}
1133
1134static int irlap_seq_show(struct seq_file *seq, void *v)
1135{
1136	const struct irlap_iter_state *iter = seq->private;
1137	const struct irlap_cb *self = v;
1138
1139	IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);
1140
1141	seq_printf(seq, "irlap%d ", iter->id);
1142	seq_printf(seq, "state: %s\n",
1143		   irlap_state[self->state]);
1144
1145	seq_printf(seq, "  device name: %s, ",
1146		   (self->netdev) ? self->netdev->name : "bug");
1147	seq_printf(seq, "hardware name: %s\n", self->hw_name);
1148
1149	seq_printf(seq, "  caddr: %#02x, ", self->caddr);
1150	seq_printf(seq, "saddr: %#08x, ", self->saddr);
1151	seq_printf(seq, "daddr: %#08x\n", self->daddr);
1152
1153	seq_printf(seq, "  win size: %d, ",
1154		   self->window_size);
1155	seq_printf(seq, "win: %d, ", self->window);
1156#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1157	seq_printf(seq, "line capacity: %d, ",
1158		   self->line_capacity);
1159	seq_printf(seq, "bytes left: %d\n", self->bytes_left);
1160#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1161	seq_printf(seq, "  tx queue len: %d ",
1162		   skb_queue_len(&self->txq));
1163	seq_printf(seq, "win queue len: %d ",
1164		   skb_queue_len(&self->wx_list));
1165	seq_printf(seq, "rbusy: %s", self->remote_busy ?
1166		   "TRUE" : "FALSE");
1167	seq_printf(seq, " mbusy: %s\n", self->media_busy ?
1168		   "TRUE" : "FALSE");
1169
1170	seq_printf(seq, "  retrans: %d ", self->retry_count);
1171	seq_printf(seq, "vs: %d ", self->vs);
1172	seq_printf(seq, "vr: %d ", self->vr);
1173	seq_printf(seq, "va: %d\n", self->va);
1174
1175	seq_printf(seq, "  qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
1176
1177	seq_printf(seq, "  tx\t%d\t",
1178		   self->qos_tx.baud_rate.value);
1179	seq_printf(seq, "%d\t",
1180		   self->qos_tx.max_turn_time.value);
1181	seq_printf(seq, "%d\t",
1182		   self->qos_tx.data_size.value);
1183	seq_printf(seq, "%d\t",
1184		   self->qos_tx.window_size.value);
1185	seq_printf(seq, "%d\t",
1186		   self->qos_tx.additional_bofs.value);
1187	seq_printf(seq, "%d\t",
1188		   self->qos_tx.min_turn_time.value);
1189	seq_printf(seq, "%d\t",
1190		   self->qos_tx.link_disc_time.value);
1191	seq_printf(seq, "\n");
1192
1193	seq_printf(seq, "  rx\t%d\t",
1194		   self->qos_rx.baud_rate.value);
1195	seq_printf(seq, "%d\t",
1196		   self->qos_rx.max_turn_time.value);
1197	seq_printf(seq, "%d\t",
1198		   self->qos_rx.data_size.value);
1199	seq_printf(seq, "%d\t",
1200		   self->qos_rx.window_size.value);
1201	seq_printf(seq, "%d\t",
1202		   self->qos_rx.additional_bofs.value);
1203	seq_printf(seq, "%d\t",
1204		   self->qos_rx.min_turn_time.value);
1205	seq_printf(seq, "%d\n",
1206		   self->qos_rx.link_disc_time.value);
1207
1208	return 0;
1209}
1210
1211static const struct seq_operations irlap_seq_ops = {
1212	.start  = irlap_seq_start,
1213	.next   = irlap_seq_next,
1214	.stop   = irlap_seq_stop,
1215	.show   = irlap_seq_show,
1216};
1217
1218static int irlap_seq_open(struct inode *inode, struct file *file)
1219{
1220	if (irlap == NULL)
1221		return -EINVAL;
1222
1223	return seq_open_private(file, &irlap_seq_ops,
1224			sizeof(struct irlap_iter_state));
1225}
1226
1227const struct file_operations irlap_seq_fops = {
1228	.owner		= THIS_MODULE,
1229	.open           = irlap_seq_open,
1230	.read           = seq_read,
1231	.llseek         = seq_lseek,
1232	.release	= seq_release_private,
1233};
1234
1235#endif /* CONFIG_PROC_FS */