1/*********************************************************************
2 *
3 * Filename: irlap.c
4 * Version: 1.0
5 * Description: IrLAP implementation for Linux
6 * Status: Stable
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Mon Aug 4 20:40:53 1997
9 * Modified at: Tue Dec 14 09:26:44 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
28 * MA 02111-1307 USA
29 *
30 ********************************************************************/
31
32#include <linux/slab.h>
33#include <linux/string.h>
34#include <linux/skbuff.h>
35#include <linux/delay.h>
36#include <linux/proc_fs.h>
37#include <linux/init.h>
38#include <linux/random.h>
39#include <linux/module.h>
40#include <linux/seq_file.h>
41
42#include <net/irda/irda.h>
43#include <net/irda/irda_device.h>
44#include <net/irda/irqueue.h>
45#include <net/irda/irlmp.h>
46#include <net/irda/irlmp_frame.h>
47#include <net/irda/irlap_frame.h>
48#include <net/irda/irlap.h>
49#include <net/irda/timer.h>
50#include <net/irda/qos.h>
51
52static hashbin_t *irlap = NULL;
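/* Exposed via sysctl (bounds are checked in irsysctl.c); stored in
 * milliseconds. */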
53int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
54
55/* This is the delay of missed pf periods allowed before generating an
56 * event to the application. The spec mandates 3 seconds, but in some
57 * cases it's way too long. - Jean II */
58int sysctl_warn_noreply_time = 3;
59
60extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
61static void __irlap_close(struct irlap_cb *self);
62static void irlap_init_qos_capabilities(struct irlap_cb *self,
63 struct qos_info *qos_user);
64
65#ifdef CONFIG_IRDA_DEBUG
66static const char *const lap_reasons[] = {
67 "ERROR, NOT USED",
68 "LAP_DISC_INDICATION",
69 "LAP_NO_RESPONSE",
70 "LAP_RESET_INDICATION",
71 "LAP_FOUND_NONE",
72 "LAP_MEDIA_BUSY",
73 "LAP_PRIMARY_CONFLICT",
74 "ERROR, NOT USED",
75};
76#endif /* CONFIG_IRDA_DEBUG */
77
78int __init irlap_init(void)
79{
80 /* Check if the compiler did its job properly.
81	 * Bad struct packing may happen on some ARM configurations; check with Russell King. */
82 IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
83 IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
84 IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
85 IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);
86
87 /* Allocate master array */
88 irlap = hashbin_new(HB_LOCK);
89 if (irlap == NULL) {
90 IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
91 __func__);
92 return -ENOMEM;
93 }
94
95 return 0;
96}
97
98void irlap_cleanup(void)
99{
100 IRDA_ASSERT(irlap != NULL, return;);
101
102 hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
103}
104
105/*
106 * Function irlap_open (driver)
107 *
108 * Initialize IrLAP layer
109 *
110 */
111struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
112 const char *hw_name)
113{
114 struct irlap_cb *self;
115
116 IRDA_DEBUG(4, "%s()\n", __func__);
117
118 /* Initialize the irlap structure. */
119 self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
120 if (self == NULL)
121 return NULL;
122
123 self->magic = LAP_MAGIC;
124
125 /* Make a binding between the layers */
126 self->netdev = dev;
127 self->qos_dev = qos;
128 /* Copy hardware name */
129 if(hw_name != NULL) {
130 strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
131 } else {
132 self->hw_name[0] = '\0';
133 }
134
135 /* FIXME: should we get our own field? */
136 dev->atalk_ptr = self;
137
138 self->state = LAP_OFFLINE;
139
140 /* Initialize transmit queue */
141 skb_queue_head_init(&self->txq);
142 skb_queue_head_init(&self->txq_ultra);
143 skb_queue_head_init(&self->wx_list);
144
145 /* My unique IrLAP device address! */
146	/* We don't want the broadcast address, nor the NULL address
147 * (most often used to signify "invalid"), and we don't want an
148 * address already in use (otherwise connect won't be able
149 * to select the proper link). - Jean II */
150 do {
151 get_random_bytes(&self->saddr, sizeof(self->saddr));
152 } while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
153 (hashbin_lock_find(irlap, self->saddr, NULL)) );
154 /* Copy to the driver */
155 memcpy(dev->dev_addr, &self->saddr, 4);
156
157 init_timer(&self->slot_timer);
158 init_timer(&self->query_timer);
159 init_timer(&self->discovery_timer);
160 init_timer(&self->final_timer);
161 init_timer(&self->poll_timer);
162 init_timer(&self->wd_timer);
163 init_timer(&self->backoff_timer);
164 init_timer(&self->media_busy_timer);
165
166 irlap_apply_default_connection_parameters(self);
167
168	self->N3 = 3; /* # connection attempts to try before giving up */
169
170 self->state = LAP_NDM;
171
172 hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);
173
174 irlmp_register_link(self, self->saddr, &self->notify);
175
176 return self;
177}
178EXPORT_SYMBOL(irlap_open);
179
180/*
181 * Function __irlap_close (self)
182 *
183 * Remove IrLAP and all allocated memory. Stop any pending timers.
184 *
185 */
186static void __irlap_close(struct irlap_cb *self)
187{
188 IRDA_ASSERT(self != NULL, return;);
189 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
190
191 /* Stop timers */
192 del_timer(&self->slot_timer);
193 del_timer(&self->query_timer);
194 del_timer(&self->discovery_timer);
195 del_timer(&self->final_timer);
196 del_timer(&self->poll_timer);
197 del_timer(&self->wd_timer);
198 del_timer(&self->backoff_timer);
199 del_timer(&self->media_busy_timer);
200
201 irlap_flush_all_queues(self);
202
203 self->magic = 0;
204
205 kfree(self);
206}
207
208/*
209 * Function irlap_close (self)
210 *
211 * Remove IrLAP instance
212 *
213 */
214void irlap_close(struct irlap_cb *self)
215{
216 struct irlap_cb *lap;
217
218 IRDA_DEBUG(4, "%s()\n", __func__);
219
220 IRDA_ASSERT(self != NULL, return;);
221 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
222
223 /* We used to send a LAP_DISC_INDICATION here, but this was
224	 * racy. This has been moved within irlmp_unregister_link()
225 * itself. Jean II */
226
227 /* Kill the LAP and all LSAPs on top of it */
228 irlmp_unregister_link(self->saddr);
229 self->notify.instance = NULL;
230
231	/* Be sure that we manage to remove ourselves from the hash */
232 lap = hashbin_remove(irlap, self->saddr, NULL);
233 if (!lap) {
234 IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__);
235 return;
236 }
237 __irlap_close(lap);
238}
239EXPORT_SYMBOL(irlap_close);
240
241/*
242 * Function irlap_connect_indication (self, skb)
243 *
244 * Another device is attempting to make a connection
245 *
246 */
247void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
248{
249 IRDA_DEBUG(4, "%s()\n", __func__);
250
251 IRDA_ASSERT(self != NULL, return;);
252 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
253
254 irlap_init_qos_capabilities(self, NULL); /* No user QoS! */
255
256 irlmp_link_connect_indication(self->notify.instance, self->saddr,
257 self->daddr, &self->qos_tx, skb);
258}
259
260/*
261 * Function irlap_connect_response (self, skb)
262 *
263 * Service user has accepted incoming connection
264 *
265 */
266void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
267{
268 IRDA_DEBUG(4, "%s()\n", __func__);
269
270 irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
271}
272
273/*
274 * Function irlap_connect_request (self, daddr, qos_user, sniff)
275 *
276 * Request connection with another device, sniffing is not implemented
277 * yet.
278 *
279 */
280void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
281 struct qos_info *qos_user, int sniff)
282{
283 IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr);
284
285 IRDA_ASSERT(self != NULL, return;);
286 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
287
288 self->daddr = daddr;
289
290 /*
291 * If the service user specifies QoS values for this connection,
292 * then use them
293 */
294 irlap_init_qos_capabilities(self, qos_user);
295
296 if ((self->state == LAP_NDM) && !self->media_busy)
297 irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
298 else
299 self->connect_pending = TRUE;
300}
301
302/*
303 * Function irlap_connect_confirm (self, skb)
304 *
305 * Connection request has been accepted
306 *
307 */
308void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
309{
310 IRDA_DEBUG(4, "%s()\n", __func__);
311
312 IRDA_ASSERT(self != NULL, return;);
313 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
314
315 irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
316}
317
318/*
319 * Function irlap_data_indication (self, skb)
320 *
321 * Received data frames from IR-port, so we just pass them up to
322 * IrLMP for further processing
323 *
324 */
325void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
326 int unreliable)
327{
328 /* Hide LAP header from IrLMP layer */
329 skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
330
331 irlmp_link_data_indication(self->notify.instance, skb, unreliable);
332}
333
334
335/*
336 * Function irlap_data_request (self, skb)
337 *
338 * Queue data for transmission, must wait until XMIT state
339 *
340 */
341void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
342 int unreliable)
343{
344 IRDA_ASSERT(self != NULL, return;);
345 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
346
347 IRDA_DEBUG(3, "%s()\n", __func__);
348
349 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
350 return;);
351 skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
352
353 /*
354 * Must set frame format now so that the rest of the code knows
355	 * if it's dealing with an I or a UI frame
356 */
357 if (unreliable)
358 skb->data[1] = UI_FRAME;
359 else
360 skb->data[1] = I_FRAME;
361
362 /* Don't forget to refcount it - see irlmp_connect_request(). */
363 skb_get(skb);
364
365 /* Add at the end of the queue (keep ordering) - Jean II */
366 skb_queue_tail(&self->txq, skb);
367
368 /*
369	 * Send an event for this frame only if we are in the right state
370 * FIXME: udata should be sent first! (skb_queue_head?)
371 */
372 if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
373 /* If we are not already processing the Tx queue, trigger
374 * transmission immediately - Jean II */
375 if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
376 irlap_do_event(self, DATA_REQUEST, skb, NULL);
377 /* Otherwise, the packets will be sent normally at the
378 * next pf-poll - Jean II */
379 }
380}
381
382/*
383 * Function irlap_unitdata_request (self, skb)
384 *
385 * Send Ultra data. This is data that must be sent outside any connection
386 *
387 */
388#ifdef CONFIG_IRDA_ULTRA
389void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
390{
391 IRDA_ASSERT(self != NULL, return;);
392 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
393
394 IRDA_DEBUG(3, "%s()\n", __func__);
395
396 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
397 return;);
398 skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
399
400 skb->data[0] = CBROADCAST;
401 skb->data[1] = UI_FRAME;
402
403 /* Don't need to refcount, see irlmp_connless_data_request() */
404
405 skb_queue_tail(&self->txq_ultra, skb);
406
407 irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
408}
409#endif /*CONFIG_IRDA_ULTRA */
410
411/*
412 * Function irlap_unitdata_indication (self, skb)
413 *
414 * Receive Ultra data. This is data that is received outside any connection
415 *
416 */
417#ifdef CONFIG_IRDA_ULTRA
418void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
419{
420 IRDA_DEBUG(1, "%s()\n", __func__);
421
422 IRDA_ASSERT(self != NULL, return;);
423 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
424 IRDA_ASSERT(skb != NULL, return;);
425
426 /* Hide LAP header from IrLMP layer */
427 skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
428
429 irlmp_link_unitdata_indication(self->notify.instance, skb);
430}
431#endif /* CONFIG_IRDA_ULTRA */
432
433/*
434 * Function irlap_disconnect_request (void)
435 *
436 * Request to disconnect connection by service user
437 */
438void irlap_disconnect_request(struct irlap_cb *self)
439{
440 IRDA_DEBUG(3, "%s()\n", __func__);
441
442 IRDA_ASSERT(self != NULL, return;);
443 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
444
445 /* Don't disconnect until all data frames are successfully sent */
446 if (!skb_queue_empty(&self->txq)) {
447 self->disconnect_pending = TRUE;
448 return;
449 }
450
451 /* Check if we are in the right state for disconnecting */
452 switch (self->state) {
453 case LAP_XMIT_P: /* FALLTHROUGH */
454 case LAP_XMIT_S: /* FALLTHROUGH */
455 case LAP_CONN: /* FALLTHROUGH */
456 case LAP_RESET_WAIT: /* FALLTHROUGH */
457 case LAP_RESET_CHECK:
458 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
459 break;
460 default:
461 IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__);
462 self->disconnect_pending = TRUE;
463 break;
464 }
465}
466
467/*
468 * Function irlap_disconnect_indication (void)
469 *
470 * Disconnect request from other device
471 *
472 */
473void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
474{
475 IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]);
476
477 IRDA_ASSERT(self != NULL, return;);
478 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
479
480 /* Flush queues */
481 irlap_flush_all_queues(self);
482
483 switch (reason) {
484 case LAP_RESET_INDICATION:
485 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
486 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
487 break;
488 case LAP_NO_RESPONSE: /* FALLTHROUGH */
489 case LAP_DISC_INDICATION: /* FALLTHROUGH */
490 case LAP_FOUND_NONE: /* FALLTHROUGH */
491 case LAP_MEDIA_BUSY:
492 irlmp_link_disconnect_indication(self->notify.instance, self,
493 reason, NULL);
494 break;
495 default:
496 IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason);
497 }
498}
499
500/*
501 * Function irlap_discovery_request (gen_addr_bit)
502 *
503 * Start one single discovery operation.
504 *
505 */
506void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
507{
508 struct irlap_info info;
509
510 IRDA_ASSERT(self != NULL, return;);
511 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
512 IRDA_ASSERT(discovery != NULL, return;);
513
514 IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots);
515
516 IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
517 (discovery->nslots == 8) || (discovery->nslots == 16),
518 return;);
519
520 /* Discovery is only possible in NDM mode */
521 if (self->state != LAP_NDM) {
522 IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
523 __func__);
524 irlap_discovery_confirm(self, NULL);
525 /* Note : in theory, if we are not in NDM, we could postpone
526 * the discovery like we do for connection request.
527 * In practice, it's not worth it. If the media was busy,
528 * it's likely next time around it won't be busy. If we are
529 * in REPLY state, we will get passive discovery info & event.
530 * Jean II */
531 return;
532 }
533
534 /* Check if last discovery request finished in time, or if
535 * it was aborted due to the media busy flag. */
536 if (self->discovery_log != NULL) {
537 hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
538 self->discovery_log = NULL;
539 }
540
541 /* All operations will occur at predictable time, no need to lock */
542 self->discovery_log = hashbin_new(HB_NOLOCK);
543
544 if (self->discovery_log == NULL) {
545 IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
546 __func__);
547 return;
548 }
549
550 info.S = discovery->nslots; /* Number of slots */
551 info.s = 0; /* Current slot */
552
553 self->discovery_cmd = discovery;
554 info.discovery = discovery;
555
556 /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
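	/* The sysctl value is in milliseconds; the slot timer wants jiffies. */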
557 self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
558
559 irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
560}
561
562/*
563 * Function irlap_discovery_confirm (log)
564 *
565 * A device has been discovered in front of this station, we
566 * report directly to LMP.
567 */
568void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
569{
570 IRDA_ASSERT(self != NULL, return;);
571 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
572
573 IRDA_ASSERT(self->notify.instance != NULL, return;);
574
575 /*
576 * Check for successful discovery, since we are then allowed to clear
577 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
578 * us to make connection attempts much faster and easier (i.e. no
579 * collisions).
580	 * Setting media busy to false will also generate an event, allowing
581	 * us to process pending events in the NDM state machine.
582	 * Note : the spec doesn't define what a successful discovery is.
583 * If we want Ultra to work, it's successful even if there is
584 * nobody discovered - Jean II
585 */
586 if (discovery_log)
587 irda_device_set_media_busy(self->netdev, FALSE);
588
589 /* Inform IrLMP */
590 irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
591}
592
593/*
594 * Function irlap_discovery_indication (log)
595 *
596 * Somebody is trying to discover us!
597 *
598 */
599void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
600{
601 IRDA_DEBUG(4, "%s()\n", __func__);
602
603 IRDA_ASSERT(self != NULL, return;);
604 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
605 IRDA_ASSERT(discovery != NULL, return;);
606
607 IRDA_ASSERT(self->notify.instance != NULL, return;);
608
609 /* A device is very likely to connect immediately after it performs
610 * a successful discovery. This means that in our case, we are much
611 * more likely to receive a connection request over the medium.
612	 * So, we back off to avoid collisions.
613	 * IrLAP spec 6.13.4 suggests 100ms...
614	 * Note : this little trick actually makes a *BIG* difference. If I set
615 * my Linux box with discovery enabled and one Ultra frame sent every
616 * second, my Palm has no trouble connecting to it every time !
617 * Jean II */
618 irda_device_set_media_busy(self->netdev, SMALL);
619
620 irlmp_link_discovery_indication(self->notify.instance, discovery);
621}
622
623/*
624 * Function irlap_status_indication (quality_of_link)
625 */
626void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
627{
628 switch (quality_of_link) {
629 case STATUS_NO_ACTIVITY:
630 IRDA_MESSAGE("IrLAP, no activity on link!\n");
631 break;
632 case STATUS_NOISY:
633 IRDA_MESSAGE("IrLAP, noisy link!\n");
634 break;
635 default:
636 break;
637 }
638 irlmp_status_indication(self->notify.instance,
639 quality_of_link, LOCK_NO_CHANGE);
640}
641
642/*
643 * Function irlap_reset_indication (void)
644 */
645void irlap_reset_indication(struct irlap_cb *self)
646{
647 IRDA_DEBUG(1, "%s()\n", __func__);
648
649 IRDA_ASSERT(self != NULL, return;);
650 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
651
652 if (self->state == LAP_RESET_WAIT)
653 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
654 else
655 irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
656}
657
658/*
659 * Function irlap_reset_confirm (void)
660 */
661void irlap_reset_confirm(void)
662{
663 IRDA_DEBUG(1, "%s()\n", __func__);
664}
665
666/*
667 * Function irlap_generate_rand_time_slot (S, s)
668 *
669 * Generate a random time slot between s and S-1 where
670 * S = Number of slots (0 -> S-1)
671 * s = Current slot
672 */
673int irlap_generate_rand_time_slot(int S, int s)
674{
675 static int rand;
676 int slot;
677
678 IRDA_ASSERT((S - s) > 0, return 0;);
679
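	/* Cheap xorshift-style mixing seeded from jiffies: we only need a
	 * roughly uniform slot number here, not strong randomness. */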
680 rand += jiffies;
681 rand ^= (rand << 12);
682 rand ^= (rand >> 20);
683
684 slot = s + rand % (S-s);
685
686	IRDA_ASSERT((slot >= s) && (slot < S), return 0;);
687
688 return slot;
689}
690
691/*
692 * Function irlap_update_nr_received (nr)
693 *
694 * Remove all acknowledged frames in current window queue. This code is
695 * not intuitive and you should not try to change it. If you think it
696 * contains bugs, please mail a patch to the author instead.
697 */
698void irlap_update_nr_received(struct irlap_cb *self, int nr)
699{
700 struct sk_buff *skb = NULL;
701 int count = 0;
702
703 /*
704 * Remove all the ack-ed frames from the window queue.
705 */
706
707 /*
708 * Optimize for the common case. It is most likely that the receiver
709 * will acknowledge all the frames we have sent! So in that case we
710 * delete all frames stored in window.
711 */
712 if (nr == self->vs) {
713 while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
714 dev_kfree_skb(skb);
715 }
716 /* The last acked frame is the next to send minus one */
717 self->va = nr - 1;
718 } else {
719 /* Remove all acknowledged frames in current window */
720 while ((skb_peek(&self->wx_list) != NULL) &&
721 (((self->va+1) % 8) != nr))
722 {
723 skb = skb_dequeue(&self->wx_list);
724 dev_kfree_skb(skb);
725
726 self->va = (self->va + 1) % 8;
727 count++;
728 }
729 }
730
731 /* Advance window */
732 self->window = self->window_size - skb_queue_len(&self->wx_list);
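	/* Frames we may still send = negotiated window size minus the frames
	 * still waiting in wx_list for an acknowledgement. */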
733}
734
735/*
736 * Function irlap_validate_ns_received (ns)
737 *
738 * Validate the next to send (ns) field from received frame.
739 */
740int irlap_validate_ns_received(struct irlap_cb *self, int ns)
741{
742 /* ns as expected? */
743 if (ns == self->vr)
744 return NS_EXPECTED;
745 /*
746	 * Stations are allowed to treat an invalid NS as an unexpected NS;
747	 * see IrLAP, "Recv ... with-invalid-Ns", p. 84
748 */
749 return NS_UNEXPECTED;
750
751 /* return NR_INVALID; */
752}
753/*
754 * Function irlap_validate_nr_received (nr)
755 *
756 * Validate the next to receive (nr) field from received frame.
757 *
758 */
759int irlap_validate_nr_received(struct irlap_cb *self, int nr)
760{
761 /* nr as expected? */
762 if (nr == self->vs) {
763 IRDA_DEBUG(4, "%s(), expected!\n", __func__);
764 return NR_EXPECTED;
765 }
766
767 /*
768	 * Unexpected nr? (but within the current window). First we check
769	 * whether the ns numbers of the frames in the current window wrap.
770 */
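	/* Example: with va = 6 and vs = 1 (sequence numbers have wrapped),
	 * nr = 7 or nr = 0 is still within the window (unexpected but valid),
	 * whereas nr = 3 is invalid. */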
771 if (self->va < self->vs) {
772 if ((nr >= self->va) && (nr <= self->vs))
773 return NR_UNEXPECTED;
774 } else {
775 if ((nr >= self->va) || (nr <= self->vs))
776 return NR_UNEXPECTED;
777 }
778
779 /* Invalid nr! */
780 return NR_INVALID;
781}
782
783/*
784 * Function irlap_initiate_connection_state ()
785 *
786 * Initialize the connection state parameters
787 *
788 */
789void irlap_initiate_connection_state(struct irlap_cb *self)
790{
791 IRDA_DEBUG(4, "%s()\n", __func__);
792
793 IRDA_ASSERT(self != NULL, return;);
794 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
795
796 /* Next to send and next to receive */
797 self->vs = self->vr = 0;
798
799 /* Last frame which got acked (0 - 1) % 8 */
800 self->va = 7;
801
802 self->window = 1;
803
804 self->remote_busy = FALSE;
805 self->retry_count = 0;
806}
807
808/*
809 * Function irlap_wait_min_turn_around (self, qos)
810 *
811 *    Wait the negotiated minimum turn around time; this function actually
812 *    sets the number of XBOFs that must be sent before the next transmitted
813 *    frame in order to delay for the specified amount of time. This is
814 * done to avoid using timers, and the forbidden udelay!
815 */
816void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
817{
818 __u32 min_turn_time;
819 __u32 speed;
820
821 /* Get QoS values. */
822 speed = qos->baud_rate.value;
823 min_turn_time = qos->min_turn_time.value;
824
825 /* No need to calculate XBOFs for speeds over 115200 bps */
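	/* Above SIR rates the driver is expected to enforce the turnaround
	 * delay itself from mtt_required (in microseconds); at SIR rates we
	 * pad the next frame with extra BOFs instead. */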
826 if (speed > 115200) {
827 self->mtt_required = min_turn_time;
828 return;
829 }
830
831 /*
832	 * We must send additional BOFs before the next frame to honour the
833	 * requested min turn time, so calculate how many characters (XBOFs)
834	 * are needed to cover that time period.
835 */
836 self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
837}
838
839/*
840 * Function irlap_flush_all_queues (void)
841 *
842 * Flush all queues
843 *
844 */
845void irlap_flush_all_queues(struct irlap_cb *self)
846{
847 struct sk_buff* skb;
848
849 IRDA_ASSERT(self != NULL, return;);
850 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
851
852 /* Free transmission queue */
853 while ((skb = skb_dequeue(&self->txq)) != NULL)
854 dev_kfree_skb(skb);
855
856 while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
857 dev_kfree_skb(skb);
858
859 /* Free sliding window buffered packets */
860 while ((skb = skb_dequeue(&self->wx_list)) != NULL)
861 dev_kfree_skb(skb);
862}
863
864/*
865 * Function irlap_change_speed (self, speed, now)
866 *
867 * Change the speed of the IrDA port
868 *
869 */
870static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
871{
872 struct sk_buff *skb;
873
874 IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed);
875
876 IRDA_ASSERT(self != NULL, return;);
877 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
878
879 self->speed = speed;
880
881 /* Change speed now, or just piggyback speed on frames */
882 if (now) {
883 /* Send down empty frame to trigger speed change */
884 skb = alloc_skb(0, GFP_ATOMIC);
885 if (skb)
886 irlap_queue_xmit(self, skb);
887 }
888}
889
890/*
891 * Function irlap_init_qos_capabilities (self, qos)
892 *
893 *    Initialize QoS for this IrLAP session. What we do is compute the
894 * intersection of the QoS capabilities for the user, driver and for
895 * IrLAP itself. Normally, IrLAP will not specify any values, but it can
896 * be used to restrict certain values.
897 */
898static void irlap_init_qos_capabilities(struct irlap_cb *self,
899 struct qos_info *qos_user)
900{
901 IRDA_ASSERT(self != NULL, return;);
902 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
903 IRDA_ASSERT(self->netdev != NULL, return;);
904
905 /* Start out with the maximum QoS support possible */
906 irda_init_max_qos_capabilies(&self->qos_rx);
907
908 /* Apply drivers QoS capabilities */
909 irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);
910
911 /*
912 * Check for user supplied QoS parameters. The service user is only
913 * allowed to supply these values. We check each parameter since the
914 * user may not have set all of them.
915 */
916 if (qos_user) {
917 IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__);
918
919 if (qos_user->baud_rate.bits)
920 self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
921
922 if (qos_user->max_turn_time.bits)
923 self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
924 if (qos_user->data_size.bits)
925 self->qos_rx.data_size.bits &= qos_user->data_size.bits;
926
927 if (qos_user->link_disc_time.bits)
928 self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
929 }
930
931 /* Use 500ms in IrLAP for now */
932 self->qos_rx.max_turn_time.bits &= 0x01;
933
934 /* Set data size */
935 /*self->qos_rx.data_size.bits &= 0x03;*/
936
937 irda_qos_bits_to_value(&self->qos_rx);
938}
939
940/*
941 * Function irlap_apply_default_connection_parameters (self)
942 *
943 * Use the default connection and transmission parameters
944 */
945void irlap_apply_default_connection_parameters(struct irlap_cb *self)
946{
947 IRDA_DEBUG(4, "%s()\n", __func__);
948
949 IRDA_ASSERT(self != NULL, return;);
950 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
951
952 /* xbofs : Default value in NDM */
953 self->next_bofs = 12;
954 self->bofs_count = 12;
955
956 /* NDM Speed is 9600 */
957 irlap_change_speed(self, 9600, TRUE);
958
959 /* Set mbusy when going to NDM state */
960 irda_device_set_media_busy(self->netdev, TRUE);
961
962 /*
963 * Generate random connection address for this session, which must
964 * be 7 bits wide and different from 0x00 and 0xfe
965 */
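	/* The connection address occupies the upper 7 bits of the address
	 * octet; the low bit is the C/R (command/response) flag, which is why
	 * it is masked off here. */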
966 while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
967 get_random_bytes(&self->caddr, sizeof(self->caddr));
968 self->caddr &= 0xfe;
969 }
970
971	/* Use default values until the connection has been negotiated */
972 self->slot_timeout = sysctl_slot_timeout;
973 self->final_timeout = FINAL_TIMEOUT;
974 self->poll_timeout = POLL_TIMEOUT;
975 self->wd_timeout = WD_TIMEOUT;
976
977 /* Set some default values */
978 self->qos_tx.baud_rate.value = 9600;
979 self->qos_rx.baud_rate.value = 9600;
980 self->qos_tx.max_turn_time.value = 0;
981 self->qos_rx.max_turn_time.value = 0;
982 self->qos_tx.min_turn_time.value = 0;
983 self->qos_rx.min_turn_time.value = 0;
984 self->qos_tx.data_size.value = 64;
985 self->qos_rx.data_size.value = 64;
986 self->qos_tx.window_size.value = 1;
987 self->qos_rx.window_size.value = 1;
988 self->qos_tx.additional_bofs.value = 12;
989 self->qos_rx.additional_bofs.value = 12;
990 self->qos_tx.link_disc_time.value = 0;
991 self->qos_rx.link_disc_time.value = 0;
992
993 irlap_flush_all_queues(self);
994
995 self->disconnect_pending = FALSE;
996 self->connect_pending = FALSE;
997}
998
999/*
1000 * Function irlap_apply_connection_parameters (qos, now)
1001 *
1002 * Initialize IrLAP with the negotiated QoS values
1003 *
1004 * If 'now' is false, the speed and xbofs will be changed after the next
1005 * frame is sent.
1006 *    If 'now' is true, the speed and xbofs are changed immediately
1007 */
1008void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
1009{
1010 IRDA_DEBUG(4, "%s()\n", __func__);
1011
1012 IRDA_ASSERT(self != NULL, return;);
1013 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
1014
1015 /* Set the negotiated xbofs value */
1016 self->next_bofs = self->qos_tx.additional_bofs.value;
1017 if (now)
1018 self->bofs_count = self->next_bofs;
1019
1020 /* Set the negotiated link speed (may need the new xbofs value) */
1021 irlap_change_speed(self, self->qos_tx.baud_rate.value, now);
1022
1023 self->window_size = self->qos_tx.window_size.value;
1024 self->window = self->qos_tx.window_size.value;
1025
1026#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1027 /*
1028 * Calculate how many bytes it is possible to transmit before the
1029 * link must be turned around
1030 */
1031 self->line_capacity =
1032 irlap_max_line_capacity(self->qos_tx.baud_rate.value,
1033 self->qos_tx.max_turn_time.value);
1034 self->bytes_left = self->line_capacity;
1035#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1036
1037
1038 /*
1039 * Initialize timeout values, some of the rules are listed on
1040 * page 92 in IrLAP.
1041 */
1042 IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
1043 IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
1044 /* The poll timeout applies only to the primary station.
1045	 * It defines the maximum time the primary stays in XMIT mode
1046	 * before timing out and turning the link around (sending an RR).
1047	 * In other words, this is how long we can keep the pf bit in primary mode.
1048	 * Therefore, it must be less than or equal to our *OWN* max turn around.
1049 * Jean II */
1050 self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
1051 /* The Final timeout applies only to the primary station.
1052	 * It defines the maximum time the primary waits (mostly in RECV mode)
1053	 * for an answer from the secondary station before polling it again.
1054	 * Therefore, it must be greater than or equal to our *PARTNER's*
1055	 * max turn around time - Jean II */
1056 self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
1057 /* The Watchdog Bit timeout applies only to the secondary station.
1058	 * It defines the maximum time the secondary waits (mostly in RECV mode)
1059	 * for a poll from the primary station before getting annoyed.
1060	 * Therefore, it must be greater than or equal to our *PARTNER's*
1061	 * max turn around time - Jean II */
1062 self->wd_timeout = self->final_timeout * 2;
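	/* The factor of 2 gives the secondary some slack over the primary's
	 * final timeout (see the N1/N2 note below). */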
1063
1064 /*
1065	 * N1 and N2 are the maximum retry counts for *both* the final timer
1066 * and the wd timer (with a factor 2) as defined above.
1067 * After N1 retry of a timer, we give a warning to the user.
1068 * After N2 retry, we consider the link dead and disconnect it.
1069 * Jean II
1070 */
1071
1072 /*
1073 * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
1074 * 3 seconds otherwise. See page 71 in IrLAP for more details.
1075	 * Actually, it's not always 3 seconds, as we allow it to be set
1076	 * via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple
1077	 * of 2, so 1 second is the minimum we can allow. - Jean II
1078 */
1079 if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
1080 /*
1081 * If we set N1 to 0, it will trigger immediately, which is
1082 * not what we want. What we really want is to disable it,
1083 * Jean II
1084 */
1085		self->N1 = -2; /* Disable - needs to be a multiple of 2 */
1086 else
1087 self->N1 = sysctl_warn_noreply_time * 1000 /
1088 self->qos_rx.max_turn_time.value;
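	/* e.g. the default 3 s warning time with a 500 ms max turn time gives
	 * N1 = 3000 / 500 = 6 poll timeouts before we warn the user. */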
1089
1090 IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);
1091
1092 /* Set N2 to match our own disconnect time */
1093 self->N2 = self->qos_tx.link_disc_time.value * 1000 /
1094 self->qos_rx.max_turn_time.value;
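	/* e.g. a negotiated 12 s link disconnect time with a 500 ms max turn
	 * time gives N2 = 12000 / 500 = 24 retries before we give up the link. */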
1095 IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
1096}
1097
1098#ifdef CONFIG_PROC_FS
1099struct irlap_iter_state {
1100 int id;
1101};
1102
1103static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
1104{
1105 struct irlap_iter_state *iter = seq->private;
1106 struct irlap_cb *self;
1107
1108	/* Protect our access to the irlap list */
1109 spin_lock_irq(&irlap->hb_spinlock);
1110 iter->id = 0;
1111
1112 for (self = (struct irlap_cb *) hashbin_get_first(irlap);
1113 self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
1114 if (iter->id == *pos)
1115 break;
1116 ++iter->id;
1117 }
1118
1119 return self;
1120}
1121
1122static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1123{
1124 struct irlap_iter_state *iter = seq->private;
1125
1126 ++*pos;
1127 ++iter->id;
1128 return (void *) hashbin_get_next(irlap);
1129}
1130
1131static void irlap_seq_stop(struct seq_file *seq, void *v)
1132{
1133 spin_unlock_irq(&irlap->hb_spinlock);
1134}
1135
1136static int irlap_seq_show(struct seq_file *seq, void *v)
1137{
1138 const struct irlap_iter_state *iter = seq->private;
1139 const struct irlap_cb *self = v;
1140
1141 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);
1142
1143 seq_printf(seq, "irlap%d ", iter->id);
1144 seq_printf(seq, "state: %s\n",
1145 irlap_state[self->state]);
1146
1147 seq_printf(seq, " device name: %s, ",
1148 (self->netdev) ? self->netdev->name : "bug");
1149 seq_printf(seq, "hardware name: %s\n", self->hw_name);
1150
1151 seq_printf(seq, " caddr: %#02x, ", self->caddr);
1152 seq_printf(seq, "saddr: %#08x, ", self->saddr);
1153 seq_printf(seq, "daddr: %#08x\n", self->daddr);
1154
1155 seq_printf(seq, " win size: %d, ",
1156 self->window_size);
1157 seq_printf(seq, "win: %d, ", self->window);
1158#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1159 seq_printf(seq, "line capacity: %d, ",
1160 self->line_capacity);
1161 seq_printf(seq, "bytes left: %d\n", self->bytes_left);
1162#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1163 seq_printf(seq, " tx queue len: %d ",
1164 skb_queue_len(&self->txq));
1165 seq_printf(seq, "win queue len: %d ",
1166 skb_queue_len(&self->wx_list));
1167 seq_printf(seq, "rbusy: %s", self->remote_busy ?
1168 "TRUE" : "FALSE");
1169 seq_printf(seq, " mbusy: %s\n", self->media_busy ?
1170 "TRUE" : "FALSE");
1171
1172 seq_printf(seq, " retrans: %d ", self->retry_count);
1173 seq_printf(seq, "vs: %d ", self->vs);
1174 seq_printf(seq, "vr: %d ", self->vr);
1175 seq_printf(seq, "va: %d\n", self->va);
1176
1177 seq_printf(seq, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
1178
1179 seq_printf(seq, " tx\t%d\t",
1180 self->qos_tx.baud_rate.value);
1181 seq_printf(seq, "%d\t",
1182 self->qos_tx.max_turn_time.value);
1183 seq_printf(seq, "%d\t",
1184 self->qos_tx.data_size.value);
1185 seq_printf(seq, "%d\t",
1186 self->qos_tx.window_size.value);
1187 seq_printf(seq, "%d\t",
1188 self->qos_tx.additional_bofs.value);
1189 seq_printf(seq, "%d\t",
1190 self->qos_tx.min_turn_time.value);
1191 seq_printf(seq, "%d\t",
1192 self->qos_tx.link_disc_time.value);
1193 seq_printf(seq, "\n");
1194
1195 seq_printf(seq, " rx\t%d\t",
1196 self->qos_rx.baud_rate.value);
1197 seq_printf(seq, "%d\t",
1198 self->qos_rx.max_turn_time.value);
1199 seq_printf(seq, "%d\t",
1200 self->qos_rx.data_size.value);
1201 seq_printf(seq, "%d\t",
1202 self->qos_rx.window_size.value);
1203 seq_printf(seq, "%d\t",
1204 self->qos_rx.additional_bofs.value);
1205 seq_printf(seq, "%d\t",
1206 self->qos_rx.min_turn_time.value);
1207 seq_printf(seq, "%d\n",
1208 self->qos_rx.link_disc_time.value);
1209
1210 return 0;
1211}
1212
1213static const struct seq_operations irlap_seq_ops = {
1214 .start = irlap_seq_start,
1215 .next = irlap_seq_next,
1216 .stop = irlap_seq_stop,
1217 .show = irlap_seq_show,
1218};
1219
1220static int irlap_seq_open(struct inode *inode, struct file *file)
1221{
1222 if (irlap == NULL)
1223 return -EINVAL;
1224
1225 return seq_open_private(file, &irlap_seq_ops,
1226 sizeof(struct irlap_iter_state));
1227}
1228
1229const struct file_operations irlap_seq_fops = {
1230 .owner = THIS_MODULE,
1231 .open = irlap_seq_open,
1232 .read = seq_read,
1233 .llseek = seq_lseek,
1234 .release = seq_release_private,
1235};
1236
1237#endif /* CONFIG_PROC_FS */
1/*********************************************************************
2 *
3 * Filename: irlap.c
4 * Version: 1.0
5 * Description: IrLAP implementation for Linux
6 * Status: Stable
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Mon Aug 4 20:40:53 1997
9 * Modified at: Tue Dec 14 09:26:44 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see <http://www.gnu.org/licenses/>.
27 *
28 ********************************************************************/
29
30#include <linux/slab.h>
31#include <linux/string.h>
32#include <linux/skbuff.h>
33#include <linux/delay.h>
34#include <linux/proc_fs.h>
35#include <linux/init.h>
36#include <linux/random.h>
37#include <linux/module.h>
38#include <linux/seq_file.h>
39
40#include <net/irda/irda.h>
41#include <net/irda/irda_device.h>
42#include <net/irda/irqueue.h>
43#include <net/irda/irlmp.h>
44#include <net/irda/irlmp_frame.h>
45#include <net/irda/irlap_frame.h>
46#include <net/irda/irlap.h>
47#include <net/irda/timer.h>
48#include <net/irda/qos.h>
49
50static hashbin_t *irlap = NULL;
51int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
52
53/* This is the delay of missed pf period before generating an event
54 * to the application. The spec mandate 3 seconds, but in some cases
55 * it's way too long. - Jean II */
56int sysctl_warn_noreply_time = 3;
57
58extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
59static void __irlap_close(struct irlap_cb *self);
60static void irlap_init_qos_capabilities(struct irlap_cb *self,
61 struct qos_info *qos_user);
62
63#ifdef CONFIG_IRDA_DEBUG
64static const char *const lap_reasons[] = {
65 "ERROR, NOT USED",
66 "LAP_DISC_INDICATION",
67 "LAP_NO_RESPONSE",
68 "LAP_RESET_INDICATION",
69 "LAP_FOUND_NONE",
70 "LAP_MEDIA_BUSY",
71 "LAP_PRIMARY_CONFLICT",
72 "ERROR, NOT USED",
73};
74#endif /* CONFIG_IRDA_DEBUG */
75
76int __init irlap_init(void)
77{
78 /* Check if the compiler did its job properly.
79 * May happen on some ARM configuration, check with Russell King. */
80 IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
81 IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
82 IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
83 IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);
84
85 /* Allocate master array */
86 irlap = hashbin_new(HB_LOCK);
87 if (irlap == NULL) {
88 IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
89 __func__);
90 return -ENOMEM;
91 }
92
93 return 0;
94}
95
96void irlap_cleanup(void)
97{
98 IRDA_ASSERT(irlap != NULL, return;);
99
100 hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
101}
102
103/*
104 * Function irlap_open (driver)
105 *
106 * Initialize IrLAP layer
107 *
108 */
109struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
110 const char *hw_name)
111{
112 struct irlap_cb *self;
113
114 IRDA_DEBUG(4, "%s()\n", __func__);
115
116 /* Initialize the irlap structure. */
117 self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
118 if (self == NULL)
119 return NULL;
120
121 self->magic = LAP_MAGIC;
122
123 /* Make a binding between the layers */
124 self->netdev = dev;
125 self->qos_dev = qos;
126 /* Copy hardware name */
127 if(hw_name != NULL) {
128 strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
129 } else {
130 self->hw_name[0] = '\0';
131 }
132
133 /* FIXME: should we get our own field? */
134 dev->atalk_ptr = self;
135
136 self->state = LAP_OFFLINE;
137
138 /* Initialize transmit queue */
139 skb_queue_head_init(&self->txq);
140 skb_queue_head_init(&self->txq_ultra);
141 skb_queue_head_init(&self->wx_list);
142
143 /* My unique IrLAP device address! */
144 /* We don't want the broadcast address, neither the NULL address
145 * (most often used to signify "invalid"), and we don't want an
146 * address already in use (otherwise connect won't be able
147 * to select the proper link). - Jean II */
148 do {
149 get_random_bytes(&self->saddr, sizeof(self->saddr));
150 } while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
151 (hashbin_lock_find(irlap, self->saddr, NULL)) );
152 /* Copy to the driver */
153 memcpy(dev->dev_addr, &self->saddr, 4);
154
155 init_timer(&self->slot_timer);
156 init_timer(&self->query_timer);
157 init_timer(&self->discovery_timer);
158 init_timer(&self->final_timer);
159 init_timer(&self->poll_timer);
160 init_timer(&self->wd_timer);
161 init_timer(&self->backoff_timer);
162 init_timer(&self->media_busy_timer);
163
164 irlap_apply_default_connection_parameters(self);
165
166 self->N3 = 3; /* # connections attempts to try before giving up */
167
168 self->state = LAP_NDM;
169
170 hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);
171
172 irlmp_register_link(self, self->saddr, &self->notify);
173
174 return self;
175}
176EXPORT_SYMBOL(irlap_open);
177
178/*
179 * Function __irlap_close (self)
180 *
181 * Remove IrLAP and all allocated memory. Stop any pending timers.
182 *
183 */
184static void __irlap_close(struct irlap_cb *self)
185{
186 IRDA_ASSERT(self != NULL, return;);
187 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
188
189 /* Stop timers */
190 del_timer(&self->slot_timer);
191 del_timer(&self->query_timer);
192 del_timer(&self->discovery_timer);
193 del_timer(&self->final_timer);
194 del_timer(&self->poll_timer);
195 del_timer(&self->wd_timer);
196 del_timer(&self->backoff_timer);
197 del_timer(&self->media_busy_timer);
198
199 irlap_flush_all_queues(self);
200
201 self->magic = 0;
202
203 kfree(self);
204}
205
206/*
207 * Function irlap_close (self)
208 *
209 * Remove IrLAP instance
210 *
211 */
212void irlap_close(struct irlap_cb *self)
213{
214 struct irlap_cb *lap;
215
216 IRDA_DEBUG(4, "%s()\n", __func__);
217
218 IRDA_ASSERT(self != NULL, return;);
219 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
220
221 /* We used to send a LAP_DISC_INDICATION here, but this was
222 * racy. This has been move within irlmp_unregister_link()
223 * itself. Jean II */
224
225 /* Kill the LAP and all LSAPs on top of it */
226 irlmp_unregister_link(self->saddr);
227 self->notify.instance = NULL;
228
229 /* Be sure that we manage to remove ourself from the hash */
230 lap = hashbin_remove(irlap, self->saddr, NULL);
231 if (!lap) {
232 IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__);
233 return;
234 }
235 __irlap_close(lap);
236}
237EXPORT_SYMBOL(irlap_close);
238
239/*
240 * Function irlap_connect_indication (self, skb)
241 *
242 * Another device is attempting to make a connection
243 *
244 */
245void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
246{
247 IRDA_DEBUG(4, "%s()\n", __func__);
248
249 IRDA_ASSERT(self != NULL, return;);
250 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
251
252 irlap_init_qos_capabilities(self, NULL); /* No user QoS! */
253
254 irlmp_link_connect_indication(self->notify.instance, self->saddr,
255 self->daddr, &self->qos_tx, skb);
256}
257
258/*
259 * Function irlap_connect_response (self, skb)
260 *
261 * Service user has accepted incoming connection
262 *
263 */
264void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
265{
266 IRDA_DEBUG(4, "%s()\n", __func__);
267
268 irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
269}
270
271/*
272 * Function irlap_connect_request (self, daddr, qos_user, sniff)
273 *
274 * Request connection with another device, sniffing is not implemented
275 * yet.
276 *
277 */
278void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
279 struct qos_info *qos_user, int sniff)
280{
281 IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr);
282
283 IRDA_ASSERT(self != NULL, return;);
284 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
285
286 self->daddr = daddr;
287
288 /*
289 * If the service user specifies QoS values for this connection,
290 * then use them
291 */
292 irlap_init_qos_capabilities(self, qos_user);
293
294 if ((self->state == LAP_NDM) && !self->media_busy)
295 irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
296 else
297 self->connect_pending = TRUE;
298}
299
300/*
301 * Function irlap_connect_confirm (self, skb)
302 *
303 * Connection request has been accepted
304 *
305 */
306void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
307{
308 IRDA_DEBUG(4, "%s()\n", __func__);
309
310 IRDA_ASSERT(self != NULL, return;);
311 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
312
313 irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
314}
315
316/*
317 * Function irlap_data_indication (self, skb)
318 *
319 * Received data frames from IR-port, so we just pass them up to
320 * IrLMP for further processing
321 *
322 */
323void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
324 int unreliable)
325{
326 /* Hide LAP header from IrLMP layer */
327 skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
328
329 irlmp_link_data_indication(self->notify.instance, skb, unreliable);
330}
331
332
333/*
334 * Function irlap_data_request (self, skb)
335 *
336 * Queue data for transmission, must wait until XMIT state
337 *
338 */
339void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
340 int unreliable)
341{
342 IRDA_ASSERT(self != NULL, return;);
343 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
344
345 IRDA_DEBUG(3, "%s()\n", __func__);
346
347 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
348 return;);
349 skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
350
351 /*
352 * Must set frame format now so that the rest of the code knows
353 * if its dealing with an I or an UI frame
354 */
355 if (unreliable)
356 skb->data[1] = UI_FRAME;
357 else
358 skb->data[1] = I_FRAME;
359
360 /* Don't forget to refcount it - see irlmp_connect_request(). */
361 skb_get(skb);
362
363 /* Add at the end of the queue (keep ordering) - Jean II */
364 skb_queue_tail(&self->txq, skb);
365
366 /*
367 * Send event if this frame only if we are in the right state
368 * FIXME: udata should be sent first! (skb_queue_head?)
369 */
370 if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
371 /* If we are not already processing the Tx queue, trigger
372 * transmission immediately - Jean II */
373 if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
374 irlap_do_event(self, DATA_REQUEST, skb, NULL);
375 /* Otherwise, the packets will be sent normally at the
376 * next pf-poll - Jean II */
377 }
378}
379
380/*
381 * Function irlap_unitdata_request (self, skb)
382 *
383 * Send Ultra data. This is data that must be sent outside any connection
384 *
385 */
386#ifdef CONFIG_IRDA_ULTRA
387void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
388{
389 IRDA_ASSERT(self != NULL, return;);
390 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
391
392 IRDA_DEBUG(3, "%s()\n", __func__);
393
394 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
395 return;);
396 skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
397
398 skb->data[0] = CBROADCAST;
399 skb->data[1] = UI_FRAME;
400
401 /* Don't need to refcount, see irlmp_connless_data_request() */
402
403 skb_queue_tail(&self->txq_ultra, skb);
404
405 irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
406}
407#endif /*CONFIG_IRDA_ULTRA */
408
409/*
410 * Function irlap_udata_indication (self, skb)
411 *
412 * Receive Ultra data. This is data that is received outside any connection
413 *
414 */
415#ifdef CONFIG_IRDA_ULTRA
416void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
417{
418 IRDA_DEBUG(1, "%s()\n", __func__);
419
420 IRDA_ASSERT(self != NULL, return;);
421 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
422 IRDA_ASSERT(skb != NULL, return;);
423
424 /* Hide LAP header from IrLMP layer */
425 skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
426
427 irlmp_link_unitdata_indication(self->notify.instance, skb);
428}
429#endif /* CONFIG_IRDA_ULTRA */
430
431/*
432 * Function irlap_disconnect_request (void)
433 *
434 * Request to disconnect connection by service user
435 */
436void irlap_disconnect_request(struct irlap_cb *self)
437{
438 IRDA_DEBUG(3, "%s()\n", __func__);
439
440 IRDA_ASSERT(self != NULL, return;);
441 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
442
443 /* Don't disconnect until all data frames are successfully sent */
444 if (!skb_queue_empty(&self->txq)) {
445 self->disconnect_pending = TRUE;
446 return;
447 }
448
449 /* Check if we are in the right state for disconnecting */
450 switch (self->state) {
451 case LAP_XMIT_P: /* FALLTHROUGH */
452 case LAP_XMIT_S: /* FALLTHROUGH */
453 case LAP_CONN: /* FALLTHROUGH */
454 case LAP_RESET_WAIT: /* FALLTHROUGH */
455 case LAP_RESET_CHECK:
456 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
457 break;
458 default:
459 IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__);
460 self->disconnect_pending = TRUE;
461 break;
462 }
463}
464
465/*
466 * Function irlap_disconnect_indication (void)
467 *
468 * Disconnect request from other device
469 *
470 */
471void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
472{
473 IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]);
474
475 IRDA_ASSERT(self != NULL, return;);
476 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
477
478 /* Flush queues */
479 irlap_flush_all_queues(self);
480
481 switch (reason) {
482 case LAP_RESET_INDICATION:
483 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
484 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
485 break;
486 case LAP_NO_RESPONSE: /* FALLTHROUGH */
487 case LAP_DISC_INDICATION: /* FALLTHROUGH */
488 case LAP_FOUND_NONE: /* FALLTHROUGH */
489 case LAP_MEDIA_BUSY:
490 irlmp_link_disconnect_indication(self->notify.instance, self,
491 reason, NULL);
492 break;
493 default:
494 IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason);
495 }
496}
497
498/*
499 * Function irlap_discovery_request (gen_addr_bit)
500 *
501 * Start one single discovery operation.
502 *
503 */
504void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
505{
506 struct irlap_info info;
507
508 IRDA_ASSERT(self != NULL, return;);
509 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
510 IRDA_ASSERT(discovery != NULL, return;);
511
512 IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots);
513
514 IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
515 (discovery->nslots == 8) || (discovery->nslots == 16),
516 return;);
517
518 /* Discovery is only possible in NDM mode */
519 if (self->state != LAP_NDM) {
520 IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
521 __func__);
522 irlap_discovery_confirm(self, NULL);
523 /* Note : in theory, if we are not in NDM, we could postpone
524 * the discovery like we do for connection request.
525 * In practice, it's not worth it. If the media was busy,
526 * it's likely next time around it won't be busy. If we are
527 * in REPLY state, we will get passive discovery info & event.
528 * Jean II */
529 return;
530 }
531
532 /* Check if last discovery request finished in time, or if
533 * it was aborted due to the media busy flag. */
534 if (self->discovery_log != NULL) {
535 hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
536 self->discovery_log = NULL;
537 }
538
539 /* All operations will occur at predictable time, no need to lock */
540 self->discovery_log = hashbin_new(HB_NOLOCK);
541
542 if (self->discovery_log == NULL) {
543 IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
544 __func__);
545 return;
546 }
547
548 info.S = discovery->nslots; /* Number of slots */
549 info.s = 0; /* Current slot */
550
551 self->discovery_cmd = discovery;
552 info.discovery = discovery;
553
554 /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
555 self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
556
557 irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
558}
559
560/*
561 * Function irlap_discovery_confirm (log)
562 *
563 * A device has been discovered in front of this station, we
564 * report directly to LMP.
565 */
566void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
567{
568 IRDA_ASSERT(self != NULL, return;);
569 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
570
571 IRDA_ASSERT(self->notify.instance != NULL, return;);
572
573 /*
574 * Check for successful discovery, since we are then allowed to clear
575 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
576 * us to make connection attempts much faster and easier (i.e. no
577 * collisions).
578 * Setting media busy to false will also generate an event allowing
579 * to process pending events in NDM state machine.
580 * Note : the spec doesn't define what's a successful discovery is.
581 * If we want Ultra to work, it's successful even if there is
582 * nobody discovered - Jean II
583 */
584 if (discovery_log)
585 irda_device_set_media_busy(self->netdev, FALSE);
586
587 /* Inform IrLMP */
588 irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
589}
590
591/*
592 * Function irlap_discovery_indication (log)
593 *
594 * Somebody is trying to discover us!
595 *
596 */
597void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
598{
599 IRDA_DEBUG(4, "%s()\n", __func__);
600
601 IRDA_ASSERT(self != NULL, return;);
602 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
603 IRDA_ASSERT(discovery != NULL, return;);
604
605 IRDA_ASSERT(self->notify.instance != NULL, return;);
606
607 /* A device is very likely to connect immediately after it performs
608 * a successful discovery. This means that in our case, we are much
609 * more likely to receive a connection request over the medium.
610 * So, we backoff to avoid collisions.
611 * IrLAP spec 6.13.4 suggest 100ms...
612 * Note : this little trick actually make a *BIG* difference. If I set
613 * my Linux box with discovery enabled and one Ultra frame sent every
614 * second, my Palm has no trouble connecting to it every time !
615 * Jean II */
616 irda_device_set_media_busy(self->netdev, SMALL);
617
618 irlmp_link_discovery_indication(self->notify.instance, discovery);
619}
620
621/*
622 * Function irlap_status_indication (quality_of_link)
623 */
624void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
625{
626 switch (quality_of_link) {
627 case STATUS_NO_ACTIVITY:
628 IRDA_MESSAGE("IrLAP, no activity on link!\n");
629 break;
630 case STATUS_NOISY:
631 IRDA_MESSAGE("IrLAP, noisy link!\n");
632 break;
633 default:
634 break;
635 }
636 irlmp_status_indication(self->notify.instance,
637 quality_of_link, LOCK_NO_CHANGE);
638}
639
640/*
641 * Function irlap_reset_indication (void)
642 */
643void irlap_reset_indication(struct irlap_cb *self)
644{
645 IRDA_DEBUG(1, "%s()\n", __func__);
646
647 IRDA_ASSERT(self != NULL, return;);
648 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
649
650 if (self->state == LAP_RESET_WAIT)
651 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
652 else
653 irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
654}
655
656/*
657 * Function irlap_reset_confirm (void)
658 */
659void irlap_reset_confirm(void)
660{
661 IRDA_DEBUG(1, "%s()\n", __func__);
662}
663
664/*
665 * Function irlap_generate_rand_time_slot (S, s)
666 *
667 * Generate a random time slot between s and S-1 where
668 * S = Number of slots (0 -> S-1)
669 * s = Current slot
670 */
671int irlap_generate_rand_time_slot(int S, int s)
672{
673 static int rand;
674 int slot;
675
676 IRDA_ASSERT((S - s) > 0, return 0;);
677
678 rand += jiffies;
679 rand ^= (rand << 12);
680 rand ^= (rand >> 20);
681
682 slot = s + rand % (S-s);
683
684	IRDA_ASSERT((slot >= s) && (slot < S), return 0;);
685
686 return slot;
687}
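/* Worked example (hypothetical values): with S = 8 discovery slots and the
 * current slot s = 3, rand % (S - s) lies in 0..4, so the returned slot is
 * in 3..7, i.e. the reply is spread over the remaining slots of the
 * discovery window. */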
688
689/*
690 * Function irlap_update_nr_received (nr)
691 *
692 * Remove all acknowledged frames in current window queue. This code is
693 * not intuitive and you should not try to change it. If you think it
694 * contains bugs, please mail a patch to the author instead.
695 */
696void irlap_update_nr_received(struct irlap_cb *self, int nr)
697{
698 struct sk_buff *skb = NULL;
699 int count = 0;
700
701 /*
702 * Remove all the ack-ed frames from the window queue.
703 */
704
705 /*
706 * Optimize for the common case. It is most likely that the receiver
707 * will acknowledge all the frames we have sent! So in that case we
708 * delete all frames stored in window.
709 */
710 if (nr == self->vs) {
711 while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
712 dev_kfree_skb(skb);
713 }
714 /* The last acked frame is the next to send minus one */
715 self->va = nr - 1;
716 } else {
717 /* Remove all acknowledged frames in current window */
718 while ((skb_peek(&self->wx_list) != NULL) &&
719 (((self->va+1) % 8) != nr))
720 {
721 skb = skb_dequeue(&self->wx_list);
722 dev_kfree_skb(skb);
723
724 self->va = (self->va + 1) % 8;
725 count++;
726 }
727 }
728
729 /* Advance window */
730 self->window = self->window_size - skb_queue_len(&self->wx_list);
731}
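/* Worked example (hypothetical values): suppose va = 2, vs = 6 and wx_list
 * holds frames 3, 4 and 5. An nr of 6 (== vs) acks everything: the queue is
 * emptied and va becomes 5. An nr of 5 only acks frames 3 and 4: the loop
 * dequeues them, va advances to 4, and frame 5 stays queued for a possible
 * retransmission. */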
732
733/*
734 * Function irlap_validate_ns_received (ns)
735 *
736 * Validate the next to send (ns) field from received frame.
737 */
738int irlap_validate_ns_received(struct irlap_cb *self, int ns)
739{
740 /* ns as expected? */
741 if (ns == self->vr)
742 return NS_EXPECTED;
743 /*
744 * Stations are allowed to treat invalid NS as unexpected NS
745 * IrLAP, Recv ... with-invalid-Ns. p. 84
746 */
747 return NS_UNEXPECTED;
748
749 /* return NR_INVALID; */
750}
751/*
752 * Function irlap_validate_nr_received (nr)
753 *
754 * Validate the next to receive (nr) field from received frame.
755 *
756 */
757int irlap_validate_nr_received(struct irlap_cb *self, int nr)
758{
759 /* nr as expected? */
760 if (nr == self->vs) {
761 IRDA_DEBUG(4, "%s(), expected!\n", __func__);
762 return NR_EXPECTED;
763 }
764
765 /*
766 * unexpected nr? (but within current window), first we check if the
767 * ns numbers of the frames in the current window wrap.
768 */
769 if (self->va < self->vs) {
770 if ((nr >= self->va) && (nr <= self->vs))
771 return NR_UNEXPECTED;
772 } else {
773 if ((nr >= self->va) || (nr <= self->vs))
774 return NR_UNEXPECTED;
775 }
776
777 /* Invalid nr! */
778 return NR_INVALID;
779}
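/* Worked example (hypothetical values): with va = 6 and vs = 2 the window
 * has wrapped past 7, so nr values of 7, 0 and 1 fall inside the current
 * window and are reported as NR_UNEXPECTED, while e.g. nr = 4 is outside
 * the window and therefore NR_INVALID. */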
780
781/*
782 * Function irlap_initiate_connection_state ()
783 *
784 * Initialize the connection state parameters
785 *
786 */
787void irlap_initiate_connection_state(struct irlap_cb *self)
788{
789 IRDA_DEBUG(4, "%s()\n", __func__);
790
791 IRDA_ASSERT(self != NULL, return;);
792 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
793
794 /* Next to send and next to receive */
795 self->vs = self->vr = 0;
796
797 /* Last frame which got acked (0 - 1) % 8 */
798 self->va = 7;
799
800 self->window = 1;
801
802 self->remote_busy = FALSE;
803 self->retry_count = 0;
804}
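/* For reference: in IrLAP/HDLC terms, vs is V(S) (the send state variable),
 * vr is V(R) (the receive state variable) and va is V(A) (the last
 * acknowledged frame), all counted modulo 8. Starting at va = 7 simply
 * means "nothing has been acknowledged yet", since (0 - 1) % 8 == 7. */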
805
806/*
807 * Function irlap_wait_min_turn_around (self, qos)
808 *
809 * Wait the negotiated minimum turn around time. This function actually sets
810 * the number of XBOF's that must be sent before the next transmitted
811 * frame in order to delay for the specified amount of time. This is
812 * done to avoid using timers, and the forbidden udelay!
813 */
814void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
815{
816 __u32 min_turn_time;
817 __u32 speed;
818
819 /* Get QoS values. */
820 speed = qos->baud_rate.value;
821 min_turn_time = qos->min_turn_time.value;
822
823 /* No need to calculate XBOFs for speeds over 115200 bps */
824 if (speed > 115200) {
825 self->mtt_required = min_turn_time;
826 return;
827 }
828
829 /*
830 * Send additional BOF's for the next frame for the requested
831 * min turn time, so now we must calculate how many chars (XBOF's) we
832 * must send for the requested time period (min turn time)
833 */
834 self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
835}
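/* Rough illustration (numbers assumed; the exact math lives in
 * irlap_min_turn_time_in_bytes()): at 9600 bps one async character takes
 * about 10 bits / 9600 bps ~ 1.04 ms on the wire, so a requested min turn
 * time of 10 ms translates into roughly 10 extra XBOF characters before
 * the next frame. */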
836
837/*
838 * Function irlap_flush_all_queues (void)
839 *
840 * Flush all queues
841 *
842 */
843void irlap_flush_all_queues(struct irlap_cb *self)
844{
845 struct sk_buff* skb;
846
847 IRDA_ASSERT(self != NULL, return;);
848 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
849
850 /* Free transmission queue */
851 while ((skb = skb_dequeue(&self->txq)) != NULL)
852 dev_kfree_skb(skb);
853
854 while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
855 dev_kfree_skb(skb);
856
857 /* Free sliding window buffered packets */
858 while ((skb = skb_dequeue(&self->wx_list)) != NULL)
859 dev_kfree_skb(skb);
860}
861
862/*
863 * Function irlap_change_speed (self, speed, now)
864 *
865 * Change the speed of the IrDA port
866 *
867 */
868static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
869{
870 struct sk_buff *skb;
871
872 IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed);
873
874 IRDA_ASSERT(self != NULL, return;);
875 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
876
877 self->speed = speed;
878
879 /* Change speed now, or just piggyback speed on frames */
880 if (now) {
881 /* Send down empty frame to trigger speed change */
882 skb = alloc_skb(0, GFP_ATOMIC);
883 if (skb)
884 irlap_queue_xmit(self, skb);
885 }
886}
887
888/*
889 * Function irlap_init_qos_capabilities (self, qos)
890 *
891 * Initialize QoS for this IrLAP session. What we do is compute the
892 * intersection of the QoS capabilities for the user, driver and for
893 * IrLAP itself. Normally, IrLAP will not specify any values, but it can
894 * be used to restrict certain values.
895 */
896static void irlap_init_qos_capabilities(struct irlap_cb *self,
897 struct qos_info *qos_user)
898{
899 IRDA_ASSERT(self != NULL, return;);
900 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
901 IRDA_ASSERT(self->netdev != NULL, return;);
902
903 /* Start out with the maximum QoS support possible */
904 irda_init_max_qos_capabilies(&self->qos_rx);
905
906	/* Apply the driver's QoS capabilities */
907 irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);
908
909 /*
910 * Check for user supplied QoS parameters. The service user is only
911 * allowed to supply these values. We check each parameter since the
912 * user may not have set all of them.
913 */
914 if (qos_user) {
915 IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__);
916
917 if (qos_user->baud_rate.bits)
918 self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
919
920 if (qos_user->max_turn_time.bits)
921 self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
922 if (qos_user->data_size.bits)
923 self->qos_rx.data_size.bits &= qos_user->data_size.bits;
924
925 if (qos_user->link_disc_time.bits)
926 self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
927 }
928
929 /* Use 500ms in IrLAP for now */
930 self->qos_rx.max_turn_time.bits &= 0x01;
931
932 /* Set data size */
933 /*self->qos_rx.data_size.bits &= 0x03;*/
934
935 irda_qos_bits_to_value(&self->qos_rx);
936}
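/* Illustrative sketch (values assumed, not taken from this file): each QoS
 * parameter is kept as a bit mask with one bit per allowed value, so the
 * intersection is a plain AND. If the driver advertises baud rates up to
 * 115200 while the user mask only allows 9600 and 19200, the AND leaves just
 * those two bits, and irda_qos_bits_to_value() later turns the surviving
 * bits into concrete values. */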
937
938/*
939 * Function irlap_apply_default_connection_parameters (self)
940 *
941 * Use the default connection and transmission parameters
942 */
943void irlap_apply_default_connection_parameters(struct irlap_cb *self)
944{
945 IRDA_DEBUG(4, "%s()\n", __func__);
946
947 IRDA_ASSERT(self != NULL, return;);
948 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
949
950 /* xbofs : Default value in NDM */
951 self->next_bofs = 12;
952 self->bofs_count = 12;
953
954 /* NDM Speed is 9600 */
955 irlap_change_speed(self, 9600, TRUE);
956
957 /* Set mbusy when going to NDM state */
958 irda_device_set_media_busy(self->netdev, TRUE);
959
960 /*
961 * Generate a random connection address for this session, which must
962 * be 7 bits wide and different from 0x00 and 0xfe
963 */
964 while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
965 get_random_bytes(&self->caddr, sizeof(self->caddr));
966 self->caddr &= 0xfe;
967 }
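	/* Assumption (not visible in this function): the control block is
	 * zero-initialised at allocation time, so caddr starts at 0x00 and
	 * the loop above always runs at least once. Masking with 0xfe keeps
	 * the 7-bit connection address and clears bit 0, which carries the
	 * C/R flag in the frame's address field. */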
968
969	/* Use default values until the connection has been negotiated */
970 self->slot_timeout = sysctl_slot_timeout;
971 self->final_timeout = FINAL_TIMEOUT;
972 self->poll_timeout = POLL_TIMEOUT;
973 self->wd_timeout = WD_TIMEOUT;
974
975 /* Set some default values */
976 self->qos_tx.baud_rate.value = 9600;
977 self->qos_rx.baud_rate.value = 9600;
978 self->qos_tx.max_turn_time.value = 0;
979 self->qos_rx.max_turn_time.value = 0;
980 self->qos_tx.min_turn_time.value = 0;
981 self->qos_rx.min_turn_time.value = 0;
982 self->qos_tx.data_size.value = 64;
983 self->qos_rx.data_size.value = 64;
984 self->qos_tx.window_size.value = 1;
985 self->qos_rx.window_size.value = 1;
986 self->qos_tx.additional_bofs.value = 12;
987 self->qos_rx.additional_bofs.value = 12;
988 self->qos_tx.link_disc_time.value = 0;
989 self->qos_rx.link_disc_time.value = 0;
990
991 irlap_flush_all_queues(self);
992
993 self->disconnect_pending = FALSE;
994 self->connect_pending = FALSE;
995}
996
997/*
998 * Function irlap_apply_connection_parameters (self, now)
999 *
1000 * Initialize IrLAP with the negotiated QoS values
1001 *
1002 * If 'now' is false, the speed and xbofs will be changed after the next
1003 * frame is sent.
1004 * If 'now' is true, the speed and xbofs are changed immediately.
1005 */
1006void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
1007{
1008 IRDA_DEBUG(4, "%s()\n", __func__);
1009
1010 IRDA_ASSERT(self != NULL, return;);
1011 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
1012
1013 /* Set the negotiated xbofs value */
1014 self->next_bofs = self->qos_tx.additional_bofs.value;
1015 if (now)
1016 self->bofs_count = self->next_bofs;
1017
1018 /* Set the negotiated link speed (may need the new xbofs value) */
1019 irlap_change_speed(self, self->qos_tx.baud_rate.value, now);
1020
1021 self->window_size = self->qos_tx.window_size.value;
1022 self->window = self->qos_tx.window_size.value;
1023
1024#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1025 /*
1026 * Calculate how many bytes it is possible to transmit before the
1027 * link must be turned around
1028 */
1029 self->line_capacity =
1030 irlap_max_line_capacity(self->qos_tx.baud_rate.value,
1031 self->qos_tx.max_turn_time.value);
1032 self->bytes_left = self->line_capacity;
1033#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1034
1035
1036 /*
1037	 * Initialize timeout values; some of the rules are listed on
1038 * page 92 in IrLAP.
1039 */
1040 IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
1041 IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
1042	/* The poll timeout applies only to the primary station.
1043	 * It defines the maximum time the primary stays in XMIT mode
1044	 * before timing out and turning the link around (sending a RR).
1045	 * In other words, this is how long we can keep the pf bit in primary mode.
1046	 * Therefore, it must be lower than or equal to our *OWN* max turn around.
1047	 * Jean II */
1048 self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
1049	/* The Final timeout applies only to the primary station.
1050	 * It defines the maximum time the primary waits (mostly in RECV mode)
1051	 * for an answer from the secondary station before polling it again.
1052	 * Therefore, it must be greater than or equal to our *PARTNER's*
1053	 * max turn around time - Jean II */
1054 self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
1055	/* The Watchdog Bit timeout applies only to the secondary station.
1056	 * It defines the maximum time the secondary waits (mostly in RECV mode)
1057	 * for a poll from the primary station before getting annoyed.
1058	 * Therefore, it must be greater than or equal to our *PARTNER's*
1059	 * max turn around time - Jean II */
1060 self->wd_timeout = self->final_timeout * 2;
1061
1062 /*
1063	 * N1 and N2 are the maximum retry counts for *both* the final timer
1064	 * and the wd timer (with a factor 2) as defined above.
1065	 * After N1 retries of a timer, we give a warning to the user.
1066	 * After N2 retries, we consider the link dead and disconnect it.
1067 * Jean II
1068 */
1069
1070 /*
1071 * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
1072 * 3 seconds otherwise. See page 71 in IrLAP for more details.
1073	 * Actually, it's not always 3 seconds, as we allow it to be set
1074	 * via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple
1075	 * of 2, so 1 second is the minimum we can allow. - Jean II
1076 */
1077 if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
1078 /*
1079 * If we set N1 to 0, it will trigger immediately, which is
1080 * not what we want. What we really want is to disable it,
1081 * Jean II
1082 */
1083		self->N1 = -2; /* Disable - Needs to be a multiple of 2 */
1084 else
1085 self->N1 = sysctl_warn_noreply_time * 1000 /
1086 self->qos_rx.max_turn_time.value;
1087
1088 IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);
1089
1090 /* Set N2 to match our own disconnect time */
1091 self->N2 = self->qos_tx.link_disc_time.value * 1000 /
1092 self->qos_rx.max_turn_time.value;
1093 IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
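	/* Worked example (hypothetical values): with a negotiated max turn
	 * time of 500 ms, a link disconnect time of 12 s and the default
	 * sysctl_warn_noreply_time of 3 s, N1 = 3000 / 500 = 6 polls before
	 * warning the user and N2 = 12000 / 500 = 24 polls before the link
	 * is declared dead. */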
1094}
1095
1096#ifdef CONFIG_PROC_FS
1097struct irlap_iter_state {
1098 int id;
1099};
1100
1101static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
1102{
1103 struct irlap_iter_state *iter = seq->private;
1104 struct irlap_cb *self;
1105
1106	/* Protect our access to the irlap instance list */
1107 spin_lock_irq(&irlap->hb_spinlock);
1108 iter->id = 0;
1109
1110 for (self = (struct irlap_cb *) hashbin_get_first(irlap);
1111 self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
1112 if (iter->id == *pos)
1113 break;
1114 ++iter->id;
1115 }
1116
1117 return self;
1118}
1119
1120static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1121{
1122 struct irlap_iter_state *iter = seq->private;
1123
1124 ++*pos;
1125 ++iter->id;
1126 return (void *) hashbin_get_next(irlap);
1127}
1128
1129static void irlap_seq_stop(struct seq_file *seq, void *v)
1130{
1131 spin_unlock_irq(&irlap->hb_spinlock);
1132}
1133
1134static int irlap_seq_show(struct seq_file *seq, void *v)
1135{
1136 const struct irlap_iter_state *iter = seq->private;
1137 const struct irlap_cb *self = v;
1138
1139 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);
1140
1141 seq_printf(seq, "irlap%d ", iter->id);
1142 seq_printf(seq, "state: %s\n",
1143 irlap_state[self->state]);
1144
1145 seq_printf(seq, " device name: %s, ",
1146 (self->netdev) ? self->netdev->name : "bug");
1147 seq_printf(seq, "hardware name: %s\n", self->hw_name);
1148
1149 seq_printf(seq, " caddr: %#02x, ", self->caddr);
1150 seq_printf(seq, "saddr: %#08x, ", self->saddr);
1151 seq_printf(seq, "daddr: %#08x\n", self->daddr);
1152
1153 seq_printf(seq, " win size: %d, ",
1154 self->window_size);
1155 seq_printf(seq, "win: %d, ", self->window);
1156#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1157 seq_printf(seq, "line capacity: %d, ",
1158 self->line_capacity);
1159 seq_printf(seq, "bytes left: %d\n", self->bytes_left);
1160#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1161 seq_printf(seq, " tx queue len: %d ",
1162 skb_queue_len(&self->txq));
1163 seq_printf(seq, "win queue len: %d ",
1164 skb_queue_len(&self->wx_list));
1165 seq_printf(seq, "rbusy: %s", self->remote_busy ?
1166 "TRUE" : "FALSE");
1167 seq_printf(seq, " mbusy: %s\n", self->media_busy ?
1168 "TRUE" : "FALSE");
1169
1170 seq_printf(seq, " retrans: %d ", self->retry_count);
1171 seq_printf(seq, "vs: %d ", self->vs);
1172 seq_printf(seq, "vr: %d ", self->vr);
1173 seq_printf(seq, "va: %d\n", self->va);
1174
1175 seq_printf(seq, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
1176
1177 seq_printf(seq, " tx\t%d\t",
1178 self->qos_tx.baud_rate.value);
1179 seq_printf(seq, "%d\t",
1180 self->qos_tx.max_turn_time.value);
1181 seq_printf(seq, "%d\t",
1182 self->qos_tx.data_size.value);
1183 seq_printf(seq, "%d\t",
1184 self->qos_tx.window_size.value);
1185 seq_printf(seq, "%d\t",
1186 self->qos_tx.additional_bofs.value);
1187 seq_printf(seq, "%d\t",
1188 self->qos_tx.min_turn_time.value);
1189 seq_printf(seq, "%d\t",
1190 self->qos_tx.link_disc_time.value);
1191 seq_printf(seq, "\n");
1192
1193 seq_printf(seq, " rx\t%d\t",
1194 self->qos_rx.baud_rate.value);
1195 seq_printf(seq, "%d\t",
1196 self->qos_rx.max_turn_time.value);
1197 seq_printf(seq, "%d\t",
1198 self->qos_rx.data_size.value);
1199 seq_printf(seq, "%d\t",
1200 self->qos_rx.window_size.value);
1201 seq_printf(seq, "%d\t",
1202 self->qos_rx.additional_bofs.value);
1203 seq_printf(seq, "%d\t",
1204 self->qos_rx.min_turn_time.value);
1205 seq_printf(seq, "%d\n",
1206 self->qos_rx.link_disc_time.value);
1207
1208 return 0;
1209}
1210
1211static const struct seq_operations irlap_seq_ops = {
1212 .start = irlap_seq_start,
1213 .next = irlap_seq_next,
1214 .stop = irlap_seq_stop,
1215 .show = irlap_seq_show,
1216};
1217
1218static int irlap_seq_open(struct inode *inode, struct file *file)
1219{
1220 if (irlap == NULL)
1221 return -EINVAL;
1222
1223 return seq_open_private(file, &irlap_seq_ops,
1224 sizeof(struct irlap_iter_state));
1225}
1226
1227const struct file_operations irlap_seq_fops = {
1228 .owner = THIS_MODULE,
1229 .open = irlap_seq_open,
1230 .read = seq_read,
1231 .llseek = seq_lseek,
1232 .release = seq_release_private,
1233};
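/* These seq/file operations are wired up elsewhere in the IrDA stack
 * (assumption: the usual /proc/net/irda/irlap entry), so the per-link
 * state, QoS tables and queue lengths printed above can be inspected at
 * runtime with a simple read of that proc file. */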
1234
1235#endif /* CONFIG_PROC_FS */