1/*
2 * Generic HDLC support routines for Linux
3 * Frame Relay support
4 *
5 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 *
11
12 Theory of PVC state
13
14 DCE mode:
15
16 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
17 0,x -> 1,1 if "link reliable" when sending FULL STATUS
18 1,1 -> 1,0 if received FULL STATUS ACK
19
20 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
21 -> 1 when "PVC up" and (exist,new) = 1,0
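    Example (following the rules above, DCE side): a newly created PVC
    starts as (exist,new) = 0,0; once the link is reliable and a FULL
    STATUS is sent it becomes 1,1; when the DTE acknowledges that report
    it becomes 1,0, and if the PVC interface is also up, active goes to 1.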
22
23 DTE mode:
24 (exist,new,active) = FULL STATUS if "link reliable"
25 = 0, 0, 0 if "link unreliable"
26 No LMI:
27 active = open and "link reliable"
28 exist = new = not used
29
30 CCITT LMI: ITU-T Q.933 Annex A
31 ANSI LMI: ANSI T1.617 Annex D
32 CISCO LMI: the original, aka "Gang of Four" LMI
33
34*/
35
36#include <linux/errno.h>
37#include <linux/etherdevice.h>
38#include <linux/hdlc.h>
39#include <linux/if_arp.h>
40#include <linux/inetdevice.h>
41#include <linux/init.h>
42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/pkt_sched.h>
45#include <linux/poll.h>
46#include <linux/rtnetlink.h>
47#include <linux/skbuff.h>
48#include <linux/slab.h>
49
50#undef DEBUG_PKT
51#undef DEBUG_ECN
52#undef DEBUG_LINK
53#undef DEBUG_PROTO
54#undef DEBUG_PVC
55
56#define FR_UI 0x03
57#define FR_PAD 0x00
58
59#define NLPID_IP 0xCC
60#define NLPID_IPV6 0x8E
61#define NLPID_SNAP 0x80
62#define NLPID_PAD 0x00
63#define NLPID_CCITT_ANSI_LMI 0x08
64#define NLPID_CISCO_LMI 0x09
65
66
67#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */
68#define LMI_CISCO_DLCI 1023
69
70#define LMI_CALLREF 0x00 /* Call Reference */
71#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */
72#define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */
73#define LMI_CCITT_REPTYPE 0x51
74#define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */
75#define LMI_CCITT_ALIVE 0x53
76#define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */
77#define LMI_CCITT_PVCSTAT 0x57
78
79#define LMI_FULLREP 0x00 /* full report */
80#define LMI_INTEGRITY 0x01 /* link integrity report */
81#define LMI_SINGLE 0x02 /* single PVC report */
82
83#define LMI_STATUS_ENQUIRY 0x75
84#define LMI_STATUS 0x7D /* reply */
85
86#define LMI_REPT_LEN 1 /* report type element length */
87#define LMI_INTEG_LEN 2 /* link integrity element length */
88
89#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */
90#define LMI_ANSI_LENGTH 14
91
92
93typedef struct {
94#if defined(__LITTLE_ENDIAN_BITFIELD)
95 unsigned ea1: 1;
96 unsigned cr: 1;
97 unsigned dlcih: 6;
98
99 unsigned ea2: 1;
100 unsigned de: 1;
101 unsigned becn: 1;
102 unsigned fecn: 1;
103 unsigned dlcil: 4;
104#else
105 unsigned dlcih: 6;
106 unsigned cr: 1;
107 unsigned ea1: 1;
108
109 unsigned dlcil: 4;
110 unsigned fecn: 1;
111 unsigned becn: 1;
112 unsigned de: 1;
113 unsigned ea2: 1;
114#endif
115}__packed fr_hdr;
116
117
118typedef struct pvc_device_struct {
119 struct net_device *frad;
120 struct net_device *main;
121 struct net_device *ether; /* bridged Ethernet interface */
122 struct pvc_device_struct *next; /* Sorted in ascending DLCI order */
123 int dlci;
124 int open_count;
125
126 struct {
127 unsigned int new: 1;
128 unsigned int active: 1;
129 unsigned int exist: 1;
130 unsigned int deleted: 1;
131 unsigned int fecn: 1;
132 unsigned int becn: 1;
133 unsigned int bandwidth; /* Cisco LMI reporting only */
134 }state;
135}pvc_device;
136
137struct frad_state {
138 fr_proto settings;
139 pvc_device *first_pvc;
140 int dce_pvc_count;
141
142 struct timer_list timer;
143 unsigned long last_poll;
144 int reliable;
145 int dce_changed;
146 int request;
147 int fullrep_sent;
148 u32 last_errors; /* last errors bit list */
149 u8 n391cnt;
150 u8 txseq; /* TX sequence number */
151 u8 rxseq; /* RX sequence number */
152};
153
154
155static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
156
157
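/*
 * The two helpers below pack and unpack the 10-bit DLCI in the 2-byte
 * Q.922 address field: the upper 6 DLCI bits go into bits 7-2 of the
 * first byte, the lower 4 bits into bits 7-4 of the second byte, whose
 * EA bit is set. For example DLCI 16 encodes as 0x04 0x01, and DLCI
 * 1023 (the Cisco LMI DLCI) as 0xFC 0xF1.
 */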
158static inline u16 q922_to_dlci(u8 *hdr)
159{
160 return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
161}
162
163
164static inline void dlci_to_q922(u8 *hdr, u16 dlci)
165{
166 hdr[0] = (dlci >> 2) & 0xFC;
167 hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
168}
169
170
171static inline struct frad_state* state(hdlc_device *hdlc)
172{
        return (struct frad_state *)hdlc->state;
174}
175
176
177static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
178{
179 pvc_device *pvc = state(hdlc)->first_pvc;
180
181 while (pvc) {
182 if (pvc->dlci == dlci)
183 return pvc;
184 if (pvc->dlci > dlci)
185 return NULL; /* the list is sorted */
186 pvc = pvc->next;
187 }
188
189 return NULL;
190}
191
192
193static pvc_device* add_pvc(struct net_device *dev, u16 dlci)
194{
195 hdlc_device *hdlc = dev_to_hdlc(dev);
196 pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;
197
198 while (*pvc_p) {
199 if ((*pvc_p)->dlci == dlci)
200 return *pvc_p;
201 if ((*pvc_p)->dlci > dlci)
202 break; /* the list is sorted */
203 pvc_p = &(*pvc_p)->next;
204 }
205
206 pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC);
207#ifdef DEBUG_PVC
208 printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
209#endif
210 if (!pvc)
211 return NULL;
212
213 pvc->dlci = dlci;
214 pvc->frad = dev;
215 pvc->next = *pvc_p; /* Put it in the chain */
216 *pvc_p = pvc;
217 return pvc;
218}
219
220
221static inline int pvc_is_used(pvc_device *pvc)
222{
223 return pvc->main || pvc->ether;
224}
225
226
227static inline void pvc_carrier(int on, pvc_device *pvc)
228{
229 if (on) {
230 if (pvc->main)
231 if (!netif_carrier_ok(pvc->main))
232 netif_carrier_on(pvc->main);
233 if (pvc->ether)
234 if (!netif_carrier_ok(pvc->ether))
235 netif_carrier_on(pvc->ether);
236 } else {
237 if (pvc->main)
238 if (netif_carrier_ok(pvc->main))
239 netif_carrier_off(pvc->main);
240 if (pvc->ether)
241 if (netif_carrier_ok(pvc->ether))
242 netif_carrier_off(pvc->ether);
243 }
244}
245
246
247static inline void delete_unused_pvcs(hdlc_device *hdlc)
248{
249 pvc_device **pvc_p = &state(hdlc)->first_pvc;
250
251 while (*pvc_p) {
252 if (!pvc_is_used(*pvc_p)) {
253 pvc_device *pvc = *pvc_p;
254#ifdef DEBUG_PVC
255 printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
256#endif
257 *pvc_p = pvc->next;
258 kfree(pvc);
259 continue;
260 }
261 pvc_p = &(*pvc_p)->next;
262 }
263}
264
265
266static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
267{
268 if (type == ARPHRD_ETHER)
269 return &pvc->ether;
270 else
271 return &pvc->main;
272}
273
274
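/*
 * fr_hard_header() prepends the on-wire header. IP, IPv6 and LMI frames
 * get 4 bytes: Q.922 address (2), UI control field and a one-byte NLPID.
 * Bridged Ethernet and other protocols get 10 bytes: address, UI, pad,
 * NLPID_SNAP, a 3-byte OUI and a 2-byte PID. OUI 00-80-C2 with PID
 * 0x0007 marks a bridged Ethernet frame without FCS; routed protocols
 * use a zero OUI with the Ethertype as PID.
 */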
275static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
276{
277 u16 head_len;
278 struct sk_buff *skb = *skb_p;
279
280 switch (skb->protocol) {
281 case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
282 head_len = 4;
283 skb_push(skb, head_len);
284 skb->data[3] = NLPID_CCITT_ANSI_LMI;
285 break;
286
287 case cpu_to_be16(NLPID_CISCO_LMI):
288 head_len = 4;
289 skb_push(skb, head_len);
290 skb->data[3] = NLPID_CISCO_LMI;
291 break;
292
293 case cpu_to_be16(ETH_P_IP):
294 head_len = 4;
295 skb_push(skb, head_len);
296 skb->data[3] = NLPID_IP;
297 break;
298
299 case cpu_to_be16(ETH_P_IPV6):
300 head_len = 4;
301 skb_push(skb, head_len);
302 skb->data[3] = NLPID_IPV6;
303 break;
304
305 case cpu_to_be16(ETH_P_802_3):
306 head_len = 10;
307 if (skb_headroom(skb) < head_len) {
308 struct sk_buff *skb2 = skb_realloc_headroom(skb,
309 head_len);
310 if (!skb2)
311 return -ENOBUFS;
312 dev_kfree_skb(skb);
313 skb = *skb_p = skb2;
314 }
315 skb_push(skb, head_len);
316 skb->data[3] = FR_PAD;
317 skb->data[4] = NLPID_SNAP;
318 skb->data[5] = FR_PAD;
319 skb->data[6] = 0x80;
320 skb->data[7] = 0xC2;
321 skb->data[8] = 0x00;
322 skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
323 break;
324
325 default:
326 head_len = 10;
327 skb_push(skb, head_len);
328 skb->data[3] = FR_PAD;
329 skb->data[4] = NLPID_SNAP;
330 skb->data[5] = FR_PAD;
331 skb->data[6] = FR_PAD;
332 skb->data[7] = FR_PAD;
333 *(__be16*)(skb->data + 8) = skb->protocol;
334 }
335
336 dlci_to_q922(skb->data, dlci);
337 skb->data[2] = FR_UI;
338 return 0;
339}
340
341
342
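/*
 * A PVC can only be brought up while its FRAD is up. open_count covers
 * both the routed (pvc) and bridged (pvceth) devices sharing the DLCI;
 * without LMI the PVC state simply follows the FRAD's carrier.
 */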
343static int pvc_open(struct net_device *dev)
344{
345 pvc_device *pvc = dev->ml_priv;
346
347 if ((pvc->frad->flags & IFF_UP) == 0)
348 return -EIO; /* Frad must be UP in order to activate PVC */
349
350 if (pvc->open_count++ == 0) {
351 hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
352 if (state(hdlc)->settings.lmi == LMI_NONE)
353 pvc->state.active = netif_carrier_ok(pvc->frad);
354
355 pvc_carrier(pvc->state.active, pvc);
356 state(hdlc)->dce_changed = 1;
357 }
358 return 0;
359}
360
361
362
363static int pvc_close(struct net_device *dev)
364{
365 pvc_device *pvc = dev->ml_priv;
366
367 if (--pvc->open_count == 0) {
368 hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
369 if (state(hdlc)->settings.lmi == LMI_NONE)
370 pvc->state.active = 0;
371
372 if (state(hdlc)->settings.dce) {
373 state(hdlc)->dce_changed = 1;
374 pvc->state.active = 0;
375 }
376 }
377 return 0;
378}
379
380
381
382static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
383{
384 pvc_device *pvc = dev->ml_priv;
385 fr_proto_pvc_info info;
386
387 if (ifr->ifr_settings.type == IF_GET_PROTO) {
388 if (dev->type == ARPHRD_ETHER)
389 ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
390 else
391 ifr->ifr_settings.type = IF_PROTO_FR_PVC;
392
393 if (ifr->ifr_settings.size < sizeof(info)) {
394 /* data size wanted */
395 ifr->ifr_settings.size = sizeof(info);
396 return -ENOBUFS;
397 }
398
399 info.dlci = pvc->dlci;
400 memcpy(info.master, pvc->frad->name, IFNAMSIZ);
401 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
402 &info, sizeof(info)))
403 return -EFAULT;
404 return 0;
405 }
406
407 return -EINVAL;
408}
409
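/*
 * PVC transmit path: bridged Ethernet frames are padded to the 60-byte
 * ETH_ZLEN minimum, then fr_hard_header() prepends the Frame Relay
 * header and the skb is requeued on the FRAD device. tx_compressed
 * doubles as the FECN (TX congestion) counter.
 */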
410static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
411{
412 pvc_device *pvc = dev->ml_priv;
413
414 if (pvc->state.active) {
415 if (dev->type == ARPHRD_ETHER) {
416 int pad = ETH_ZLEN - skb->len;
417 if (pad > 0) { /* Pad the frame with zeros */
418 int len = skb->len;
419 if (skb_tailroom(skb) < pad)
420 if (pskb_expand_head(skb, 0, pad,
421 GFP_ATOMIC)) {
422 dev->stats.tx_dropped++;
423 dev_kfree_skb(skb);
424 return NETDEV_TX_OK;
425 }
426 skb_put(skb, pad);
427 memset(skb->data + len, 0, pad);
428 }
429 skb->protocol = cpu_to_be16(ETH_P_802_3);
430 }
431 if (!fr_hard_header(&skb, pvc->dlci)) {
432 dev->stats.tx_bytes += skb->len;
433 dev->stats.tx_packets++;
434 if (pvc->state.fecn) /* TX Congestion counter */
435 dev->stats.tx_compressed++;
436 skb->dev = pvc->frad;
437 dev_queue_xmit(skb);
438 return NETDEV_TX_OK;
439 }
440 }
441
442 dev->stats.tx_dropped++;
443 dev_kfree_skb(skb);
444 return NETDEV_TX_OK;
445}
446
447static inline void fr_log_dlci_active(pvc_device *pvc)
448{
449 netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
450 pvc->dlci,
451 pvc->main ? pvc->main->name : "",
452 pvc->main && pvc->ether ? " " : "",
453 pvc->ether ? pvc->ether->name : "",
454 pvc->state.new ? " new" : "",
455 !pvc->state.exist ? "deleted" :
456 pvc->state.active ? "active" : "inactive");
457}
458
459
460
461static inline u8 fr_lmi_nextseq(u8 x)
462{
463 x++;
464 return x ? x : 1;
465}
466
467
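/*
 * Builds an LMI STATUS (DCE) or STATUS ENQUIRY (DTE) message: call
 * reference, message type, an ANSI locking shift where required, a
 * report type IE (full report or link integrity only) and a link
 * integrity IE carrying our TX sequence number plus the last sequence
 * number received from the peer. A DCE full report also appends one
 * PVC status IE per configured PVC.
 */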
468static void fr_lmi_send(struct net_device *dev, int fullrep)
469{
470 hdlc_device *hdlc = dev_to_hdlc(dev);
471 struct sk_buff *skb;
472 pvc_device *pvc = state(hdlc)->first_pvc;
473 int lmi = state(hdlc)->settings.lmi;
474 int dce = state(hdlc)->settings.dce;
475 int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
476 int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
477 u8 *data;
478 int i = 0;
479
480 if (dce && fullrep) {
481 len += state(hdlc)->dce_pvc_count * (2 + stat_len);
482 if (len > HDLC_MAX_MRU) {
483 netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
484 return;
485 }
486 }
487
488 skb = dev_alloc_skb(len);
489 if (!skb) {
490 netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
491 return;
492 }
493 memset(skb->data, 0, len);
494 skb_reserve(skb, 4);
495 if (lmi == LMI_CISCO) {
496 skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
497 fr_hard_header(&skb, LMI_CISCO_DLCI);
498 } else {
499 skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
500 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
501 }
502 data = skb_tail_pointer(skb);
503 data[i++] = LMI_CALLREF;
504 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
505 if (lmi == LMI_ANSI)
506 data[i++] = LMI_ANSI_LOCKSHIFT;
507 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
508 LMI_ANSI_CISCO_REPTYPE;
509 data[i++] = LMI_REPT_LEN;
510 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
511 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
512 data[i++] = LMI_INTEG_LEN;
513 data[i++] = state(hdlc)->txseq =
514 fr_lmi_nextseq(state(hdlc)->txseq);
515 data[i++] = state(hdlc)->rxseq;
516
517 if (dce && fullrep) {
518 while (pvc) {
519 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
520 LMI_ANSI_CISCO_PVCSTAT;
521 data[i++] = stat_len;
522
523 /* LMI start/restart */
524 if (state(hdlc)->reliable && !pvc->state.exist) {
525 pvc->state.exist = pvc->state.new = 1;
526 fr_log_dlci_active(pvc);
527 }
528
529 /* ifconfig PVC up */
530 if (pvc->open_count && !pvc->state.active &&
531 pvc->state.exist && !pvc->state.new) {
532 pvc_carrier(1, pvc);
533 pvc->state.active = 1;
534 fr_log_dlci_active(pvc);
535 }
536
537 if (lmi == LMI_CISCO) {
538 data[i] = pvc->dlci >> 8;
539 data[i + 1] = pvc->dlci & 0xFF;
540 } else {
541 data[i] = (pvc->dlci >> 4) & 0x3F;
542 data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
543 data[i + 2] = 0x80;
544 }
545
546 if (pvc->state.new)
547 data[i + 2] |= 0x08;
548 else if (pvc->state.active)
549 data[i + 2] |= 0x02;
550
551 i += stat_len;
552 pvc = pvc->next;
553 }
554 }
555
556 skb_put(skb, i);
557 skb->priority = TC_PRIO_CONTROL;
558 skb->dev = dev;
559 skb_reset_network_header(skb);
560
561 dev_queue_xmit(skb);
562}
563
564
565
566static void fr_set_link_state(int reliable, struct net_device *dev)
567{
568 hdlc_device *hdlc = dev_to_hdlc(dev);
569 pvc_device *pvc = state(hdlc)->first_pvc;
570
571 state(hdlc)->reliable = reliable;
572 if (reliable) {
573 netif_dormant_off(dev);
574 state(hdlc)->n391cnt = 0; /* Request full status */
575 state(hdlc)->dce_changed = 1;
576
577 if (state(hdlc)->settings.lmi == LMI_NONE) {
578 while (pvc) { /* Activate all PVCs */
579 pvc_carrier(1, pvc);
580 pvc->state.exist = pvc->state.active = 1;
581 pvc->state.new = 0;
582 pvc = pvc->next;
583 }
584 }
585 } else {
586 netif_dormant_on(dev);
587 while (pvc) { /* Deactivate all PVCs */
588 pvc_carrier(0, pvc);
589 pvc->state.exist = pvc->state.active = 0;
590 pvc->state.new = 0;
591 if (!state(hdlc)->settings.dce)
592 pvc->state.bandwidth = 0;
593 pvc = pvc->next;
594 }
595 }
596}
597
598
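/*
 * Periodic LMI housekeeping. As DTE we send a STATUS ENQUIRY every T391
 * seconds, request a full report on every N391th poll, and declare the
 * link unreliable once N392 of the last N393 polls went unanswered. As
 * DCE we only verify that an enquiry arrived within the last T392
 * seconds.
 */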
599static void fr_timer(unsigned long arg)
600{
601 struct net_device *dev = (struct net_device *)arg;
602 hdlc_device *hdlc = dev_to_hdlc(dev);
603 int i, cnt = 0, reliable;
604 u32 list;
605
606 if (state(hdlc)->settings.dce) {
607 reliable = state(hdlc)->request &&
608 time_before(jiffies, state(hdlc)->last_poll +
609 state(hdlc)->settings.t392 * HZ);
610 state(hdlc)->request = 0;
611 } else {
612 state(hdlc)->last_errors <<= 1; /* Shift the list */
613 if (state(hdlc)->request) {
614 if (state(hdlc)->reliable)
615 netdev_info(dev, "No LMI status reply received\n");
616 state(hdlc)->last_errors |= 1;
617 }
618
619 list = state(hdlc)->last_errors;
620 for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
621 cnt += (list & 1); /* errors count */
622
623 reliable = (cnt < state(hdlc)->settings.n392);
624 }
625
626 if (state(hdlc)->reliable != reliable) {
627 netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
628 fr_set_link_state(reliable, dev);
629 }
630
631 if (state(hdlc)->settings.dce)
632 state(hdlc)->timer.expires = jiffies +
633 state(hdlc)->settings.t392 * HZ;
634 else {
635 if (state(hdlc)->n391cnt)
636 state(hdlc)->n391cnt--;
637
638 fr_lmi_send(dev, state(hdlc)->n391cnt == 0);
639
640 state(hdlc)->last_poll = jiffies;
641 state(hdlc)->request = 1;
642 state(hdlc)->timer.expires = jiffies +
643 state(hdlc)->settings.t391 * HZ;
644 }
645
646 state(hdlc)->timer.function = fr_timer;
647 state(hdlc)->timer.data = arg;
648 add_timer(&state(hdlc)->timer);
649}
650
651
652static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
653{
654 hdlc_device *hdlc = dev_to_hdlc(dev);
655 pvc_device *pvc;
656 u8 rxseq, txseq;
657 int lmi = state(hdlc)->settings.lmi;
658 int dce = state(hdlc)->settings.dce;
659 int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
660
661 if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
662 LMI_CCITT_CISCO_LENGTH)) {
663 netdev_info(dev, "Short LMI frame\n");
664 return 1;
665 }
666
667 if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
668 NLPID_CCITT_ANSI_LMI)) {
669 netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
670 return 1;
671 }
672
673 if (skb->data[4] != LMI_CALLREF) {
674 netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
675 skb->data[4]);
676 return 1;
677 }
678
679 if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
680 netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
681 skb->data[5]);
682 return 1;
683 }
684
685 if (lmi == LMI_ANSI) {
686 if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
687 netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
688 skb->data[6]);
689 return 1;
690 }
691 i = 7;
692 } else
693 i = 6;
694
695 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
696 LMI_ANSI_CISCO_REPTYPE)) {
697 netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
698 skb->data[i]);
699 return 1;
700 }
701
702 if (skb->data[++i] != LMI_REPT_LEN) {
703 netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
704 skb->data[i]);
705 return 1;
706 }
707
708 reptype = skb->data[++i];
709 if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
710 netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
711 reptype);
712 return 1;
713 }
714
715 if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
716 LMI_ANSI_CISCO_ALIVE)) {
717 netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
718 skb->data[i]);
719 return 1;
720 }
721
722 if (skb->data[++i] != LMI_INTEG_LEN) {
723 netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
724 skb->data[i]);
725 return 1;
726 }
727 i++;
728
729 state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
730 rxseq = skb->data[i++]; /* Should confirm our sequence */
731
732 txseq = state(hdlc)->txseq;
733
734 if (dce)
735 state(hdlc)->last_poll = jiffies;
736
737 error = 0;
738 if (!state(hdlc)->reliable)
739 error = 1;
740
741 if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
742 state(hdlc)->n391cnt = 0;
743 error = 1;
744 }
745
746 if (dce) {
747 if (state(hdlc)->fullrep_sent && !error) {
748/* Stop sending full report - the last one has been confirmed by DTE */
749 state(hdlc)->fullrep_sent = 0;
750 pvc = state(hdlc)->first_pvc;
751 while (pvc) {
752 if (pvc->state.new) {
753 pvc->state.new = 0;
754
755/* Tell DTE that new PVC is now active */
756 state(hdlc)->dce_changed = 1;
757 }
758 pvc = pvc->next;
759 }
760 }
761
762 if (state(hdlc)->dce_changed) {
763 reptype = LMI_FULLREP;
764 state(hdlc)->fullrep_sent = 1;
765 state(hdlc)->dce_changed = 0;
766 }
767
768 state(hdlc)->request = 1; /* got request */
769 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
770 return 0;
771 }
772
773 /* DTE */
774
775 state(hdlc)->request = 0; /* got response, no request pending */
776
777 if (error)
778 return 0;
779
780 if (reptype != LMI_FULLREP)
781 return 0;
782
783 pvc = state(hdlc)->first_pvc;
784
785 while (pvc) {
786 pvc->state.deleted = 1;
787 pvc = pvc->next;
788 }
789
790 no_ram = 0;
791 while (skb->len >= i + 2 + stat_len) {
792 u16 dlci;
793 u32 bw;
794 unsigned int active, new;
795
796 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
797 LMI_ANSI_CISCO_PVCSTAT)) {
798 netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
799 skb->data[i]);
800 return 1;
801 }
802
803 if (skb->data[++i] != stat_len) {
804 netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
805 skb->data[i]);
806 return 1;
807 }
808 i++;
809
810 new = !! (skb->data[i + 2] & 0x08);
811 active = !! (skb->data[i + 2] & 0x02);
812 if (lmi == LMI_CISCO) {
813 dlci = (skb->data[i] << 8) | skb->data[i + 1];
814 bw = (skb->data[i + 3] << 16) |
815 (skb->data[i + 4] << 8) |
816 (skb->data[i + 5]);
817 } else {
818 dlci = ((skb->data[i] & 0x3F) << 4) |
819 ((skb->data[i + 1] & 0x78) >> 3);
820 bw = 0;
821 }
822
823 pvc = add_pvc(dev, dlci);
824
825 if (!pvc && !no_ram) {
826 netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
827 no_ram = 1;
828 }
829
830 if (pvc) {
831 pvc->state.exist = 1;
832 pvc->state.deleted = 0;
833 if (active != pvc->state.active ||
834 new != pvc->state.new ||
835 bw != pvc->state.bandwidth ||
836 !pvc->state.exist) {
837 pvc->state.new = new;
838 pvc->state.active = active;
839 pvc->state.bandwidth = bw;
840 pvc_carrier(active, pvc);
841 fr_log_dlci_active(pvc);
842 }
843 }
844
845 i += stat_len;
846 }
847
848 pvc = state(hdlc)->first_pvc;
849
850 while (pvc) {
851 if (pvc->state.deleted && pvc->state.exist) {
852 pvc_carrier(0, pvc);
853 pvc->state.active = pvc->state.new = 0;
854 pvc->state.exist = 0;
855 pvc->state.bandwidth = 0;
856 fr_log_dlci_active(pvc);
857 }
858 pvc = pvc->next;
859 }
860
861 /* Next full report after N391 polls */
862 state(hdlc)->n391cnt = state(hdlc)->settings.n391;
863
864 return 0;
865}
866
867
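/*
 * Receive demultiplexer for the FRAD: frames on the LMI DLCI are passed
 * to fr_lmi_recv(), everything else is matched to a PVC by DLCI. The
 * 4-byte header is stripped for routed IP/IPv6, the 10-byte SNAP header
 * for bridged Ethernet and other SNAP-encapsulated protocols, and the
 * payload is delivered to the pvc or pvceth device.
 */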
868static int fr_rx(struct sk_buff *skb)
869{
870 struct net_device *frad = skb->dev;
871 hdlc_device *hdlc = dev_to_hdlc(frad);
872 fr_hdr *fh = (fr_hdr*)skb->data;
873 u8 *data = skb->data;
874 u16 dlci;
875 pvc_device *pvc;
876 struct net_device *dev = NULL;
877
878 if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
879 goto rx_error;
880
881 dlci = q922_to_dlci(skb->data);
882
883 if ((dlci == LMI_CCITT_ANSI_DLCI &&
884 (state(hdlc)->settings.lmi == LMI_ANSI ||
885 state(hdlc)->settings.lmi == LMI_CCITT)) ||
886 (dlci == LMI_CISCO_DLCI &&
887 state(hdlc)->settings.lmi == LMI_CISCO)) {
888 if (fr_lmi_recv(frad, skb))
889 goto rx_error;
890 dev_kfree_skb_any(skb);
891 return NET_RX_SUCCESS;
892 }
893
894 pvc = find_pvc(hdlc, dlci);
895 if (!pvc) {
896#ifdef DEBUG_PKT
897 netdev_info(frad, "No PVC for received frame's DLCI %d\n",
898 dlci);
899#endif
900 dev_kfree_skb_any(skb);
901 return NET_RX_DROP;
902 }
903
904 if (pvc->state.fecn != fh->fecn) {
905#ifdef DEBUG_ECN
906 printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
907 dlci, fh->fecn ? "N" : "FF");
908#endif
909 pvc->state.fecn ^= 1;
910 }
911
912 if (pvc->state.becn != fh->becn) {
913#ifdef DEBUG_ECN
914 printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
915 dlci, fh->becn ? "N" : "FF");
916#endif
917 pvc->state.becn ^= 1;
918 }
919
920
921 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
922 frad->stats.rx_dropped++;
923 return NET_RX_DROP;
924 }
925
926 if (data[3] == NLPID_IP) {
927 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
928 dev = pvc->main;
929 skb->protocol = htons(ETH_P_IP);
930
931 } else if (data[3] == NLPID_IPV6) {
932 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
933 dev = pvc->main;
934 skb->protocol = htons(ETH_P_IPV6);
935
936 } else if (skb->len > 10 && data[3] == FR_PAD &&
937 data[4] == NLPID_SNAP && data[5] == FR_PAD) {
938 u16 oui = ntohs(*(__be16*)(data + 6));
939 u16 pid = ntohs(*(__be16*)(data + 8));
940 skb_pull(skb, 10);
941
942 switch ((((u32)oui) << 16) | pid) {
943 case ETH_P_ARP: /* routed frame with SNAP */
944 case ETH_P_IPX:
945 case ETH_P_IP: /* a long variant */
946 case ETH_P_IPV6:
947 dev = pvc->main;
948 skb->protocol = htons(pid);
949 break;
950
951 case 0x80C20007: /* bridged Ethernet frame */
952 if ((dev = pvc->ether) != NULL)
953 skb->protocol = eth_type_trans(skb, dev);
954 break;
955
956 default:
957 netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
958 oui, pid);
959 dev_kfree_skb_any(skb);
960 return NET_RX_DROP;
961 }
962 } else {
963 netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
964 data[3], skb->len);
965 dev_kfree_skb_any(skb);
966 return NET_RX_DROP;
967 }
968
969 if (dev) {
970 dev->stats.rx_packets++; /* PVC traffic */
971 dev->stats.rx_bytes += skb->len;
972 if (pvc->state.becn)
973 dev->stats.rx_compressed++;
974 skb->dev = dev;
975 netif_rx(skb);
976 return NET_RX_SUCCESS;
977 } else {
978 dev_kfree_skb_any(skb);
979 return NET_RX_DROP;
980 }
981
982 rx_error:
983 frad->stats.rx_errors++; /* Mark error */
984 dev_kfree_skb_any(skb);
985 return NET_RX_DROP;
986}
987
988
989
990static void fr_start(struct net_device *dev)
991{
992 hdlc_device *hdlc = dev_to_hdlc(dev);
993#ifdef DEBUG_LINK
994 printk(KERN_DEBUG "fr_start\n");
995#endif
996 if (state(hdlc)->settings.lmi != LMI_NONE) {
997 state(hdlc)->reliable = 0;
998 state(hdlc)->dce_changed = 1;
999 state(hdlc)->request = 0;
1000 state(hdlc)->fullrep_sent = 0;
1001 state(hdlc)->last_errors = 0xFFFFFFFF;
1002 state(hdlc)->n391cnt = 0;
1003 state(hdlc)->txseq = state(hdlc)->rxseq = 0;
1004
1005 init_timer(&state(hdlc)->timer);
1006 /* First poll after 1 s */
1007 state(hdlc)->timer.expires = jiffies + HZ;
1008 state(hdlc)->timer.function = fr_timer;
1009 state(hdlc)->timer.data = (unsigned long)dev;
1010 add_timer(&state(hdlc)->timer);
1011 } else
1012 fr_set_link_state(1, dev);
1013}
1014
1015
1016static void fr_stop(struct net_device *dev)
1017{
1018 hdlc_device *hdlc = dev_to_hdlc(dev);
1019#ifdef DEBUG_LINK
1020 printk(KERN_DEBUG "fr_stop\n");
1021#endif
1022 if (state(hdlc)->settings.lmi != LMI_NONE)
1023 del_timer_sync(&state(hdlc)->timer);
1024 fr_set_link_state(0, dev);
1025}
1026
1027
1028static void fr_close(struct net_device *dev)
1029{
1030 hdlc_device *hdlc = dev_to_hdlc(dev);
1031 pvc_device *pvc = state(hdlc)->first_pvc;
1032
1033 while (pvc) { /* Shutdown all PVCs for this FRAD */
1034 if (pvc->main)
1035 dev_close(pvc->main);
1036 if (pvc->ether)
1037 dev_close(pvc->ether);
1038 pvc = pvc->next;
1039 }
1040}
1041
1042
1043static void pvc_setup(struct net_device *dev)
1044{
1045 dev->type = ARPHRD_DLCI;
1046 dev->flags = IFF_POINTOPOINT;
1047 dev->hard_header_len = 10;
1048 dev->addr_len = 2;
1049 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1050}
1051
1052static const struct net_device_ops pvc_ops = {
1053 .ndo_open = pvc_open,
1054 .ndo_stop = pvc_close,
1055 .ndo_change_mtu = hdlc_change_mtu,
1056 .ndo_start_xmit = pvc_xmit,
1057 .ndo_do_ioctl = pvc_ioctl,
1058};
1059
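/*
 * Creates the user-visible device for a DLCI: "pvc%d" for routed
 * traffic or "pvceth%d" for a bridged Ethernet port. A routed PVC
 * device stores the DLCI as its 2-byte hardware address.
 */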
1060static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1061{
1062 hdlc_device *hdlc = dev_to_hdlc(frad);
1063 pvc_device *pvc;
1064 struct net_device *dev;
1065 int used;
1066
1067 if ((pvc = add_pvc(frad, dlci)) == NULL) {
1068 netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
1069 return -ENOBUFS;
1070 }
1071
1072 if (*get_dev_p(pvc, type))
1073 return -EEXIST;
1074
1075 used = pvc_is_used(pvc);
1076
        if (type == ARPHRD_ETHER)
                dev = alloc_netdev(0, "pvceth%d", ether_setup);
        else
                dev = alloc_netdev(0, "pvc%d", pvc_setup);
1082
1083 if (!dev) {
1084 netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
1085 delete_unused_pvcs(hdlc);
1086 return -ENOBUFS;
1087 }
1088
        if (type == ARPHRD_ETHER) {
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                eth_hw_addr_random(dev);
        } else {
1092 *(__be16*)dev->dev_addr = htons(dlci);
1093 dlci_to_q922(dev->broadcast, dlci);
1094 }
1095 dev->netdev_ops = &pvc_ops;
1096 dev->mtu = HDLC_MAX_MTU;
1097 dev->tx_queue_len = 0;
1098 dev->ml_priv = pvc;
1099
1100 if (register_netdevice(dev) != 0) {
1101 free_netdev(dev);
1102 delete_unused_pvcs(hdlc);
1103 return -EIO;
1104 }
1105
1106 dev->destructor = free_netdev;
1107 *get_dev_p(pvc, type) = dev;
1108 if (!used) {
1109 state(hdlc)->dce_changed = 1;
1110 state(hdlc)->dce_pvc_count++;
1111 }
1112 return 0;
1113}
1114
1115
1116
1117static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
1118{
1119 pvc_device *pvc;
1120 struct net_device *dev;
1121
1122 if ((pvc = find_pvc(hdlc, dlci)) == NULL)
1123 return -ENOENT;
1124
1125 if ((dev = *get_dev_p(pvc, type)) == NULL)
1126 return -ENOENT;
1127
1128 if (dev->flags & IFF_UP)
1129 return -EBUSY; /* PVC in use */
1130
1131 unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
1132 *get_dev_p(pvc, type) = NULL;
1133
1134 if (!pvc_is_used(pvc)) {
1135 state(hdlc)->dce_pvc_count--;
1136 state(hdlc)->dce_changed = 1;
1137 }
1138 delete_unused_pvcs(hdlc);
1139 return 0;
1140}
1141
1142
1143
1144static void fr_destroy(struct net_device *frad)
1145{
1146 hdlc_device *hdlc = dev_to_hdlc(frad);
1147 pvc_device *pvc = state(hdlc)->first_pvc;
1148 state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
1149 state(hdlc)->dce_pvc_count = 0;
1150 state(hdlc)->dce_changed = 1;
1151
1152 while (pvc) {
1153 pvc_device *next = pvc->next;
1154 /* destructors will free_netdev() main and ether */
1155 if (pvc->main)
1156 unregister_netdevice(pvc->main);
1157
1158 if (pvc->ether)
1159 unregister_netdevice(pvc->ether);
1160
1161 kfree(pvc);
1162 pvc = next;
1163 }
1164}
1165
1166
1167static struct hdlc_proto proto = {
1168 .close = fr_close,
1169 .start = fr_start,
1170 .stop = fr_stop,
1171 .detach = fr_destroy,
1172 .ioctl = fr_ioctl,
1173 .netif_rx = fr_rx,
1174 .module = THIS_MODULE,
1175};
1176
1177
1178static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1179{
1180 fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
1181 const size_t size = sizeof(fr_proto);
1182 fr_proto new_settings;
1183 hdlc_device *hdlc = dev_to_hdlc(dev);
1184 fr_proto_pvc pvc;
1185 int result;
1186
1187 switch (ifr->ifr_settings.type) {
1188 case IF_GET_PROTO:
1189 if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1190 return -EINVAL;
1191 ifr->ifr_settings.type = IF_PROTO_FR;
1192 if (ifr->ifr_settings.size < size) {
1193 ifr->ifr_settings.size = size; /* data size wanted */
1194 return -ENOBUFS;
1195 }
1196 if (copy_to_user(fr_s, &state(hdlc)->settings, size))
1197 return -EFAULT;
1198 return 0;
1199
1200 case IF_PROTO_FR:
1201 if (!capable(CAP_NET_ADMIN))
1202 return -EPERM;
1203
1204 if (dev->flags & IFF_UP)
1205 return -EBUSY;
1206
1207 if (copy_from_user(&new_settings, fr_s, size))
1208 return -EFAULT;
1209
1210 if (new_settings.lmi == LMI_DEFAULT)
1211 new_settings.lmi = LMI_ANSI;
1212
1213 if ((new_settings.lmi != LMI_NONE &&
1214 new_settings.lmi != LMI_ANSI &&
1215 new_settings.lmi != LMI_CCITT &&
1216 new_settings.lmi != LMI_CISCO) ||
1217 new_settings.t391 < 1 ||
1218 new_settings.t392 < 2 ||
1219 new_settings.n391 < 1 ||
1220 new_settings.n392 < 1 ||
1221 new_settings.n393 < new_settings.n392 ||
1222 new_settings.n393 > 32 ||
1223 (new_settings.dce != 0 &&
1224 new_settings.dce != 1))
1225 return -EINVAL;
1226
1227 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
1228 if (result)
1229 return result;
1230
1231 if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
1232 result = attach_hdlc_protocol(dev, &proto,
1233 sizeof(struct frad_state));
1234 if (result)
1235 return result;
1236 state(hdlc)->first_pvc = NULL;
1237 state(hdlc)->dce_pvc_count = 0;
1238 }
1239 memcpy(&state(hdlc)->settings, &new_settings, size);
1240 dev->type = ARPHRD_FRAD;
1241 return 0;
1242
1243 case IF_PROTO_FR_ADD_PVC:
1244 case IF_PROTO_FR_DEL_PVC:
1245 case IF_PROTO_FR_ADD_ETH_PVC:
1246 case IF_PROTO_FR_DEL_ETH_PVC:
1247 if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1248 return -EINVAL;
1249
1250 if (!capable(CAP_NET_ADMIN))
1251 return -EPERM;
1252
1253 if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
1254 sizeof(fr_proto_pvc)))
1255 return -EFAULT;
1256
1257 if (pvc.dlci <= 0 || pvc.dlci >= 1024)
1258 return -EINVAL; /* Only 10 bits, DLCI 0 reserved */
1259
1260 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
1261 ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
1262 result = ARPHRD_ETHER; /* bridged Ethernet device */
1263 else
1264 result = ARPHRD_DLCI;
1265
1266 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
1267 ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
1268 return fr_add_pvc(dev, pvc.dlci, result);
1269 else
1270 return fr_del_pvc(hdlc, pvc.dlci, result);
1271 }
1272
1273 return -EINVAL;
1274}
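
/*
 * For reference, a rough userspace sketch (not part of this driver) of
 * attaching Frame Relay and creating a PVC through the ioctl above. It
 * assumes the SIOCWANDEV/if_settings interface used by tools such as
 * sethdlc and the fr_proto/fr_proto_pvc definitions from
 * <linux/hdlc/ioctl.h>:
 *
 *	struct ifreq ifr = {0};
 *	fr_proto fr = { .lmi = LMI_ANSI, .t391 = 10, .t392 = 15,
 *			.n391 = 6, .n392 = 3, .n393 = 4, .dce = 0 };
 *	fr_proto_pvc pvc = { .dlci = 16 };
 *
 *	strcpy(ifr.ifr_name, "hdlc0");
 *	ifr.ifr_settings.type = IF_PROTO_FR;
 *	ifr.ifr_settings.size = sizeof(fr);
 *	ifr.ifr_settings.ifs_ifsu.fr = &fr;
 *	ioctl(fd, SIOCWANDEV, &ifr);	(attaches FR with ANSI LMI)
 *
 *	ifr.ifr_settings.type = IF_PROTO_FR_ADD_PVC;
 *	ifr.ifr_settings.ifs_ifsu.fr_pvc = &pvc;
 *	ioctl(fd, SIOCWANDEV, &ifr);	(creates a pvcN device for DLCI 16)
 */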
1275
1276
1277static int __init mod_init(void)
1278{
1279 register_hdlc_protocol(&proto);
1280 return 0;
1281}
1282
1283
1284static void __exit mod_exit(void)
1285{
1286 unregister_hdlc_protocol(&proto);
1287}
1288
1289
1290module_init(mod_init);
1291module_exit(mod_exit);
1292
1293MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
1294MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
1295MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Generic HDLC support routines for Linux
4 * Frame Relay support
5 *
6 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
7 *
8
9 Theory of PVC state
10
11 DCE mode:
12
13 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
14 0,x -> 1,1 if "link reliable" when sending FULL STATUS
15 1,1 -> 1,0 if received FULL STATUS ACK
16
17 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
18 -> 1 when "PVC up" and (exist,new) = 1,0
19
20 DTE mode:
21 (exist,new,active) = FULL STATUS if "link reliable"
22 = 0, 0, 0 if "link unreliable"
23 No LMI:
24 active = open and "link reliable"
25 exist = new = not used
26
27 CCITT LMI: ITU-T Q.933 Annex A
28 ANSI LMI: ANSI T1.617 Annex D
29 CISCO LMI: the original, aka "Gang of Four" LMI
30
31*/
32
33#include <linux/errno.h>
34#include <linux/etherdevice.h>
35#include <linux/hdlc.h>
36#include <linux/if_arp.h>
37#include <linux/inetdevice.h>
38#include <linux/init.h>
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pkt_sched.h>
42#include <linux/poll.h>
43#include <linux/rtnetlink.h>
44#include <linux/skbuff.h>
45#include <linux/slab.h>
46
47#undef DEBUG_PKT
48#undef DEBUG_ECN
49#undef DEBUG_LINK
50#undef DEBUG_PROTO
51#undef DEBUG_PVC
52
53#define FR_UI 0x03
54#define FR_PAD 0x00
55
56#define NLPID_IP 0xCC
57#define NLPID_IPV6 0x8E
58#define NLPID_SNAP 0x80
59#define NLPID_PAD 0x00
60#define NLPID_CCITT_ANSI_LMI 0x08
61#define NLPID_CISCO_LMI 0x09
62
63
64#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */
65#define LMI_CISCO_DLCI 1023
66
67#define LMI_CALLREF 0x00 /* Call Reference */
68#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */
69#define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */
70#define LMI_CCITT_REPTYPE 0x51
71#define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */
72#define LMI_CCITT_ALIVE 0x53
73#define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */
74#define LMI_CCITT_PVCSTAT 0x57
75
76#define LMI_FULLREP 0x00 /* full report */
77#define LMI_INTEGRITY 0x01 /* link integrity report */
78#define LMI_SINGLE 0x02 /* single PVC report */
79
80#define LMI_STATUS_ENQUIRY 0x75
81#define LMI_STATUS 0x7D /* reply */
82
83#define LMI_REPT_LEN 1 /* report type element length */
84#define LMI_INTEG_LEN 2 /* link integrity element length */
85
86#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */
87#define LMI_ANSI_LENGTH 14
88
89
90struct fr_hdr {
91#if defined(__LITTLE_ENDIAN_BITFIELD)
92 unsigned ea1: 1;
93 unsigned cr: 1;
94 unsigned dlcih: 6;
95
96 unsigned ea2: 1;
97 unsigned de: 1;
98 unsigned becn: 1;
99 unsigned fecn: 1;
100 unsigned dlcil: 4;
101#else
102 unsigned dlcih: 6;
103 unsigned cr: 1;
104 unsigned ea1: 1;
105
106 unsigned dlcil: 4;
107 unsigned fecn: 1;
108 unsigned becn: 1;
109 unsigned de: 1;
110 unsigned ea2: 1;
111#endif
112} __packed;
113
114
115struct pvc_device {
116 struct net_device *frad;
117 struct net_device *main;
118 struct net_device *ether; /* bridged Ethernet interface */
119 struct pvc_device *next; /* Sorted in ascending DLCI order */
120 int dlci;
121 int open_count;
122
123 struct {
124 unsigned int new: 1;
125 unsigned int active: 1;
126 unsigned int exist: 1;
127 unsigned int deleted: 1;
128 unsigned int fecn: 1;
129 unsigned int becn: 1;
130 unsigned int bandwidth; /* Cisco LMI reporting only */
131 }state;
132};
133
134struct frad_state {
135 fr_proto settings;
136 struct pvc_device *first_pvc;
137 int dce_pvc_count;
138
139 struct timer_list timer;
140 struct net_device *dev;
141 unsigned long last_poll;
142 int reliable;
143 int dce_changed;
144 int request;
145 int fullrep_sent;
146 u32 last_errors; /* last errors bit list */
147 u8 n391cnt;
148 u8 txseq; /* TX sequence number */
149 u8 rxseq; /* RX sequence number */
150};
151
152
153static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
154
155
156static inline u16 q922_to_dlci(u8 *hdr)
157{
158 return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
159}
160
161
162static inline void dlci_to_q922(u8 *hdr, u16 dlci)
163{
164 hdr[0] = (dlci >> 2) & 0xFC;
165 hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
166}
167
168
169static inline struct frad_state* state(hdlc_device *hdlc)
170{
171 return(struct frad_state *)(hdlc->state);
172}
173
174
175static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
176{
177 struct pvc_device *pvc = state(hdlc)->first_pvc;
178
179 while (pvc) {
180 if (pvc->dlci == dlci)
181 return pvc;
182 if (pvc->dlci > dlci)
183 return NULL; /* the list is sorted */
184 pvc = pvc->next;
185 }
186
187 return NULL;
188}
189
190
191static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
192{
193 hdlc_device *hdlc = dev_to_hdlc(dev);
194 struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;
195
196 while (*pvc_p) {
197 if ((*pvc_p)->dlci == dlci)
198 return *pvc_p;
199 if ((*pvc_p)->dlci > dlci)
200 break; /* the list is sorted */
201 pvc_p = &(*pvc_p)->next;
202 }
203
204 pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
205#ifdef DEBUG_PVC
206 printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
207#endif
208 if (!pvc)
209 return NULL;
210
211 pvc->dlci = dlci;
212 pvc->frad = dev;
213 pvc->next = *pvc_p; /* Put it in the chain */
214 *pvc_p = pvc;
215 return pvc;
216}
217
218
219static inline int pvc_is_used(struct pvc_device *pvc)
220{
221 return pvc->main || pvc->ether;
222}
223
224
225static inline void pvc_carrier(int on, struct pvc_device *pvc)
226{
227 if (on) {
228 if (pvc->main)
229 if (!netif_carrier_ok(pvc->main))
230 netif_carrier_on(pvc->main);
231 if (pvc->ether)
232 if (!netif_carrier_ok(pvc->ether))
233 netif_carrier_on(pvc->ether);
234 } else {
235 if (pvc->main)
236 if (netif_carrier_ok(pvc->main))
237 netif_carrier_off(pvc->main);
238 if (pvc->ether)
239 if (netif_carrier_ok(pvc->ether))
240 netif_carrier_off(pvc->ether);
241 }
242}
243
244
245static inline void delete_unused_pvcs(hdlc_device *hdlc)
246{
247 struct pvc_device **pvc_p = &state(hdlc)->first_pvc;
248
249 while (*pvc_p) {
250 if (!pvc_is_used(*pvc_p)) {
251 struct pvc_device *pvc = *pvc_p;
252#ifdef DEBUG_PVC
253 printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
254#endif
255 *pvc_p = pvc->next;
256 kfree(pvc);
257 continue;
258 }
259 pvc_p = &(*pvc_p)->next;
260 }
261}
262
263
264static inline struct net_device **get_dev_p(struct pvc_device *pvc,
265 int type)
266{
267 if (type == ARPHRD_ETHER)
268 return &pvc->ether;
269 else
270 return &pvc->main;
271}
272
273
274static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
275{
276 u16 head_len;
277 struct sk_buff *skb = *skb_p;
278
279 switch (skb->protocol) {
280 case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
281 head_len = 4;
282 skb_push(skb, head_len);
283 skb->data[3] = NLPID_CCITT_ANSI_LMI;
284 break;
285
286 case cpu_to_be16(NLPID_CISCO_LMI):
287 head_len = 4;
288 skb_push(skb, head_len);
289 skb->data[3] = NLPID_CISCO_LMI;
290 break;
291
292 case cpu_to_be16(ETH_P_IP):
293 head_len = 4;
294 skb_push(skb, head_len);
295 skb->data[3] = NLPID_IP;
296 break;
297
298 case cpu_to_be16(ETH_P_IPV6):
299 head_len = 4;
300 skb_push(skb, head_len);
301 skb->data[3] = NLPID_IPV6;
302 break;
303
304 case cpu_to_be16(ETH_P_802_3):
305 head_len = 10;
306 if (skb_headroom(skb) < head_len) {
307 struct sk_buff *skb2 = skb_realloc_headroom(skb,
308 head_len);
309 if (!skb2)
310 return -ENOBUFS;
311 dev_kfree_skb(skb);
312 skb = *skb_p = skb2;
313 }
314 skb_push(skb, head_len);
315 skb->data[3] = FR_PAD;
316 skb->data[4] = NLPID_SNAP;
317 skb->data[5] = FR_PAD;
318 skb->data[6] = 0x80;
319 skb->data[7] = 0xC2;
320 skb->data[8] = 0x00;
321 skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
322 break;
323
324 default:
325 head_len = 10;
326 skb_push(skb, head_len);
327 skb->data[3] = FR_PAD;
328 skb->data[4] = NLPID_SNAP;
329 skb->data[5] = FR_PAD;
330 skb->data[6] = FR_PAD;
331 skb->data[7] = FR_PAD;
332 *(__be16*)(skb->data + 8) = skb->protocol;
333 }
334
335 dlci_to_q922(skb->data, dlci);
336 skb->data[2] = FR_UI;
337 return 0;
338}
339
340
341
342static int pvc_open(struct net_device *dev)
343{
344 struct pvc_device *pvc = dev->ml_priv;
345
346 if ((pvc->frad->flags & IFF_UP) == 0)
347 return -EIO; /* Frad must be UP in order to activate PVC */
348
349 if (pvc->open_count++ == 0) {
350 hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
351 if (state(hdlc)->settings.lmi == LMI_NONE)
352 pvc->state.active = netif_carrier_ok(pvc->frad);
353
354 pvc_carrier(pvc->state.active, pvc);
355 state(hdlc)->dce_changed = 1;
356 }
357 return 0;
358}
359
360
361
362static int pvc_close(struct net_device *dev)
363{
364 struct pvc_device *pvc = dev->ml_priv;
365
366 if (--pvc->open_count == 0) {
367 hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
368 if (state(hdlc)->settings.lmi == LMI_NONE)
369 pvc->state.active = 0;
370
371 if (state(hdlc)->settings.dce) {
372 state(hdlc)->dce_changed = 1;
373 pvc->state.active = 0;
374 }
375 }
376 return 0;
377}
378
379
380
381static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
382{
383 struct pvc_device *pvc = dev->ml_priv;
384 fr_proto_pvc_info info;
385
386 if (ifr->ifr_settings.type == IF_GET_PROTO) {
387 if (dev->type == ARPHRD_ETHER)
388 ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
389 else
390 ifr->ifr_settings.type = IF_PROTO_FR_PVC;
391
392 if (ifr->ifr_settings.size < sizeof(info)) {
393 /* data size wanted */
394 ifr->ifr_settings.size = sizeof(info);
395 return -ENOBUFS;
396 }
397
398 info.dlci = pvc->dlci;
399 memcpy(info.master, pvc->frad->name, IFNAMSIZ);
400 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
401 &info, sizeof(info)))
402 return -EFAULT;
403 return 0;
404 }
405
406 return -EINVAL;
407}
408
409static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
410{
411 struct pvc_device *pvc = dev->ml_priv;
412
413 if (pvc->state.active) {
414 if (dev->type == ARPHRD_ETHER) {
415 int pad = ETH_ZLEN - skb->len;
416 if (pad > 0) { /* Pad the frame with zeros */
417 int len = skb->len;
418 if (skb_tailroom(skb) < pad)
419 if (pskb_expand_head(skb, 0, pad,
420 GFP_ATOMIC)) {
421 dev->stats.tx_dropped++;
422 dev_kfree_skb(skb);
423 return NETDEV_TX_OK;
424 }
425 skb_put(skb, pad);
426 memset(skb->data + len, 0, pad);
427 }
428 skb->protocol = cpu_to_be16(ETH_P_802_3);
429 }
430 if (!fr_hard_header(&skb, pvc->dlci)) {
431 dev->stats.tx_bytes += skb->len;
432 dev->stats.tx_packets++;
433 if (pvc->state.fecn) /* TX Congestion counter */
434 dev->stats.tx_compressed++;
435 skb->dev = pvc->frad;
436 skb->protocol = htons(ETH_P_HDLC);
437 skb_reset_network_header(skb);
438 dev_queue_xmit(skb);
439 return NETDEV_TX_OK;
440 }
441 }
442
443 dev->stats.tx_dropped++;
444 dev_kfree_skb(skb);
445 return NETDEV_TX_OK;
446}
447
448static inline void fr_log_dlci_active(struct pvc_device *pvc)
449{
450 netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
451 pvc->dlci,
452 pvc->main ? pvc->main->name : "",
453 pvc->main && pvc->ether ? " " : "",
454 pvc->ether ? pvc->ether->name : "",
455 pvc->state.new ? " new" : "",
456 !pvc->state.exist ? "deleted" :
457 pvc->state.active ? "active" : "inactive");
458}
459
460
461
462static inline u8 fr_lmi_nextseq(u8 x)
463{
464 x++;
465 return x ? x : 1;
466}
467
468
469static void fr_lmi_send(struct net_device *dev, int fullrep)
470{
471 hdlc_device *hdlc = dev_to_hdlc(dev);
472 struct sk_buff *skb;
473 struct pvc_device *pvc = state(hdlc)->first_pvc;
474 int lmi = state(hdlc)->settings.lmi;
475 int dce = state(hdlc)->settings.dce;
476 int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
477 int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
478 u8 *data;
479 int i = 0;
480
481 if (dce && fullrep) {
482 len += state(hdlc)->dce_pvc_count * (2 + stat_len);
483 if (len > HDLC_MAX_MRU) {
484 netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
485 return;
486 }
487 }
488
489 skb = dev_alloc_skb(len);
490 if (!skb) {
491 netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
492 return;
493 }
494 memset(skb->data, 0, len);
495 skb_reserve(skb, 4);
496 if (lmi == LMI_CISCO) {
497 skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
498 fr_hard_header(&skb, LMI_CISCO_DLCI);
499 } else {
500 skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
501 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
502 }
503 data = skb_tail_pointer(skb);
504 data[i++] = LMI_CALLREF;
505 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
506 if (lmi == LMI_ANSI)
507 data[i++] = LMI_ANSI_LOCKSHIFT;
508 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
509 LMI_ANSI_CISCO_REPTYPE;
510 data[i++] = LMI_REPT_LEN;
511 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
512 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
513 data[i++] = LMI_INTEG_LEN;
514 data[i++] = state(hdlc)->txseq =
515 fr_lmi_nextseq(state(hdlc)->txseq);
516 data[i++] = state(hdlc)->rxseq;
517
518 if (dce && fullrep) {
519 while (pvc) {
520 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
521 LMI_ANSI_CISCO_PVCSTAT;
522 data[i++] = stat_len;
523
524 /* LMI start/restart */
525 if (state(hdlc)->reliable && !pvc->state.exist) {
526 pvc->state.exist = pvc->state.new = 1;
527 fr_log_dlci_active(pvc);
528 }
529
530 /* ifconfig PVC up */
531 if (pvc->open_count && !pvc->state.active &&
532 pvc->state.exist && !pvc->state.new) {
533 pvc_carrier(1, pvc);
534 pvc->state.active = 1;
535 fr_log_dlci_active(pvc);
536 }
537
538 if (lmi == LMI_CISCO) {
539 data[i] = pvc->dlci >> 8;
540 data[i + 1] = pvc->dlci & 0xFF;
541 } else {
542 data[i] = (pvc->dlci >> 4) & 0x3F;
543 data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
544 data[i + 2] = 0x80;
545 }
546
547 if (pvc->state.new)
548 data[i + 2] |= 0x08;
549 else if (pvc->state.active)
550 data[i + 2] |= 0x02;
551
552 i += stat_len;
553 pvc = pvc->next;
554 }
555 }
556
557 skb_put(skb, i);
558 skb->priority = TC_PRIO_CONTROL;
559 skb->dev = dev;
560 skb->protocol = htons(ETH_P_HDLC);
561 skb_reset_network_header(skb);
562
563 dev_queue_xmit(skb);
564}
565
566
567
568static void fr_set_link_state(int reliable, struct net_device *dev)
569{
570 hdlc_device *hdlc = dev_to_hdlc(dev);
571 struct pvc_device *pvc = state(hdlc)->first_pvc;
572
573 state(hdlc)->reliable = reliable;
574 if (reliable) {
575 netif_dormant_off(dev);
576 state(hdlc)->n391cnt = 0; /* Request full status */
577 state(hdlc)->dce_changed = 1;
578
579 if (state(hdlc)->settings.lmi == LMI_NONE) {
580 while (pvc) { /* Activate all PVCs */
581 pvc_carrier(1, pvc);
582 pvc->state.exist = pvc->state.active = 1;
583 pvc->state.new = 0;
584 pvc = pvc->next;
585 }
586 }
587 } else {
588 netif_dormant_on(dev);
589 while (pvc) { /* Deactivate all PVCs */
590 pvc_carrier(0, pvc);
591 pvc->state.exist = pvc->state.active = 0;
592 pvc->state.new = 0;
593 if (!state(hdlc)->settings.dce)
594 pvc->state.bandwidth = 0;
595 pvc = pvc->next;
596 }
597 }
598}
599
600
601static void fr_timer(struct timer_list *t)
602{
603 struct frad_state *st = from_timer(st, t, timer);
604 struct net_device *dev = st->dev;
605 hdlc_device *hdlc = dev_to_hdlc(dev);
606 int i, cnt = 0, reliable;
607 u32 list;
608
609 if (state(hdlc)->settings.dce) {
610 reliable = state(hdlc)->request &&
611 time_before(jiffies, state(hdlc)->last_poll +
612 state(hdlc)->settings.t392 * HZ);
613 state(hdlc)->request = 0;
614 } else {
615 state(hdlc)->last_errors <<= 1; /* Shift the list */
616 if (state(hdlc)->request) {
617 if (state(hdlc)->reliable)
618 netdev_info(dev, "No LMI status reply received\n");
619 state(hdlc)->last_errors |= 1;
620 }
621
622 list = state(hdlc)->last_errors;
623 for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
624 cnt += (list & 1); /* errors count */
625
626 reliable = (cnt < state(hdlc)->settings.n392);
627 }
628
629 if (state(hdlc)->reliable != reliable) {
630 netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
631 fr_set_link_state(reliable, dev);
632 }
633
634 if (state(hdlc)->settings.dce)
635 state(hdlc)->timer.expires = jiffies +
636 state(hdlc)->settings.t392 * HZ;
637 else {
638 if (state(hdlc)->n391cnt)
639 state(hdlc)->n391cnt--;
640
641 fr_lmi_send(dev, state(hdlc)->n391cnt == 0);
642
643 state(hdlc)->last_poll = jiffies;
644 state(hdlc)->request = 1;
645 state(hdlc)->timer.expires = jiffies +
646 state(hdlc)->settings.t391 * HZ;
647 }
648
649 add_timer(&state(hdlc)->timer);
650}
651
652
653static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
654{
655 hdlc_device *hdlc = dev_to_hdlc(dev);
656 struct pvc_device *pvc;
657 u8 rxseq, txseq;
658 int lmi = state(hdlc)->settings.lmi;
659 int dce = state(hdlc)->settings.dce;
660 int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
661
662 if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
663 LMI_CCITT_CISCO_LENGTH)) {
664 netdev_info(dev, "Short LMI frame\n");
665 return 1;
666 }
667
668 if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
669 NLPID_CCITT_ANSI_LMI)) {
670 netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
671 return 1;
672 }
673
674 if (skb->data[4] != LMI_CALLREF) {
675 netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
676 skb->data[4]);
677 return 1;
678 }
679
680 if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
681 netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
682 skb->data[5]);
683 return 1;
684 }
685
686 if (lmi == LMI_ANSI) {
687 if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
688 netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
689 skb->data[6]);
690 return 1;
691 }
692 i = 7;
693 } else
694 i = 6;
695
696 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
697 LMI_ANSI_CISCO_REPTYPE)) {
698 netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
699 skb->data[i]);
700 return 1;
701 }
702
703 if (skb->data[++i] != LMI_REPT_LEN) {
704 netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
705 skb->data[i]);
706 return 1;
707 }
708
709 reptype = skb->data[++i];
710 if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
711 netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
712 reptype);
713 return 1;
714 }
715
716 if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
717 LMI_ANSI_CISCO_ALIVE)) {
718 netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
719 skb->data[i]);
720 return 1;
721 }
722
723 if (skb->data[++i] != LMI_INTEG_LEN) {
724 netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
725 skb->data[i]);
726 return 1;
727 }
728 i++;
729
730 state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
731 rxseq = skb->data[i++]; /* Should confirm our sequence */
732
733 txseq = state(hdlc)->txseq;
734
735 if (dce)
736 state(hdlc)->last_poll = jiffies;
737
738 error = 0;
739 if (!state(hdlc)->reliable)
740 error = 1;
741
742 if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
743 state(hdlc)->n391cnt = 0;
744 error = 1;
745 }
746
747 if (dce) {
748 if (state(hdlc)->fullrep_sent && !error) {
749/* Stop sending full report - the last one has been confirmed by DTE */
750 state(hdlc)->fullrep_sent = 0;
751 pvc = state(hdlc)->first_pvc;
752 while (pvc) {
753 if (pvc->state.new) {
754 pvc->state.new = 0;
755
756/* Tell DTE that new PVC is now active */
757 state(hdlc)->dce_changed = 1;
758 }
759 pvc = pvc->next;
760 }
761 }
762
763 if (state(hdlc)->dce_changed) {
764 reptype = LMI_FULLREP;
765 state(hdlc)->fullrep_sent = 1;
766 state(hdlc)->dce_changed = 0;
767 }
768
769 state(hdlc)->request = 1; /* got request */
770 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
771 return 0;
772 }
773
774 /* DTE */
775
776 state(hdlc)->request = 0; /* got response, no request pending */
777
778 if (error)
779 return 0;
780
781 if (reptype != LMI_FULLREP)
782 return 0;
783
784 pvc = state(hdlc)->first_pvc;
785
786 while (pvc) {
787 pvc->state.deleted = 1;
788 pvc = pvc->next;
789 }
790
791 no_ram = 0;
792 while (skb->len >= i + 2 + stat_len) {
793 u16 dlci;
794 u32 bw;
795 unsigned int active, new;
796
797 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
798 LMI_ANSI_CISCO_PVCSTAT)) {
799 netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
800 skb->data[i]);
801 return 1;
802 }
803
804 if (skb->data[++i] != stat_len) {
805 netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
806 skb->data[i]);
807 return 1;
808 }
809 i++;
810
811 new = !! (skb->data[i + 2] & 0x08);
812 active = !! (skb->data[i + 2] & 0x02);
813 if (lmi == LMI_CISCO) {
814 dlci = (skb->data[i] << 8) | skb->data[i + 1];
815 bw = (skb->data[i + 3] << 16) |
816 (skb->data[i + 4] << 8) |
817 (skb->data[i + 5]);
818 } else {
819 dlci = ((skb->data[i] & 0x3F) << 4) |
820 ((skb->data[i + 1] & 0x78) >> 3);
821 bw = 0;
822 }
823
824 pvc = add_pvc(dev, dlci);
825
826 if (!pvc && !no_ram) {
827 netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
828 no_ram = 1;
829 }
830
831 if (pvc) {
832 pvc->state.exist = 1;
833 pvc->state.deleted = 0;
834 if (active != pvc->state.active ||
835 new != pvc->state.new ||
836 bw != pvc->state.bandwidth ||
837 !pvc->state.exist) {
838 pvc->state.new = new;
839 pvc->state.active = active;
840 pvc->state.bandwidth = bw;
841 pvc_carrier(active, pvc);
842 fr_log_dlci_active(pvc);
843 }
844 }
845
846 i += stat_len;
847 }
848
849 pvc = state(hdlc)->first_pvc;
850
851 while (pvc) {
852 if (pvc->state.deleted && pvc->state.exist) {
853 pvc_carrier(0, pvc);
854 pvc->state.active = pvc->state.new = 0;
855 pvc->state.exist = 0;
856 pvc->state.bandwidth = 0;
857 fr_log_dlci_active(pvc);
858 }
859 pvc = pvc->next;
860 }
861
862 /* Next full report after N391 polls */
863 state(hdlc)->n391cnt = state(hdlc)->settings.n391;
864
865 return 0;
866}
867
868
869static int fr_rx(struct sk_buff *skb)
870{
871 struct net_device *frad = skb->dev;
872 hdlc_device *hdlc = dev_to_hdlc(frad);
873 struct fr_hdr *fh = (struct fr_hdr *)skb->data;
874 u8 *data = skb->data;
875 u16 dlci;
876 struct pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
			    dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}


	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		frad->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16 *)(data + 6));
		u16 pid = ntohs(*(__be16 *)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP: /* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:  /* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
				    oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
			    data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		dev->stats.rx_packets++; /* PVC traffic */
		dev->stats.rx_bytes += skb->len;
		if (pvc->state.becn)
			dev->stats.rx_compressed++;
		skb->dev = dev;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

rx_error:
	frad->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}



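/* Start Frame Relay operation on the FRAD (proto.start below): with LMI
 * enabled, reset the protocol state and arm the polling timer; with no LMI,
 * simply declare the link up.
 */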
static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		state(hdlc)->dev = dev;
		timer_setup(&state(hdlc)->timer, fr_timer, 0);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}


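/* Stop Frame Relay operation (proto.stop): cancel the LMI timer and mark
 * the link down.
 */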
static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}


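/* proto.close: bring down every PVC interface attached to this FRAD. */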
static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}


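/* Initialize a freshly allocated (non-Ethernet) PVC network device. */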
static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 0;
	dev->addr_len = 2;
	netif_keep_dst(dev);
}

static const struct net_device_ops pvc_ops = {
	.ndo_open	= pvc_open,
	.ndo_stop	= pvc_close,
	.ndo_start_xmit	= pvc_xmit,
	.ndo_do_ioctl	= pvc_ioctl,
};

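/* Create the PVC entry for @dlci (if not present yet) and register a pvc%d
 * or pvceth%d network device of the requested type on top of it.
 */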
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	pvc_device *pvc;
	struct net_device *dev;
	int used;

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
				   ether_setup);
	else
		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);

	if (!dev) {
		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		eth_hw_addr_random(dev);
	} else {
		*(__be16 *)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->netdev_ops = &pvc_ops;
	dev->mtu = HDLC_MAX_MTU;
	dev->min_mtu = 68;
	dev->max_mtu = HDLC_MAX_MTU;
	dev->needed_headroom = 10;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->ml_priv = pvc;

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->needs_free_netdev = true;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}



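/* Unregister the network device of the given type bound to @dlci; refuse
 * if the interface is still up.
 */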
static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}



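/* proto.detach: unregister all PVC devices and free the PVC list. */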
static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	pvc_device *pvc = state(hdlc)->first_pvc;
	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}


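/* Frame Relay protocol operations registered with the generic HDLC core. */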
static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.netif_rx	= fr_rx,
	.module		= THIS_MODULE,
};


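/* proto.ioctl: report (IF_GET_PROTO) or change (IF_PROTO_FR) the Frame Relay
 * settings and add or delete PVCs on behalf of userspace.
 */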
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);
		dev->type = ARPHRD_FRAD;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}


static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}


static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");