v3.15 (drivers/net/wan/hdlc_fr.c)
 
   1/*
   2 * Generic HDLC support routines for Linux
   3 * Frame Relay support
   4 *
   5 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms of version 2 of the GNU General Public License
   9 * as published by the Free Software Foundation.
  10 *
  11
  12            Theory of PVC state
  13
  14 DCE mode:
  15
  16 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
  17         0,x -> 1,1 if "link reliable" when sending FULL STATUS
  18         1,1 -> 1,0 if received FULL STATUS ACK
  19
  20 (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
  21             -> 1 when "PVC up" and (exist,new) = 1,0
  22
  23 DTE mode:
  24 (exist,new,active) = FULL STATUS if "link reliable"
  25		    = 0, 0, 0 if "link unreliable"
  26 No LMI:
  27 active = open and "link reliable"
  28 exist = new = not used
  29
  30 CCITT LMI: ITU-T Q.933 Annex A
  31 ANSI LMI: ANSI T1.617 Annex D
  32 CISCO LMI: the original, aka "Gang of Four" LMI
  33
  34*/
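As a concrete reading of the DCE rules above: the driver flips (exist,new) from 0,x to 1,1 in fr_lmi_send() while the link is reliable and a FULL STATUS report is being built, and clears new back to 0 in fr_lmi_recv() once the DTE has acknowledged that report. A minimal sketch of just those two transitions (hypothetical helpers, not part of the driver):

	/* Hypothetical condensation of the DCE-side (exist,new) transitions
	 * described in the comment above; the real logic is spread across
	 * fr_lmi_send() and fr_lmi_recv(). */
	struct pvc_bits { unsigned exist:1, new:1; };

	static void dce_sending_full_status(struct pvc_bits *p, int link_reliable)
	{
		if (link_reliable && !p->exist)
			p->exist = p->new = 1;	/* 0,x -> 1,1 */
	}

	static void dce_got_full_status_ack(struct pvc_bits *p)
	{
		p->new = 0;			/* 1,1 -> 1,0 */
	}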
  35
  36#include <linux/errno.h>
  37#include <linux/etherdevice.h>
  38#include <linux/hdlc.h>
  39#include <linux/if_arp.h>
  40#include <linux/inetdevice.h>
  41#include <linux/init.h>
  42#include <linux/kernel.h>
  43#include <linux/module.h>
  44#include <linux/pkt_sched.h>
  45#include <linux/poll.h>
  46#include <linux/rtnetlink.h>
  47#include <linux/skbuff.h>
  48#include <linux/slab.h>
  49
  50#undef DEBUG_PKT
  51#undef DEBUG_ECN
  52#undef DEBUG_LINK
  53#undef DEBUG_PROTO
  54#undef DEBUG_PVC
  55
  56#define FR_UI			0x03
  57#define FR_PAD			0x00
  58
  59#define NLPID_IP		0xCC
  60#define NLPID_IPV6		0x8E
  61#define NLPID_SNAP		0x80
  62#define NLPID_PAD		0x00
  63#define NLPID_CCITT_ANSI_LMI	0x08
  64#define NLPID_CISCO_LMI		0x09
  65
  66
  67#define LMI_CCITT_ANSI_DLCI	   0 /* LMI DLCI */
  68#define LMI_CISCO_DLCI		1023
  69
  70#define LMI_CALLREF		0x00 /* Call Reference */
  71#define LMI_ANSI_LOCKSHIFT	0x95 /* ANSI locking shift */
  72#define LMI_ANSI_CISCO_REPTYPE	0x01 /* report type */
  73#define LMI_CCITT_REPTYPE	0x51
  74#define LMI_ANSI_CISCO_ALIVE	0x03 /* keep alive */
  75#define LMI_CCITT_ALIVE		0x53
  76#define LMI_ANSI_CISCO_PVCSTAT	0x07 /* PVC status */
  77#define LMI_CCITT_PVCSTAT	0x57
  78
  79#define LMI_FULLREP		0x00 /* full report  */
  80#define LMI_INTEGRITY		0x01 /* link integrity report */
  81#define LMI_SINGLE		0x02 /* single PVC report */
  82
  83#define LMI_STATUS_ENQUIRY      0x75
  84#define LMI_STATUS              0x7D /* reply */
  85
  86#define LMI_REPT_LEN               1 /* report type element length */
  87#define LMI_INTEG_LEN              2 /* link integrity element length */
  88
  89#define LMI_CCITT_CISCO_LENGTH	  13 /* LMI frame lengths */
  90#define LMI_ANSI_LENGTH		  14
  91
  92
  93typedef struct {
  94#if defined(__LITTLE_ENDIAN_BITFIELD)
  95	unsigned ea1:	1;
  96	unsigned cr:	1;
  97	unsigned dlcih:	6;
  98
  99	unsigned ea2:	1;
 100	unsigned de:	1;
 101	unsigned becn:	1;
 102	unsigned fecn:	1;
 103	unsigned dlcil:	4;
 104#else
 105	unsigned dlcih:	6;
 106	unsigned cr:	1;
 107	unsigned ea1:	1;
 108
 109	unsigned dlcil:	4;
 110	unsigned fecn:	1;
 111	unsigned becn:	1;
 112	unsigned de:	1;
 113	unsigned ea2:	1;
 114#endif
 115}__packed fr_hdr;
 116
 117
 118typedef struct pvc_device_struct {
 119	struct net_device *frad;
 120	struct net_device *main;
 121	struct net_device *ether;	/* bridged Ethernet interface	*/
 122	struct pvc_device_struct *next;	/* Sorted in ascending DLCI order */
 123	int dlci;
 124	int open_count;
 125
 126	struct {
 127		unsigned int new: 1;
 128		unsigned int active: 1;
 129		unsigned int exist: 1;
 130		unsigned int deleted: 1;
 131		unsigned int fecn: 1;
 132		unsigned int becn: 1;
 133		unsigned int bandwidth;	/* Cisco LMI reporting only */
 134	}state;
 135}pvc_device;
 136
 137struct frad_state {
 138	fr_proto settings;
 139	pvc_device *first_pvc;
 140	int dce_pvc_count;
 141
 142	struct timer_list timer;
 143	unsigned long last_poll;
 144	int reliable;
 145	int dce_changed;
 146	int request;
 147	int fullrep_sent;
 148	u32 last_errors; /* last errors bit list */
 149	u8 n391cnt;
 150	u8 txseq; /* TX sequence number */
 151	u8 rxseq; /* RX sequence number */
 152};
 153
 154
 155static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
 156
 157
 158static inline u16 q922_to_dlci(u8 *hdr)
 159{
 160	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
 161}
 162
 163
 164static inline void dlci_to_q922(u8 *hdr, u16 dlci)
 165{
 166	hdr[0] = (dlci >> 2) & 0xFC;
 167	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
 168}
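For example, dlci_to_q922() packs DLCI 16 into the address bytes 0x04 0x01 and DLCI 1023 (the Cisco LMI DLCI) into 0xFC 0xF1; bit 0 of the second byte is the EA (address extension) flag, and q922_to_dlci() reverses the packing:

	u8 hdr[2];

	dlci_to_q922(hdr, 16);			/* hdr = { 0x04, 0x01 } */
	dlci_to_q922(hdr, LMI_CISCO_DLCI);	/* hdr = { 0xFC, 0xF1 }, DLCI 1023 */
	/* q922_to_dlci(hdr) now returns 1023 again */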
 169
 170
 171static inline struct frad_state* state(hdlc_device *hdlc)
 172{
 173	return(struct frad_state *)(hdlc->state);
 174}
 175
 176
 177static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
 178{
 179	pvc_device *pvc = state(hdlc)->first_pvc;
 180
 181	while (pvc) {
 182		if (pvc->dlci == dlci)
 183			return pvc;
 184		if (pvc->dlci > dlci)
 185			return NULL; /* the list is sorted */
 186		pvc = pvc->next;
 187	}
 188
 189	return NULL;
 190}
 191
 192
 193static pvc_device* add_pvc(struct net_device *dev, u16 dlci)
 194{
 195	hdlc_device *hdlc = dev_to_hdlc(dev);
 196	pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;
 197
 198	while (*pvc_p) {
 199		if ((*pvc_p)->dlci == dlci)
 200			return *pvc_p;
 201		if ((*pvc_p)->dlci > dlci)
 202			break;	/* the list is sorted */
 203		pvc_p = &(*pvc_p)->next;
 204	}
 205
 206	pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC);
 207#ifdef DEBUG_PVC
 208	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
 209#endif
 210	if (!pvc)
 211		return NULL;
 212
 213	pvc->dlci = dlci;
 214	pvc->frad = dev;
 215	pvc->next = *pvc_p;	/* Put it in the chain */
 216	*pvc_p = pvc;
 217	return pvc;
 218}
 219
 220
 221static inline int pvc_is_used(pvc_device *pvc)
 222{
 223	return pvc->main || pvc->ether;
 224}
 225
 226
 227static inline void pvc_carrier(int on, pvc_device *pvc)
 228{
 229	if (on) {
 230		if (pvc->main)
 231			if (!netif_carrier_ok(pvc->main))
 232				netif_carrier_on(pvc->main);
 233		if (pvc->ether)
 234			if (!netif_carrier_ok(pvc->ether))
 235				netif_carrier_on(pvc->ether);
 236	} else {
 237		if (pvc->main)
 238			if (netif_carrier_ok(pvc->main))
 239				netif_carrier_off(pvc->main);
 240		if (pvc->ether)
 241			if (netif_carrier_ok(pvc->ether))
 242				netif_carrier_off(pvc->ether);
 243	}
 244}
 245
 246
 247static inline void delete_unused_pvcs(hdlc_device *hdlc)
 248{
 249	pvc_device **pvc_p = &state(hdlc)->first_pvc;
 250
 251	while (*pvc_p) {
 252		if (!pvc_is_used(*pvc_p)) {
 253			pvc_device *pvc = *pvc_p;
 254#ifdef DEBUG_PVC
 255			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
 256#endif
 257			*pvc_p = pvc->next;
 258			kfree(pvc);
 259			continue;
 260		}
 261		pvc_p = &(*pvc_p)->next;
 262	}
 263}
 264
 265
 266static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
 267{
 268	if (type == ARPHRD_ETHER)
 269		return &pvc->ether;
 270	else
 271		return &pvc->main;
 272}
 273
 274
 275static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
 276{
 277	u16 head_len;
 278	struct sk_buff *skb = *skb_p;
 279
 280	switch (skb->protocol) {
 281	case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
 282		head_len = 4;
 283		skb_push(skb, head_len);
 284		skb->data[3] = NLPID_CCITT_ANSI_LMI;
 285		break;
 286
 287	case cpu_to_be16(NLPID_CISCO_LMI):
 288		head_len = 4;
 289		skb_push(skb, head_len);
 290		skb->data[3] = NLPID_CISCO_LMI;
 291		break;
 292
 293	case cpu_to_be16(ETH_P_IP):
 294		head_len = 4;
 295		skb_push(skb, head_len);
 296		skb->data[3] = NLPID_IP;
 297		break;
 298
 299	case cpu_to_be16(ETH_P_IPV6):
 300		head_len = 4;
 301		skb_push(skb, head_len);
 302		skb->data[3] = NLPID_IPV6;
 303		break;
 304
 305	case cpu_to_be16(ETH_P_802_3):
 306		head_len = 10;
 307		if (skb_headroom(skb) < head_len) {
 308			struct sk_buff *skb2 = skb_realloc_headroom(skb,
 309								    head_len);
 310			if (!skb2)
 311				return -ENOBUFS;
 312			dev_kfree_skb(skb);
 313			skb = *skb_p = skb2;
 314		}
 315		skb_push(skb, head_len);
 316		skb->data[3] = FR_PAD;
 317		skb->data[4] = NLPID_SNAP;
 318		skb->data[5] = FR_PAD;
 319		skb->data[6] = 0x80;
 320		skb->data[7] = 0xC2;
 321		skb->data[8] = 0x00;
 322		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
 323		break;
 324
 325	default:
 326		head_len = 10;
 327		skb_push(skb, head_len);
 328		skb->data[3] = FR_PAD;
 329		skb->data[4] = NLPID_SNAP;
 330		skb->data[5] = FR_PAD;
 331		skb->data[6] = FR_PAD;
 332		skb->data[7] = FR_PAD;
 333		*(__be16*)(skb->data + 8) = skb->protocol;
 334	}
 335
 336	dlci_to_q922(skb->data, dlci);
 337	skb->data[2] = FR_UI;
 338	return 0;
 339}
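For illustration (byte values worked out from the switch above, not taken verbatim from the driver), the headers built for DLCI 16 come out as follows:

	/* Routed IPv4 (4-byte header, RFC 2427 style):
	 *	04 01 03 CC			Q.922 address, UI, NLPID_IP
	 * Bridged Ethernet without FCS (10-byte header):
	 *	04 01 03 00 80 00 80 C2 00 07	Q.922 address, UI, pad,
	 *					NLPID_SNAP, OUI 00-80-C2, PID 0x0007
	 */
	static const u8 ip_hdr_dlci16[4]   = { 0x04, 0x01, 0x03, 0xCC };
	static const u8 eth_hdr_dlci16[10] = { 0x04, 0x01, 0x03, 0x00, 0x80,
					       0x00, 0x80, 0xC2, 0x00, 0x07 };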
 340
 341
 342
 343static int pvc_open(struct net_device *dev)
 344{
 345	pvc_device *pvc = dev->ml_priv;
 346
 347	if ((pvc->frad->flags & IFF_UP) == 0)
 348		return -EIO;  /* Frad must be UP in order to activate PVC */
 349
 350	if (pvc->open_count++ == 0) {
 351		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
 352		if (state(hdlc)->settings.lmi == LMI_NONE)
 353			pvc->state.active = netif_carrier_ok(pvc->frad);
 354
 355		pvc_carrier(pvc->state.active, pvc);
 356		state(hdlc)->dce_changed = 1;
 357	}
 358	return 0;
 359}
 360
 361
 362
 363static int pvc_close(struct net_device *dev)
 364{
 365	pvc_device *pvc = dev->ml_priv;
 366
 367	if (--pvc->open_count == 0) {
 368		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
 369		if (state(hdlc)->settings.lmi == LMI_NONE)
 370			pvc->state.active = 0;
 371
 372		if (state(hdlc)->settings.dce) {
 373			state(hdlc)->dce_changed = 1;
 374			pvc->state.active = 0;
 375		}
 376	}
 377	return 0;
 378}
 379
 380
 381
 382static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 383{
 384	pvc_device *pvc = dev->ml_priv;
 385	fr_proto_pvc_info info;
 386
 387	if (ifr->ifr_settings.type == IF_GET_PROTO) {
 388		if (dev->type == ARPHRD_ETHER)
 389			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
 390		else
 391			ifr->ifr_settings.type = IF_PROTO_FR_PVC;
 392
 393		if (ifr->ifr_settings.size < sizeof(info)) {
 394			/* data size wanted */
 395			ifr->ifr_settings.size = sizeof(info);
 396			return -ENOBUFS;
 397		}
 398
 399		info.dlci = pvc->dlci;
 400		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
 401		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
 402				 &info, sizeof(info)))
 403			return -EFAULT;
 404		return 0;
 405	}
 406
 407	return -EINVAL;
 408}
 409
 410static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 411{
 412	pvc_device *pvc = dev->ml_priv;
 413
 414	if (pvc->state.active) {
 415		if (dev->type == ARPHRD_ETHER) {
 416			int pad = ETH_ZLEN - skb->len;
 417			if (pad > 0) { /* Pad the frame with zeros */
 418				int len = skb->len;
 419				if (skb_tailroom(skb) < pad)
 420					if (pskb_expand_head(skb, 0, pad,
 421							     GFP_ATOMIC)) {
 422						dev->stats.tx_dropped++;
 423						dev_kfree_skb(skb);
 424						return NETDEV_TX_OK;
 425					}
 426				skb_put(skb, pad);
 427				memset(skb->data + len, 0, pad);
 428			}
 429			skb->protocol = cpu_to_be16(ETH_P_802_3);
 430		}
 431		if (!fr_hard_header(&skb, pvc->dlci)) {
 432			dev->stats.tx_bytes += skb->len;
 433			dev->stats.tx_packets++;
 434			if (pvc->state.fecn) /* TX Congestion counter */
 435				dev->stats.tx_compressed++;
 436			skb->dev = pvc->frad;
 437			dev_queue_xmit(skb);
 438			return NETDEV_TX_OK;
 439		}
 440	}
 441
 442	dev->stats.tx_dropped++;
 443	dev_kfree_skb(skb);
 444	return NETDEV_TX_OK;
 445}
 446
 447static inline void fr_log_dlci_active(pvc_device *pvc)
 448{
 449	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
 450		    pvc->dlci,
 451		    pvc->main ? pvc->main->name : "",
 452		    pvc->main && pvc->ether ? " " : "",
 453		    pvc->ether ? pvc->ether->name : "",
 454		    pvc->state.new ? " new" : "",
 455		    !pvc->state.exist ? "deleted" :
 456		    pvc->state.active ? "active" : "inactive");
 457}
 458
 459
 460
 461static inline u8 fr_lmi_nextseq(u8 x)
 462{
 463	x++;
 464	return x ? x : 1;
 465}
 466
 467
 468static void fr_lmi_send(struct net_device *dev, int fullrep)
 469{
 470	hdlc_device *hdlc = dev_to_hdlc(dev);
 471	struct sk_buff *skb;
 472	pvc_device *pvc = state(hdlc)->first_pvc;
 473	int lmi = state(hdlc)->settings.lmi;
 474	int dce = state(hdlc)->settings.dce;
 475	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
 476	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
 477	u8 *data;
 478	int i = 0;
 479
 480	if (dce && fullrep) {
 481		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
 482		if (len > HDLC_MAX_MRU) {
 483			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
 484			return;
 485		}
 486	}
 487
 488	skb = dev_alloc_skb(len);
 489	if (!skb) {
 490		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
 491		return;
 492	}
 493	memset(skb->data, 0, len);
 494	skb_reserve(skb, 4);
 495	if (lmi == LMI_CISCO) {
 496		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
 497		fr_hard_header(&skb, LMI_CISCO_DLCI);
 498	} else {
 499		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
 500		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
 501	}
 502	data = skb_tail_pointer(skb);
 503	data[i++] = LMI_CALLREF;
 504	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
 505	if (lmi == LMI_ANSI)
 506		data[i++] = LMI_ANSI_LOCKSHIFT;
 507	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
 508		LMI_ANSI_CISCO_REPTYPE;
 509	data[i++] = LMI_REPT_LEN;
 510	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
 511	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
 512	data[i++] = LMI_INTEG_LEN;
 513	data[i++] = state(hdlc)->txseq =
 514		fr_lmi_nextseq(state(hdlc)->txseq);
 515	data[i++] = state(hdlc)->rxseq;
 516
 517	if (dce && fullrep) {
 518		while (pvc) {
 519			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
 520				LMI_ANSI_CISCO_PVCSTAT;
 521			data[i++] = stat_len;
 522
 523			/* LMI start/restart */
 524			if (state(hdlc)->reliable && !pvc->state.exist) {
 525				pvc->state.exist = pvc->state.new = 1;
 526				fr_log_dlci_active(pvc);
 527			}
 528
 529			/* ifconfig PVC up */
 530			if (pvc->open_count && !pvc->state.active &&
 531			    pvc->state.exist && !pvc->state.new) {
 532				pvc_carrier(1, pvc);
 533				pvc->state.active = 1;
 534				fr_log_dlci_active(pvc);
 535			}
 536
 537			if (lmi == LMI_CISCO) {
 538				data[i] = pvc->dlci >> 8;
 539				data[i + 1] = pvc->dlci & 0xFF;
 540			} else {
 541				data[i] = (pvc->dlci >> 4) & 0x3F;
 542				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
 543				data[i + 2] = 0x80;
 544			}
 545
 546			if (pvc->state.new)
 547				data[i + 2] |= 0x08;
 548			else if (pvc->state.active)
 549				data[i + 2] |= 0x02;
 550
 551			i += stat_len;
 552			pvc = pvc->next;
 553		}
 554	}
 555
 556	skb_put(skb, i);
 557	skb->priority = TC_PRIO_CONTROL;
 558	skb->dev = dev;
 559	skb_reset_network_header(skb);
 560
 561	dev_queue_xmit(skb);
 562}
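Worked example (DTE side, ANSI LMI, link-integrity poll only, i.e. fullrep == 0): the 14-byte frame assembled above consists of

	/* 00 01	Q.922 address, DLCI 0
	 * 03		UI
	 * 08		NLPID, CCITT/ANSI LMI
	 * 00		call reference
	 * 75		STATUS ENQUIRY
	 * 95		ANSI locking shift
	 * 01 01 01	report type IE: id, length 1, LMI_INTEGRITY
	 * 03 02 TX RX	link integrity IE: id, length 2, txseq, rxseq
	 * = LMI_ANSI_LENGTH (14) bytes in total
	 */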
 563
 564
 565
 566static void fr_set_link_state(int reliable, struct net_device *dev)
 567{
 568	hdlc_device *hdlc = dev_to_hdlc(dev);
 569	pvc_device *pvc = state(hdlc)->first_pvc;
 570
 571	state(hdlc)->reliable = reliable;
 572	if (reliable) {
 573		netif_dormant_off(dev);
 574		state(hdlc)->n391cnt = 0; /* Request full status */
 575		state(hdlc)->dce_changed = 1;
 576
 577		if (state(hdlc)->settings.lmi == LMI_NONE) {
 578			while (pvc) {	/* Activate all PVCs */
 579				pvc_carrier(1, pvc);
 580				pvc->state.exist = pvc->state.active = 1;
 581				pvc->state.new = 0;
 582				pvc = pvc->next;
 583			}
 584		}
 585	} else {
 586		netif_dormant_on(dev);
 587		while (pvc) {		/* Deactivate all PVCs */
 588			pvc_carrier(0, pvc);
 589			pvc->state.exist = pvc->state.active = 0;
 590			pvc->state.new = 0;
 591			if (!state(hdlc)->settings.dce)
 592				pvc->state.bandwidth = 0;
 593			pvc = pvc->next;
 594		}
 595	}
 596}
 597
 598
 599static void fr_timer(unsigned long arg)
 600{
 601	struct net_device *dev = (struct net_device *)arg;
 602	hdlc_device *hdlc = dev_to_hdlc(dev);
 603	int i, cnt = 0, reliable;
 604	u32 list;
 605
 606	if (state(hdlc)->settings.dce) {
 607		reliable = state(hdlc)->request &&
 608			time_before(jiffies, state(hdlc)->last_poll +
 609				    state(hdlc)->settings.t392 * HZ);
 610		state(hdlc)->request = 0;
 611	} else {
 612		state(hdlc)->last_errors <<= 1; /* Shift the list */
 613		if (state(hdlc)->request) {
 614			if (state(hdlc)->reliable)
 615				netdev_info(dev, "No LMI status reply received\n");
 616			state(hdlc)->last_errors |= 1;
 617		}
 618
 619		list = state(hdlc)->last_errors;
 620		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
 621			cnt += (list & 1);	/* errors count */
 622
 623		reliable = (cnt < state(hdlc)->settings.n392);
 624	}
 625
 626	if (state(hdlc)->reliable != reliable) {
 627		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
 628		fr_set_link_state(reliable, dev);
 629	}
 630
 631	if (state(hdlc)->settings.dce)
 632		state(hdlc)->timer.expires = jiffies +
 633			state(hdlc)->settings.t392 * HZ;
 634	else {
 635		if (state(hdlc)->n391cnt)
 636			state(hdlc)->n391cnt--;
 637
 638		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);
 639
 640		state(hdlc)->last_poll = jiffies;
 641		state(hdlc)->request = 1;
 642		state(hdlc)->timer.expires = jiffies +
 643			state(hdlc)->settings.t391 * HZ;
 644	}
 645
 646	state(hdlc)->timer.function = fr_timer;
 647	state(hdlc)->timer.data = arg;
 648	add_timer(&state(hdlc)->timer);
 649}
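Illustration of the DTE error window above, with arbitrarily chosen parameters:

	/* n392 = 3, n393 = 4
	 * last_errors = 0b1011 -> cnt = 3 -> reliable = (3 < 3) = 0, link unreliable
	 * one answered poll shifts the window: 0b1011 << 1 = 0b0110 -> cnt = 2,
	 * link is reliable again
	 */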
 650
 651
 652static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
 653{
 654	hdlc_device *hdlc = dev_to_hdlc(dev);
 655	pvc_device *pvc;
 656	u8 rxseq, txseq;
 657	int lmi = state(hdlc)->settings.lmi;
 658	int dce = state(hdlc)->settings.dce;
 659	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
 660
 661	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
 662			LMI_CCITT_CISCO_LENGTH)) {
 663		netdev_info(dev, "Short LMI frame\n");
 664		return 1;
 665	}
 666
 667	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
 668			     NLPID_CCITT_ANSI_LMI)) {
 669		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
 670		return 1;
 671	}
 672
 673	if (skb->data[4] != LMI_CALLREF) {
 674		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
 675			    skb->data[4]);
 676		return 1;
 677	}
 678
 679	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
 680		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
 681			    skb->data[5]);
 682		return 1;
 683	}
 684
 685	if (lmi == LMI_ANSI) {
 686		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
 687			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
 688				    skb->data[6]);
 689			return 1;
 690		}
 691		i = 7;
 692	} else
 693		i = 6;
 694
 695	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
 696			     LMI_ANSI_CISCO_REPTYPE)) {
 697		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
 698			    skb->data[i]);
 699		return 1;
 700	}
 701
 702	if (skb->data[++i] != LMI_REPT_LEN) {
 703		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
 704			    skb->data[i]);
 705		return 1;
 706	}
 707
 708	reptype = skb->data[++i];
 709	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
 710		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
 711			    reptype);
 712		return 1;
 713	}
 714
 715	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
 716			       LMI_ANSI_CISCO_ALIVE)) {
 717		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
 718			    skb->data[i]);
 719		return 1;
 720	}
 721
 722	if (skb->data[++i] != LMI_INTEG_LEN) {
 723		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
 724			    skb->data[i]);
 725		return 1;
 726	}
 727	i++;
 728
 729	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
 730	rxseq = skb->data[i++];	/* Should confirm our sequence */
 731
 732	txseq = state(hdlc)->txseq;
 733
 734	if (dce)
 735		state(hdlc)->last_poll = jiffies;
 736
 737	error = 0;
 738	if (!state(hdlc)->reliable)
 739		error = 1;
 740
 741	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
 742		state(hdlc)->n391cnt = 0;
 743		error = 1;
 744	}
 745
 746	if (dce) {
 747		if (state(hdlc)->fullrep_sent && !error) {
 748/* Stop sending full report - the last one has been confirmed by DTE */
 749			state(hdlc)->fullrep_sent = 0;
 750			pvc = state(hdlc)->first_pvc;
 751			while (pvc) {
 752				if (pvc->state.new) {
 753					pvc->state.new = 0;
 754
 755/* Tell DTE that new PVC is now active */
 756					state(hdlc)->dce_changed = 1;
 757				}
 758				pvc = pvc->next;
 759			}
 760		}
 761
 762		if (state(hdlc)->dce_changed) {
 763			reptype = LMI_FULLREP;
 764			state(hdlc)->fullrep_sent = 1;
 765			state(hdlc)->dce_changed = 0;
 766		}
 767
 768		state(hdlc)->request = 1; /* got request */
 769		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
 770		return 0;
 771	}
 772
 773	/* DTE */
 774
 775	state(hdlc)->request = 0; /* got response, no request pending */
 776
 777	if (error)
 778		return 0;
 779
 780	if (reptype != LMI_FULLREP)
 781		return 0;
 782
 783	pvc = state(hdlc)->first_pvc;
 784
 785	while (pvc) {
 786		pvc->state.deleted = 1;
 787		pvc = pvc->next;
 788	}
 789
 790	no_ram = 0;
 791	while (skb->len >= i + 2 + stat_len) {
 792		u16 dlci;
 793		u32 bw;
 794		unsigned int active, new;
 795
 796		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
 797				       LMI_ANSI_CISCO_PVCSTAT)) {
 798			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
 799				    skb->data[i]);
 800			return 1;
 801		}
 802
 803		if (skb->data[++i] != stat_len) {
 804			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
 805				    skb->data[i]);
 806			return 1;
 807		}
 808		i++;
 809
 810		new = !! (skb->data[i + 2] & 0x08);
 811		active = !! (skb->data[i + 2] & 0x02);
 812		if (lmi == LMI_CISCO) {
 813			dlci = (skb->data[i] << 8) | skb->data[i + 1];
 814			bw = (skb->data[i + 3] << 16) |
 815				(skb->data[i + 4] << 8) |
 816				(skb->data[i + 5]);
 817		} else {
 818			dlci = ((skb->data[i] & 0x3F) << 4) |
 819				((skb->data[i + 1] & 0x78) >> 3);
 820			bw = 0;
 821		}
 822
 823		pvc = add_pvc(dev, dlci);
 824
 825		if (!pvc && !no_ram) {
 826			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
 827			no_ram = 1;
 828		}
 829
 830		if (pvc) {
 831			pvc->state.exist = 1;
 832			pvc->state.deleted = 0;
 833			if (active != pvc->state.active ||
 834			    new != pvc->state.new ||
 835			    bw != pvc->state.bandwidth ||
 836			    !pvc->state.exist) {
 837				pvc->state.new = new;
 838				pvc->state.active = active;
 839				pvc->state.bandwidth = bw;
 840				pvc_carrier(active, pvc);
 841				fr_log_dlci_active(pvc);
 842			}
 843		}
 844
 845		i += stat_len;
 846	}
 847
 848	pvc = state(hdlc)->first_pvc;
 849
 850	while (pvc) {
 851		if (pvc->state.deleted && pvc->state.exist) {
 852			pvc_carrier(0, pvc);
 853			pvc->state.active = pvc->state.new = 0;
 854			pvc->state.exist = 0;
 855			pvc->state.bandwidth = 0;
 856			fr_log_dlci_active(pvc);
 857		}
 858		pvc = pvc->next;
 859	}
 860
 861	/* Next full report after N391 polls */
 862	state(hdlc)->n391cnt = state(hdlc)->settings.n391;
 863
 864	return 0;
 865}
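Worked example of one ANSI/CCITT PVC status IE as parsed above: the bytes 07 03 06 98 82 decode as IE id 0x07, length 3, DLCI ((0x06 & 0x3F) << 4) | ((0x98 & 0x78) >> 3) = 99, new = 0, active = 1. (These sample bytes are constructed to match the encoding used in fr_lmi_send(), not captured from a real link.)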
 866
 867
 868static int fr_rx(struct sk_buff *skb)
 869{
 870	struct net_device *frad = skb->dev;
 871	hdlc_device *hdlc = dev_to_hdlc(frad);
 872	fr_hdr *fh = (fr_hdr*)skb->data;
 873	u8 *data = skb->data;
 874	u16 dlci;
 875	pvc_device *pvc;
 876	struct net_device *dev = NULL;
 877
 878	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
 879		goto rx_error;
 880
 881	dlci = q922_to_dlci(skb->data);
 882
 883	if ((dlci == LMI_CCITT_ANSI_DLCI &&
 884	     (state(hdlc)->settings.lmi == LMI_ANSI ||
 885	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
 886	    (dlci == LMI_CISCO_DLCI &&
 887	     state(hdlc)->settings.lmi == LMI_CISCO)) {
 888		if (fr_lmi_recv(frad, skb))
 889			goto rx_error;
 890		dev_kfree_skb_any(skb);
 891		return NET_RX_SUCCESS;
 892	}
 893
 894	pvc = find_pvc(hdlc, dlci);
 895	if (!pvc) {
 896#ifdef DEBUG_PKT
 897		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
 898			    dlci);
 899#endif
 900		dev_kfree_skb_any(skb);
 901		return NET_RX_DROP;
 902	}
 903
 904	if (pvc->state.fecn != fh->fecn) {
 905#ifdef DEBUG_ECN
 906		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
 907		       dlci, fh->fecn ? "N" : "FF");
 908#endif
 909		pvc->state.fecn ^= 1;
 910	}
 911
 912	if (pvc->state.becn != fh->becn) {
 913#ifdef DEBUG_ECN
 914		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
 915		       dlci, fh->becn ? "N" : "FF");
 916#endif
 917		pvc->state.becn ^= 1;
 918	}
 919
 920
 921	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
 922		frad->stats.rx_dropped++;
 923		return NET_RX_DROP;
 924	}
 925
 926	if (data[3] == NLPID_IP) {
 927		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
 928		dev = pvc->main;
 929		skb->protocol = htons(ETH_P_IP);
 930
 931	} else if (data[3] == NLPID_IPV6) {
 932		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
 933		dev = pvc->main;
 934		skb->protocol = htons(ETH_P_IPV6);
 935
 936	} else if (skb->len > 10 && data[3] == FR_PAD &&
 937		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
 938		u16 oui = ntohs(*(__be16*)(data + 6));
 939		u16 pid = ntohs(*(__be16*)(data + 8));
 940		skb_pull(skb, 10);
 941
 942		switch ((((u32)oui) << 16) | pid) {
 943		case ETH_P_ARP: /* routed frame with SNAP */
 944		case ETH_P_IPX:
 945		case ETH_P_IP:	/* a long variant */
 946		case ETH_P_IPV6:
 947			dev = pvc->main;
 948			skb->protocol = htons(pid);
 949			break;
 950
 951		case 0x80C20007: /* bridged Ethernet frame */
 952			if ((dev = pvc->ether) != NULL)
 953				skb->protocol = eth_type_trans(skb, dev);
 954			break;
 955
 956		default:
 957			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
 958				    oui, pid);
 959			dev_kfree_skb_any(skb);
 960			return NET_RX_DROP;
 961		}
 962	} else {
 963		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
 964			    data[3], skb->len);
 965		dev_kfree_skb_any(skb);
 966		return NET_RX_DROP;
 967	}
 968
 969	if (dev) {
 970		dev->stats.rx_packets++; /* PVC traffic */
 971		dev->stats.rx_bytes += skb->len;
 972		if (pvc->state.becn)
 973			dev->stats.rx_compressed++;
 974		skb->dev = dev;
 975		netif_rx(skb);
 976		return NET_RX_SUCCESS;
 977	} else {
 978		dev_kfree_skb_any(skb);
 979		return NET_RX_DROP;
 980	}
 981
 982 rx_error:
 983	frad->stats.rx_errors++; /* Mark error */
 984	dev_kfree_skb_any(skb);
 985	return NET_RX_DROP;
 986}
 987
 988
 989
 990static void fr_start(struct net_device *dev)
 991{
 992	hdlc_device *hdlc = dev_to_hdlc(dev);
 993#ifdef DEBUG_LINK
 994	printk(KERN_DEBUG "fr_start\n");
 995#endif
 996	if (state(hdlc)->settings.lmi != LMI_NONE) {
 997		state(hdlc)->reliable = 0;
 998		state(hdlc)->dce_changed = 1;
 999		state(hdlc)->request = 0;
1000		state(hdlc)->fullrep_sent = 0;
1001		state(hdlc)->last_errors = 0xFFFFFFFF;
1002		state(hdlc)->n391cnt = 0;
1003		state(hdlc)->txseq = state(hdlc)->rxseq = 0;
1004
1005		init_timer(&state(hdlc)->timer);
1006		/* First poll after 1 s */
1007		state(hdlc)->timer.expires = jiffies + HZ;
1008		state(hdlc)->timer.function = fr_timer;
1009		state(hdlc)->timer.data = (unsigned long)dev;
1010		add_timer(&state(hdlc)->timer);
1011	} else
1012		fr_set_link_state(1, dev);
1013}
1014
1015
1016static void fr_stop(struct net_device *dev)
1017{
1018	hdlc_device *hdlc = dev_to_hdlc(dev);
1019#ifdef DEBUG_LINK
1020	printk(KERN_DEBUG "fr_stop\n");
1021#endif
1022	if (state(hdlc)->settings.lmi != LMI_NONE)
1023		del_timer_sync(&state(hdlc)->timer);
1024	fr_set_link_state(0, dev);
1025}
1026
1027
1028static void fr_close(struct net_device *dev)
1029{
1030	hdlc_device *hdlc = dev_to_hdlc(dev);
1031	pvc_device *pvc = state(hdlc)->first_pvc;
1032
1033	while (pvc) {		/* Shutdown all PVCs for this FRAD */
1034		if (pvc->main)
1035			dev_close(pvc->main);
1036		if (pvc->ether)
1037			dev_close(pvc->ether);
1038		pvc = pvc->next;
1039	}
1040}
1041
1042
1043static void pvc_setup(struct net_device *dev)
1044{
1045	dev->type = ARPHRD_DLCI;
1046	dev->flags = IFF_POINTOPOINT;
1047	dev->hard_header_len = 10;
1048	dev->addr_len = 2;
1049	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1050}
1051
1052static const struct net_device_ops pvc_ops = {
1053	.ndo_open       = pvc_open,
1054	.ndo_stop       = pvc_close,
1055	.ndo_change_mtu = hdlc_change_mtu,
1056	.ndo_start_xmit = pvc_xmit,
1057	.ndo_do_ioctl   = pvc_ioctl,
1058};
1059
1060static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1061{
1062	hdlc_device *hdlc = dev_to_hdlc(frad);
1063	pvc_device *pvc;
1064	struct net_device *dev;
1065	int used;
1066
1067	if ((pvc = add_pvc(frad, dlci)) == NULL) {
1068		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
1069		return -ENOBUFS;
1070	}
1071
1072	if (*get_dev_p(pvc, type))
1073		return -EEXIST;
1074
1075	used = pvc_is_used(pvc);
1076
1077	if (type == ARPHRD_ETHER) {
1078		dev = alloc_netdev(0, "pvceth%d", ether_setup);
1079		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1080	} else
1081		dev = alloc_netdev(0, "pvc%d", pvc_setup);
1082
1083	if (!dev) {
1084		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
1085		delete_unused_pvcs(hdlc);
1086		return -ENOBUFS;
1087	}
1088
1089	if (type == ARPHRD_ETHER)
1090		eth_hw_addr_random(dev);
1091	else {
1092		*(__be16*)dev->dev_addr = htons(dlci);
1093		dlci_to_q922(dev->broadcast, dlci);
1094	}
1095	dev->netdev_ops = &pvc_ops;
1096	dev->mtu = HDLC_MAX_MTU;
1097	dev->tx_queue_len = 0;
1098	dev->ml_priv = pvc;
1099
1100	if (register_netdevice(dev) != 0) {
1101		free_netdev(dev);
1102		delete_unused_pvcs(hdlc);
1103		return -EIO;
1104	}
1105
1106	dev->destructor = free_netdev;
1107	*get_dev_p(pvc, type) = dev;
1108	if (!used) {
1109		state(hdlc)->dce_changed = 1;
1110		state(hdlc)->dce_pvc_count++;
1111	}
1112	return 0;
1113}
1114
1115
1116
1117static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
1118{
1119	pvc_device *pvc;
1120	struct net_device *dev;
1121
1122	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
1123		return -ENOENT;
1124
1125	if ((dev = *get_dev_p(pvc, type)) == NULL)
1126		return -ENOENT;
1127
1128	if (dev->flags & IFF_UP)
1129		return -EBUSY;		/* PVC in use */
1130
1131	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
1132	*get_dev_p(pvc, type) = NULL;
1133
1134	if (!pvc_is_used(pvc)) {
1135		state(hdlc)->dce_pvc_count--;
1136		state(hdlc)->dce_changed = 1;
1137	}
1138	delete_unused_pvcs(hdlc);
1139	return 0;
1140}
1141
1142
1143
1144static void fr_destroy(struct net_device *frad)
1145{
1146	hdlc_device *hdlc = dev_to_hdlc(frad);
1147	pvc_device *pvc = state(hdlc)->first_pvc;
1148	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
1149	state(hdlc)->dce_pvc_count = 0;
1150	state(hdlc)->dce_changed = 1;
1151
1152	while (pvc) {
1153		pvc_device *next = pvc->next;
1154		/* destructors will free_netdev() main and ether */
1155		if (pvc->main)
1156			unregister_netdevice(pvc->main);
1157
1158		if (pvc->ether)
1159			unregister_netdevice(pvc->ether);
1160
1161		kfree(pvc);
1162		pvc = next;
1163	}
1164}
1165
1166
1167static struct hdlc_proto proto = {
1168	.close		= fr_close,
1169	.start		= fr_start,
1170	.stop		= fr_stop,
1171	.detach		= fr_destroy,
1172	.ioctl		= fr_ioctl,
1173	.netif_rx	= fr_rx,
1174	.module		= THIS_MODULE,
1175};
1176
1177
1178static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1179{
1180	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
1181	const size_t size = sizeof(fr_proto);
1182	fr_proto new_settings;
1183	hdlc_device *hdlc = dev_to_hdlc(dev);
1184	fr_proto_pvc pvc;
1185	int result;
1186
1187	switch (ifr->ifr_settings.type) {
1188	case IF_GET_PROTO:
1189		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1190			return -EINVAL;
1191		ifr->ifr_settings.type = IF_PROTO_FR;
1192		if (ifr->ifr_settings.size < size) {
1193			ifr->ifr_settings.size = size; /* data size wanted */
1194			return -ENOBUFS;
1195		}
1196		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
1197			return -EFAULT;
1198		return 0;
1199
1200	case IF_PROTO_FR:
1201		if (!capable(CAP_NET_ADMIN))
1202			return -EPERM;
1203
1204		if (dev->flags & IFF_UP)
1205			return -EBUSY;
1206
1207		if (copy_from_user(&new_settings, fr_s, size))
1208			return -EFAULT;
1209
1210		if (new_settings.lmi == LMI_DEFAULT)
1211			new_settings.lmi = LMI_ANSI;
1212
1213		if ((new_settings.lmi != LMI_NONE &&
1214		     new_settings.lmi != LMI_ANSI &&
1215		     new_settings.lmi != LMI_CCITT &&
1216		     new_settings.lmi != LMI_CISCO) ||
1217		    new_settings.t391 < 1 ||
1218		    new_settings.t392 < 2 ||
1219		    new_settings.n391 < 1 ||
1220		    new_settings.n392 < 1 ||
1221		    new_settings.n393 < new_settings.n392 ||
1222		    new_settings.n393 > 32 ||
1223		    (new_settings.dce != 0 &&
1224		     new_settings.dce != 1))
1225			return -EINVAL;
1226
1227		result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
1228		if (result)
1229			return result;
1230
1231		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
1232			result = attach_hdlc_protocol(dev, &proto,
1233						      sizeof(struct frad_state));
1234			if (result)
1235				return result;
1236			state(hdlc)->first_pvc = NULL;
1237			state(hdlc)->dce_pvc_count = 0;
1238		}
1239		memcpy(&state(hdlc)->settings, &new_settings, size);
1240		dev->type = ARPHRD_FRAD;
1241		return 0;
1242
1243	case IF_PROTO_FR_ADD_PVC:
1244	case IF_PROTO_FR_DEL_PVC:
1245	case IF_PROTO_FR_ADD_ETH_PVC:
1246	case IF_PROTO_FR_DEL_ETH_PVC:
1247		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1248			return -EINVAL;
1249
1250		if (!capable(CAP_NET_ADMIN))
1251			return -EPERM;
1252
1253		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
1254				   sizeof(fr_proto_pvc)))
1255			return -EFAULT;
1256
1257		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
1258			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */
1259
1260		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
1261		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
1262			result = ARPHRD_ETHER; /* bridged Ethernet device */
1263		else
1264			result = ARPHRD_DLCI;
1265
1266		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
1267		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
1268			return fr_add_pvc(dev, pvc.dlci, result);
1269		else
1270			return fr_del_pvc(hdlc, pvc.dlci, result);
1271	}
1272
1273	return -EINVAL;
1274}
1275
1276
1277static int __init mod_init(void)
1278{
1279	register_hdlc_protocol(&proto);
1280	return 0;
1281}
1282
1283
1284static void __exit mod_exit(void)
1285{
1286	unregister_hdlc_protocol(&proto);
1287}
1288
1289
1290module_init(mod_init);
1291module_exit(mod_exit);
1292
1293MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
1294MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
1295MODULE_LICENSE("GPL v2");
v5.4 (drivers/net/wan/hdlc_fr.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Generic HDLC support routines for Linux
   4 * Frame Relay support
   5 *
   6 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
   7 *
   8
   9            Theory of PVC state
  10
  11 DCE mode:
  12
  13 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
  14         0,x -> 1,1 if "link reliable" when sending FULL STATUS
  15         1,1 -> 1,0 if received FULL STATUS ACK
  16
  17 (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
  18             -> 1 when "PVC up" and (exist,new) = 1,0
  19
  20 DTE mode:
  21 (exist,new,active) = FULL STATUS if "link reliable"
  22		    = 0, 0, 0 if "link unreliable"
  23 No LMI:
  24 active = open and "link reliable"
  25 exist = new = not used
  26
  27 CCITT LMI: ITU-T Q.933 Annex A
  28 ANSI LMI: ANSI T1.617 Annex D
  29 CISCO LMI: the original, aka "Gang of Four" LMI
  30
  31*/
  32
  33#include <linux/errno.h>
  34#include <linux/etherdevice.h>
  35#include <linux/hdlc.h>
  36#include <linux/if_arp.h>
  37#include <linux/inetdevice.h>
  38#include <linux/init.h>
  39#include <linux/kernel.h>
  40#include <linux/module.h>
  41#include <linux/pkt_sched.h>
  42#include <linux/poll.h>
  43#include <linux/rtnetlink.h>
  44#include <linux/skbuff.h>
  45#include <linux/slab.h>
  46
  47#undef DEBUG_PKT
  48#undef DEBUG_ECN
  49#undef DEBUG_LINK
  50#undef DEBUG_PROTO
  51#undef DEBUG_PVC
  52
  53#define FR_UI			0x03
  54#define FR_PAD			0x00
  55
  56#define NLPID_IP		0xCC
  57#define NLPID_IPV6		0x8E
  58#define NLPID_SNAP		0x80
  59#define NLPID_PAD		0x00
  60#define NLPID_CCITT_ANSI_LMI	0x08
  61#define NLPID_CISCO_LMI		0x09
  62
  63
  64#define LMI_CCITT_ANSI_DLCI	   0 /* LMI DLCI */
  65#define LMI_CISCO_DLCI		1023
  66
  67#define LMI_CALLREF		0x00 /* Call Reference */
  68#define LMI_ANSI_LOCKSHIFT	0x95 /* ANSI locking shift */
  69#define LMI_ANSI_CISCO_REPTYPE	0x01 /* report type */
  70#define LMI_CCITT_REPTYPE	0x51
  71#define LMI_ANSI_CISCO_ALIVE	0x03 /* keep alive */
  72#define LMI_CCITT_ALIVE		0x53
  73#define LMI_ANSI_CISCO_PVCSTAT	0x07 /* PVC status */
  74#define LMI_CCITT_PVCSTAT	0x57
  75
  76#define LMI_FULLREP		0x00 /* full report  */
  77#define LMI_INTEGRITY		0x01 /* link integrity report */
  78#define LMI_SINGLE		0x02 /* single PVC report */
  79
  80#define LMI_STATUS_ENQUIRY      0x75
  81#define LMI_STATUS              0x7D /* reply */
  82
  83#define LMI_REPT_LEN               1 /* report type element length */
  84#define LMI_INTEG_LEN              2 /* link integrity element length */
  85
  86#define LMI_CCITT_CISCO_LENGTH	  13 /* LMI frame lengths */
  87#define LMI_ANSI_LENGTH		  14
  88
  89
  90struct fr_hdr {
  91#if defined(__LITTLE_ENDIAN_BITFIELD)
  92	unsigned ea1:	1;
  93	unsigned cr:	1;
  94	unsigned dlcih:	6;
  95
  96	unsigned ea2:	1;
  97	unsigned de:	1;
  98	unsigned becn:	1;
  99	unsigned fecn:	1;
 100	unsigned dlcil:	4;
 101#else
 102	unsigned dlcih:	6;
 103	unsigned cr:	1;
 104	unsigned ea1:	1;
 105
 106	unsigned dlcil:	4;
 107	unsigned fecn:	1;
 108	unsigned becn:	1;
 109	unsigned de:	1;
 110	unsigned ea2:	1;
 111#endif
 112} __packed;
 113
 114
 115struct pvc_device {
 116	struct net_device *frad;
 117	struct net_device *main;
 118	struct net_device *ether;	/* bridged Ethernet interface	*/
 119	struct pvc_device *next;	/* Sorted in ascending DLCI order */
 120	int dlci;
 121	int open_count;
 122
 123	struct {
 124		unsigned int new: 1;
 125		unsigned int active: 1;
 126		unsigned int exist: 1;
 127		unsigned int deleted: 1;
 128		unsigned int fecn: 1;
 129		unsigned int becn: 1;
 130		unsigned int bandwidth;	/* Cisco LMI reporting only */
 131	}state;
 132};
 133
 134struct frad_state {
 135	fr_proto settings;
 136	struct pvc_device *first_pvc;
 137	int dce_pvc_count;
 138
 139	struct timer_list timer;
 140	struct net_device *dev;
 141	unsigned long last_poll;
 142	int reliable;
 143	int dce_changed;
 144	int request;
 145	int fullrep_sent;
 146	u32 last_errors; /* last errors bit list */
 147	u8 n391cnt;
 148	u8 txseq; /* TX sequence number */
 149	u8 rxseq; /* RX sequence number */
 150};
 151
 152
 153static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
 154
 155
 156static inline u16 q922_to_dlci(u8 *hdr)
 157{
 158	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
 159}
 160
 161
 162static inline void dlci_to_q922(u8 *hdr, u16 dlci)
 163{
 164	hdr[0] = (dlci >> 2) & 0xFC;
 165	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
 166}
 167
 168
 169static inline struct frad_state* state(hdlc_device *hdlc)
 170{
 171	return(struct frad_state *)(hdlc->state);
 172}
 173
 174
 175static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
 176{
 177	struct pvc_device *pvc = state(hdlc)->first_pvc;
 178
 179	while (pvc) {
 180		if (pvc->dlci == dlci)
 181			return pvc;
 182		if (pvc->dlci > dlci)
 183			return NULL; /* the list is sorted */
 184		pvc = pvc->next;
 185	}
 186
 187	return NULL;
 188}
 189
 190
 191static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
 192{
 193	hdlc_device *hdlc = dev_to_hdlc(dev);
 194	struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;
 195
 196	while (*pvc_p) {
 197		if ((*pvc_p)->dlci == dlci)
 198			return *pvc_p;
 199		if ((*pvc_p)->dlci > dlci)
 200			break;	/* the list is sorted */
 201		pvc_p = &(*pvc_p)->next;
 202	}
 203
 204	pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
 205#ifdef DEBUG_PVC
 206	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
 207#endif
 208	if (!pvc)
 209		return NULL;
 210
 211	pvc->dlci = dlci;
 212	pvc->frad = dev;
 213	pvc->next = *pvc_p;	/* Put it in the chain */
 214	*pvc_p = pvc;
 215	return pvc;
 216}
 217
 218
 219static inline int pvc_is_used(struct pvc_device *pvc)
 220{
 221	return pvc->main || pvc->ether;
 222}
 223
 224
 225static inline void pvc_carrier(int on, struct pvc_device *pvc)
 226{
 227	if (on) {
 228		if (pvc->main)
 229			if (!netif_carrier_ok(pvc->main))
 230				netif_carrier_on(pvc->main);
 231		if (pvc->ether)
 232			if (!netif_carrier_ok(pvc->ether))
 233				netif_carrier_on(pvc->ether);
 234	} else {
 235		if (pvc->main)
 236			if (netif_carrier_ok(pvc->main))
 237				netif_carrier_off(pvc->main);
 238		if (pvc->ether)
 239			if (netif_carrier_ok(pvc->ether))
 240				netif_carrier_off(pvc->ether);
 241	}
 242}
 243
 244
 245static inline void delete_unused_pvcs(hdlc_device *hdlc)
 246{
 247	struct pvc_device **pvc_p = &state(hdlc)->first_pvc;
 248
 249	while (*pvc_p) {
 250		if (!pvc_is_used(*pvc_p)) {
 251			struct pvc_device *pvc = *pvc_p;
 252#ifdef DEBUG_PVC
 253			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
 254#endif
 255			*pvc_p = pvc->next;
 256			kfree(pvc);
 257			continue;
 258		}
 259		pvc_p = &(*pvc_p)->next;
 260	}
 261}
 262
 263
 264static inline struct net_device **get_dev_p(struct pvc_device *pvc,
 265					    int type)
 266{
 267	if (type == ARPHRD_ETHER)
 268		return &pvc->ether;
 269	else
 270		return &pvc->main;
 271}
 272
 273
 274static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
 275{
 276	u16 head_len;
 277	struct sk_buff *skb = *skb_p;
 278
 279	switch (skb->protocol) {
 280	case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
 281		head_len = 4;
 282		skb_push(skb, head_len);
 283		skb->data[3] = NLPID_CCITT_ANSI_LMI;
 284		break;
 285
 286	case cpu_to_be16(NLPID_CISCO_LMI):
 287		head_len = 4;
 288		skb_push(skb, head_len);
 289		skb->data[3] = NLPID_CISCO_LMI;
 290		break;
 291
 292	case cpu_to_be16(ETH_P_IP):
 293		head_len = 4;
 294		skb_push(skb, head_len);
 295		skb->data[3] = NLPID_IP;
 296		break;
 297
 298	case cpu_to_be16(ETH_P_IPV6):
 299		head_len = 4;
 300		skb_push(skb, head_len);
 301		skb->data[3] = NLPID_IPV6;
 302		break;
 303
 304	case cpu_to_be16(ETH_P_802_3):
 305		head_len = 10;
 306		if (skb_headroom(skb) < head_len) {
 307			struct sk_buff *skb2 = skb_realloc_headroom(skb,
 308								    head_len);
 309			if (!skb2)
 310				return -ENOBUFS;
 311			dev_kfree_skb(skb);
 312			skb = *skb_p = skb2;
 313		}
 314		skb_push(skb, head_len);
 315		skb->data[3] = FR_PAD;
 316		skb->data[4] = NLPID_SNAP;
 317		skb->data[5] = FR_PAD;
 318		skb->data[6] = 0x80;
 319		skb->data[7] = 0xC2;
 320		skb->data[8] = 0x00;
 321		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
 322		break;
 323
 324	default:
 325		head_len = 10;
 326		skb_push(skb, head_len);
 327		skb->data[3] = FR_PAD;
 328		skb->data[4] = NLPID_SNAP;
 329		skb->data[5] = FR_PAD;
 330		skb->data[6] = FR_PAD;
 331		skb->data[7] = FR_PAD;
 332		*(__be16*)(skb->data + 8) = skb->protocol;
 333	}
 334
 335	dlci_to_q922(skb->data, dlci);
 336	skb->data[2] = FR_UI;
 337	return 0;
 338}
 339
 340
 341
 342static int pvc_open(struct net_device *dev)
 343{
 344	struct pvc_device *pvc = dev->ml_priv;
 345
 346	if ((pvc->frad->flags & IFF_UP) == 0)
 347		return -EIO;  /* Frad must be UP in order to activate PVC */
 348
 349	if (pvc->open_count++ == 0) {
 350		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
 351		if (state(hdlc)->settings.lmi == LMI_NONE)
 352			pvc->state.active = netif_carrier_ok(pvc->frad);
 353
 354		pvc_carrier(pvc->state.active, pvc);
 355		state(hdlc)->dce_changed = 1;
 356	}
 357	return 0;
 358}
 359
 360
 361
 362static int pvc_close(struct net_device *dev)
 363{
 364	struct pvc_device *pvc = dev->ml_priv;
 365
 366	if (--pvc->open_count == 0) {
 367		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
 368		if (state(hdlc)->settings.lmi == LMI_NONE)
 369			pvc->state.active = 0;
 370
 371		if (state(hdlc)->settings.dce) {
 372			state(hdlc)->dce_changed = 1;
 373			pvc->state.active = 0;
 374		}
 375	}
 376	return 0;
 377}
 378
 379
 380
 381static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 382{
 383	struct pvc_device *pvc = dev->ml_priv;
 384	fr_proto_pvc_info info;
 385
 386	if (ifr->ifr_settings.type == IF_GET_PROTO) {
 387		if (dev->type == ARPHRD_ETHER)
 388			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
 389		else
 390			ifr->ifr_settings.type = IF_PROTO_FR_PVC;
 391
 392		if (ifr->ifr_settings.size < sizeof(info)) {
 393			/* data size wanted */
 394			ifr->ifr_settings.size = sizeof(info);
 395			return -ENOBUFS;
 396		}
 397
 398		info.dlci = pvc->dlci;
 399		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
 400		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
 401				 &info, sizeof(info)))
 402			return -EFAULT;
 403		return 0;
 404	}
 405
 406	return -EINVAL;
 407}
 408
 409static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 410{
 411	struct pvc_device *pvc = dev->ml_priv;
 412
 413	if (pvc->state.active) {
 414		if (dev->type == ARPHRD_ETHER) {
 415			int pad = ETH_ZLEN - skb->len;
 416			if (pad > 0) { /* Pad the frame with zeros */
 417				int len = skb->len;
 418				if (skb_tailroom(skb) < pad)
 419					if (pskb_expand_head(skb, 0, pad,
 420							     GFP_ATOMIC)) {
 421						dev->stats.tx_dropped++;
 422						dev_kfree_skb(skb);
 423						return NETDEV_TX_OK;
 424					}
 425				skb_put(skb, pad);
 426				memset(skb->data + len, 0, pad);
 427			}
 428			skb->protocol = cpu_to_be16(ETH_P_802_3);
 429		}
 430		if (!fr_hard_header(&skb, pvc->dlci)) {
 431			dev->stats.tx_bytes += skb->len;
 432			dev->stats.tx_packets++;
 433			if (pvc->state.fecn) /* TX Congestion counter */
 434				dev->stats.tx_compressed++;
 435			skb->dev = pvc->frad;
 436			dev_queue_xmit(skb);
 437			return NETDEV_TX_OK;
 438		}
 439	}
 440
 441	dev->stats.tx_dropped++;
 442	dev_kfree_skb(skb);
 443	return NETDEV_TX_OK;
 444}
 445
 446static inline void fr_log_dlci_active(struct pvc_device *pvc)
 447{
 448	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
 449		    pvc->dlci,
 450		    pvc->main ? pvc->main->name : "",
 451		    pvc->main && pvc->ether ? " " : "",
 452		    pvc->ether ? pvc->ether->name : "",
 453		    pvc->state.new ? " new" : "",
 454		    !pvc->state.exist ? "deleted" :
 455		    pvc->state.active ? "active" : "inactive");
 456}
 457
 458
 459
 460static inline u8 fr_lmi_nextseq(u8 x)
 461{
 462	x++;
 463	return x ? x : 1;
 464}
 465
 466
 467static void fr_lmi_send(struct net_device *dev, int fullrep)
 468{
 469	hdlc_device *hdlc = dev_to_hdlc(dev);
 470	struct sk_buff *skb;
 471	struct pvc_device *pvc = state(hdlc)->first_pvc;
 472	int lmi = state(hdlc)->settings.lmi;
 473	int dce = state(hdlc)->settings.dce;
 474	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
 475	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
 476	u8 *data;
 477	int i = 0;
 478
 479	if (dce && fullrep) {
 480		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
 481		if (len > HDLC_MAX_MRU) {
 482			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
 483			return;
 484		}
 485	}
 486
 487	skb = dev_alloc_skb(len);
 488	if (!skb) {
 489		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
 490		return;
 491	}
 492	memset(skb->data, 0, len);
 493	skb_reserve(skb, 4);
 494	if (lmi == LMI_CISCO) {
 495		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
 496		fr_hard_header(&skb, LMI_CISCO_DLCI);
 497	} else {
 498		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
 499		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
 500	}
 501	data = skb_tail_pointer(skb);
 502	data[i++] = LMI_CALLREF;
 503	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
 504	if (lmi == LMI_ANSI)
 505		data[i++] = LMI_ANSI_LOCKSHIFT;
 506	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
 507		LMI_ANSI_CISCO_REPTYPE;
 508	data[i++] = LMI_REPT_LEN;
 509	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
 510	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
 511	data[i++] = LMI_INTEG_LEN;
 512	data[i++] = state(hdlc)->txseq =
 513		fr_lmi_nextseq(state(hdlc)->txseq);
 514	data[i++] = state(hdlc)->rxseq;
 515
 516	if (dce && fullrep) {
 517		while (pvc) {
 518			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
 519				LMI_ANSI_CISCO_PVCSTAT;
 520			data[i++] = stat_len;
 521
 522			/* LMI start/restart */
 523			if (state(hdlc)->reliable && !pvc->state.exist) {
 524				pvc->state.exist = pvc->state.new = 1;
 525				fr_log_dlci_active(pvc);
 526			}
 527
 528			/* ifconfig PVC up */
 529			if (pvc->open_count && !pvc->state.active &&
 530			    pvc->state.exist && !pvc->state.new) {
 531				pvc_carrier(1, pvc);
 532				pvc->state.active = 1;
 533				fr_log_dlci_active(pvc);
 534			}
 535
 536			if (lmi == LMI_CISCO) {
 537				data[i] = pvc->dlci >> 8;
 538				data[i + 1] = pvc->dlci & 0xFF;
 539			} else {
 540				data[i] = (pvc->dlci >> 4) & 0x3F;
 541				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
 542				data[i + 2] = 0x80;
 543			}
 544
 545			if (pvc->state.new)
 546				data[i + 2] |= 0x08;
 547			else if (pvc->state.active)
 548				data[i + 2] |= 0x02;
 549
 550			i += stat_len;
 551			pvc = pvc->next;
 552		}
 553	}
 554
 555	skb_put(skb, i);
 556	skb->priority = TC_PRIO_CONTROL;
 557	skb->dev = dev;
 558	skb_reset_network_header(skb);
 559
 560	dev_queue_xmit(skb);
 561}
 562
 563
 564
 565static void fr_set_link_state(int reliable, struct net_device *dev)
 566{
 567	hdlc_device *hdlc = dev_to_hdlc(dev);
 568	struct pvc_device *pvc = state(hdlc)->first_pvc;
 569
 570	state(hdlc)->reliable = reliable;
 571	if (reliable) {
 572		netif_dormant_off(dev);
 573		state(hdlc)->n391cnt = 0; /* Request full status */
 574		state(hdlc)->dce_changed = 1;
 575
 576		if (state(hdlc)->settings.lmi == LMI_NONE) {
 577			while (pvc) {	/* Activate all PVCs */
 578				pvc_carrier(1, pvc);
 579				pvc->state.exist = pvc->state.active = 1;
 580				pvc->state.new = 0;
 581				pvc = pvc->next;
 582			}
 583		}
 584	} else {
 585		netif_dormant_on(dev);
 586		while (pvc) {		/* Deactivate all PVCs */
 587			pvc_carrier(0, pvc);
 588			pvc->state.exist = pvc->state.active = 0;
 589			pvc->state.new = 0;
 590			if (!state(hdlc)->settings.dce)
 591				pvc->state.bandwidth = 0;
 592			pvc = pvc->next;
 593		}
 594	}
 595}
 596
 597
 598static void fr_timer(struct timer_list *t)
 599{
 600	struct frad_state *st = from_timer(st, t, timer);
 601	struct net_device *dev = st->dev;
 602	hdlc_device *hdlc = dev_to_hdlc(dev);
 603	int i, cnt = 0, reliable;
 604	u32 list;
 605
 606	if (state(hdlc)->settings.dce) {
 607		reliable = state(hdlc)->request &&
 608			time_before(jiffies, state(hdlc)->last_poll +
 609				    state(hdlc)->settings.t392 * HZ);
 610		state(hdlc)->request = 0;
 611	} else {
 612		state(hdlc)->last_errors <<= 1; /* Shift the list */
 613		if (state(hdlc)->request) {
 614			if (state(hdlc)->reliable)
 615				netdev_info(dev, "No LMI status reply received\n");
 616			state(hdlc)->last_errors |= 1;
 617		}
 618
 619		list = state(hdlc)->last_errors;
 620		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
 621			cnt += (list & 1);	/* errors count */
 622
 623		reliable = (cnt < state(hdlc)->settings.n392);
 624	}
 625
 626	if (state(hdlc)->reliable != reliable) {
 627		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
 628		fr_set_link_state(reliable, dev);
 629	}
 630
 631	if (state(hdlc)->settings.dce)
 632		state(hdlc)->timer.expires = jiffies +
 633			state(hdlc)->settings.t392 * HZ;
 634	else {
 635		if (state(hdlc)->n391cnt)
 636			state(hdlc)->n391cnt--;
 637
 638		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);
 639
 640		state(hdlc)->last_poll = jiffies;
 641		state(hdlc)->request = 1;
 642		state(hdlc)->timer.expires = jiffies +
 643			state(hdlc)->settings.t391 * HZ;
 644	}
 645
 646	add_timer(&state(hdlc)->timer);
 647}
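The v5.4 timer callback above recovers its net_device through from_timer() and the dev field added to struct frad_state; the matching initialization lives in fr_start(), which is outside this excerpt, and presumably follows the standard timer_setup() pattern, roughly:

	/* Sketch of the expected v5.4 setup in fr_start() (not shown above);
	 * the exact code in the real source may differ slightly. */
	state(hdlc)->dev = dev;
	timer_setup(&state(hdlc)->timer, fr_timer, 0);
	state(hdlc)->timer.expires = jiffies + HZ;	/* first poll after 1 s */
	add_timer(&state(hdlc)->timer);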
 648
 649
 650static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
 651{
 652	hdlc_device *hdlc = dev_to_hdlc(dev);
 653	struct pvc_device *pvc;
 654	u8 rxseq, txseq;
 655	int lmi = state(hdlc)->settings.lmi;
 656	int dce = state(hdlc)->settings.dce;
 657	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
 658
 659	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
 660			LMI_CCITT_CISCO_LENGTH)) {
 661		netdev_info(dev, "Short LMI frame\n");
 662		return 1;
 663	}
 664
 665	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
 666			     NLPID_CCITT_ANSI_LMI)) {
 667		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
 668		return 1;
 669	}
 670
 671	if (skb->data[4] != LMI_CALLREF) {
 672		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
 673			    skb->data[4]);
 674		return 1;
 675	}
 676
 677	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
 678		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
 679			    skb->data[5]);
 680		return 1;
 681	}
 682
 683	if (lmi == LMI_ANSI) {
 684		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
 685			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
 686				    skb->data[6]);
 687			return 1;
 688		}
 689		i = 7;
 690	} else
 691		i = 6;
 692
 693	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
 694			     LMI_ANSI_CISCO_REPTYPE)) {
 695		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
 696			    skb->data[i]);
 697		return 1;
 698	}
 699
 700	if (skb->data[++i] != LMI_REPT_LEN) {
 701		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
 702			    skb->data[i]);
 703		return 1;
 704	}
 705
 706	reptype = skb->data[++i];
 707	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
 708		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
 709			    reptype);
 710		return 1;
 711	}
 712
 713	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
 714			       LMI_ANSI_CISCO_ALIVE)) {
 715		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
 716			    skb->data[i]);
 717		return 1;
 718	}
 719
 720	if (skb->data[++i] != LMI_INTEG_LEN) {
 721		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
 722			    skb->data[i]);
 723		return 1;
 724	}
 725	i++;
 726
 727	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
 728	rxseq = skb->data[i++];	/* Should confirm our sequence */
 729
 730	txseq = state(hdlc)->txseq;
 731
 732	if (dce)
 733		state(hdlc)->last_poll = jiffies;
 734
 735	error = 0;
 736	if (!state(hdlc)->reliable)
 737		error = 1;
 738
 739	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
 740		state(hdlc)->n391cnt = 0;
 741		error = 1;
 742	}
 743
 744	if (dce) {
 745		if (state(hdlc)->fullrep_sent && !error) {
 746/* Stop sending full report - the last one has been confirmed by DTE */
 747			state(hdlc)->fullrep_sent = 0;
 748			pvc = state(hdlc)->first_pvc;
 749			while (pvc) {
 750				if (pvc->state.new) {
 751					pvc->state.new = 0;
 752
 753/* Tell DTE that new PVC is now active */
 754					state(hdlc)->dce_changed = 1;
 755				}
 756				pvc = pvc->next;
 757			}
 758		}
 759
 760		if (state(hdlc)->dce_changed) {
 761			reptype = LMI_FULLREP;
 762			state(hdlc)->fullrep_sent = 1;
 763			state(hdlc)->dce_changed = 0;
 764		}
 765
 766		state(hdlc)->request = 1; /* got request */
 767		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
 768		return 0;
 769	}
 770
 771	/* DTE */
 772
 773	state(hdlc)->request = 0; /* got response, no request pending */
 774
 775	if (error)
 776		return 0;
 777
 778	if (reptype != LMI_FULLREP)
 779		return 0;
 780
 781	pvc = state(hdlc)->first_pvc;
 782
 783	while (pvc) {
 784		pvc->state.deleted = 1;
 785		pvc = pvc->next;
 786	}
 787
 788	no_ram = 0;
 789	while (skb->len >= i + 2 + stat_len) {
 790		u16 dlci;
 791		u32 bw;
 792		unsigned int active, new;
 793
 794		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
 795				       LMI_ANSI_CISCO_PVCSTAT)) {
 796			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
 797				    skb->data[i]);
 798			return 1;
 799		}
 800
 801		if (skb->data[++i] != stat_len) {
 802			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
 803				    skb->data[i]);
 804			return 1;
 805		}
 806		i++;
 807
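		/*
		 * Third octet of the PVC status field: bit 3 = "new",
		 * bit 1 = "active".  Cisco LMI carries a 16-bit DLCI plus a
		 * 24-bit bandwidth value; ANSI/CCITT pack a 10-bit DLCI into
		 * the first two octets and report no bandwidth.
		 */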
  808		new = !!(skb->data[i + 2] & 0x08);
  809		active = !!(skb->data[i + 2] & 0x02);
 810		if (lmi == LMI_CISCO) {
 811			dlci = (skb->data[i] << 8) | skb->data[i + 1];
 812			bw = (skb->data[i + 3] << 16) |
 813				(skb->data[i + 4] << 8) |
 814				(skb->data[i + 5]);
 815		} else {
 816			dlci = ((skb->data[i] & 0x3F) << 4) |
 817				((skb->data[i + 1] & 0x78) >> 3);
 818			bw = 0;
 819		}
 820
 821		pvc = add_pvc(dev, dlci);
 822
 823		if (!pvc && !no_ram) {
 824			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
 825			no_ram = 1;
 826		}
 827
 828		if (pvc) {
 829			pvc->state.exist = 1;
 830			pvc->state.deleted = 0;
 831			if (active != pvc->state.active ||
 832			    new != pvc->state.new ||
 833			    bw != pvc->state.bandwidth ||
 834			    !pvc->state.exist) {
 835				pvc->state.new = new;
 836				pvc->state.active = active;
 837				pvc->state.bandwidth = bw;
 838				pvc_carrier(active, pvc);
 839				fr_log_dlci_active(pvc);
 840			}
 841		}
 842
 843		i += stat_len;
 844	}
 845
 846	pvc = state(hdlc)->first_pvc;
 847
 848	while (pvc) {
 849		if (pvc->state.deleted && pvc->state.exist) {
 850			pvc_carrier(0, pvc);
 851			pvc->state.active = pvc->state.new = 0;
 852			pvc->state.exist = 0;
 853			pvc->state.bandwidth = 0;
 854			fr_log_dlci_active(pvc);
 855		}
 856		pvc = pvc->next;
 857	}
 858
 859	/* Next full report after N391 polls */
 860	state(hdlc)->n391cnt = state(hdlc)->settings.n391;
 861
 862	return 0;
 863}
 864
 865
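/*
 * Receive path for the FRAD: validate the Q.922/UI header, hand LMI
 * frames on the management DLCI to fr_lmi_recv(), otherwise look up the
 * PVC for the DLCI and pass the payload up as IP, IPv6, SNAP-routed or
 * bridged Ethernet traffic.  Frames arriving with BECN set are also
 * counted in the PVC's rx_compressed statistic.
 */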
 866static int fr_rx(struct sk_buff *skb)
 867{
 868	struct net_device *frad = skb->dev;
 869	hdlc_device *hdlc = dev_to_hdlc(frad);
 870	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
 871	u8 *data = skb->data;
 872	u16 dlci;
 873	struct pvc_device *pvc;
 874	struct net_device *dev = NULL;
 875
 876	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
 877		goto rx_error;
 878
 879	dlci = q922_to_dlci(skb->data);
 880
 881	if ((dlci == LMI_CCITT_ANSI_DLCI &&
 882	     (state(hdlc)->settings.lmi == LMI_ANSI ||
 883	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
 884	    (dlci == LMI_CISCO_DLCI &&
 885	     state(hdlc)->settings.lmi == LMI_CISCO)) {
 886		if (fr_lmi_recv(frad, skb))
 887			goto rx_error;
 888		dev_kfree_skb_any(skb);
 889		return NET_RX_SUCCESS;
 890	}
 891
 892	pvc = find_pvc(hdlc, dlci);
 893	if (!pvc) {
 894#ifdef DEBUG_PKT
 895		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
 896			    dlci);
 897#endif
 898		dev_kfree_skb_any(skb);
 899		return NET_RX_DROP;
 900	}
 901
 902	if (pvc->state.fecn != fh->fecn) {
 903#ifdef DEBUG_ECN
 904		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
 905		       dlci, fh->fecn ? "N" : "FF");
 906#endif
 907		pvc->state.fecn ^= 1;
 908	}
 909
 910	if (pvc->state.becn != fh->becn) {
 911#ifdef DEBUG_ECN
 912		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
 913		       dlci, fh->becn ? "N" : "FF");
 914#endif
 915		pvc->state.becn ^= 1;
 916	}
 917
 918
 919	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
 920		frad->stats.rx_dropped++;
 921		return NET_RX_DROP;
 922	}
 923
 924	if (data[3] == NLPID_IP) {
 925		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
 926		dev = pvc->main;
 927		skb->protocol = htons(ETH_P_IP);
 928
 929	} else if (data[3] == NLPID_IPV6) {
 930		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
 931		dev = pvc->main;
 932		skb->protocol = htons(ETH_P_IPV6);
 933
 934	} else if (skb->len > 10 && data[3] == FR_PAD &&
 935		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
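		/*
		 * SNAP encapsulation: pad, NLPID 0x80, then a 3-byte OUI and
		 * a 2-byte PID.  The first OUI byte was already checked to be
		 * zero above, so OUI and PID are matched as one 32-bit value
		 * (0x80C20007 = IEEE 802.1 OUI, bridged Ethernet PID).
		 */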
 936		u16 oui = ntohs(*(__be16*)(data + 6));
 937		u16 pid = ntohs(*(__be16*)(data + 8));
 938		skb_pull(skb, 10);
 939
 940		switch ((((u32)oui) << 16) | pid) {
 941		case ETH_P_ARP: /* routed frame with SNAP */
 942		case ETH_P_IPX:
 943		case ETH_P_IP:	/* a long variant */
 944		case ETH_P_IPV6:
 945			dev = pvc->main;
 946			skb->protocol = htons(pid);
 947			break;
 948
 949		case 0x80C20007: /* bridged Ethernet frame */
 950			if ((dev = pvc->ether) != NULL)
 951				skb->protocol = eth_type_trans(skb, dev);
 952			break;
 953
 954		default:
 955			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
 956				    oui, pid);
 957			dev_kfree_skb_any(skb);
 958			return NET_RX_DROP;
 959		}
 960	} else {
 961		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
 962			    data[3], skb->len);
 963		dev_kfree_skb_any(skb);
 964		return NET_RX_DROP;
 965	}
 966
 967	if (dev) {
 968		dev->stats.rx_packets++; /* PVC traffic */
 969		dev->stats.rx_bytes += skb->len;
 970		if (pvc->state.becn)
 971			dev->stats.rx_compressed++;
 972		skb->dev = dev;
 973		netif_rx(skb);
 974		return NET_RX_SUCCESS;
 975	} else {
 976		dev_kfree_skb_any(skb);
 977		return NET_RX_DROP;
 978	}
 979
 980 rx_error:
 981	frad->stats.rx_errors++; /* Mark error */
 982	dev_kfree_skb_any(skb);
 983	return NET_RX_DROP;
 984}
 985
 986
 987
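/*
 * Start the Frame Relay protocol machinery: with LMI enabled the per-link
 * state is reset and the periodic fr_timer() is armed (first poll after
 * one second); without LMI the link is declared up immediately.
 */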
 988static void fr_start(struct net_device *dev)
 989{
 990	hdlc_device *hdlc = dev_to_hdlc(dev);
 991#ifdef DEBUG_LINK
 992	printk(KERN_DEBUG "fr_start\n");
 993#endif
 994	if (state(hdlc)->settings.lmi != LMI_NONE) {
 995		state(hdlc)->reliable = 0;
 996		state(hdlc)->dce_changed = 1;
 997		state(hdlc)->request = 0;
 998		state(hdlc)->fullrep_sent = 0;
 999		state(hdlc)->last_errors = 0xFFFFFFFF;
1000		state(hdlc)->n391cnt = 0;
1001		state(hdlc)->txseq = state(hdlc)->rxseq = 0;
1002
1003		state(hdlc)->dev = dev;
1004		timer_setup(&state(hdlc)->timer, fr_timer, 0);
1005		/* First poll after 1 s */
1006		state(hdlc)->timer.expires = jiffies + HZ;
1007		add_timer(&state(hdlc)->timer);
1008	} else
1009		fr_set_link_state(1, dev);
1010}
1011
1012
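/* Stop the protocol: cancel the LMI timer (if any) and mark the link down. */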
1013static void fr_stop(struct net_device *dev)
1014{
1015	hdlc_device *hdlc = dev_to_hdlc(dev);
1016#ifdef DEBUG_LINK
1017	printk(KERN_DEBUG "fr_stop\n");
1018#endif
1019	if (state(hdlc)->settings.lmi != LMI_NONE)
1020		del_timer_sync(&state(hdlc)->timer);
1021	fr_set_link_state(0, dev);
1022}
1023
1024
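/* The FRAD is being closed: bring down every PVC interface attached to it. */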
1025static void fr_close(struct net_device *dev)
1026{
1027	hdlc_device *hdlc = dev_to_hdlc(dev);
1028	struct pvc_device *pvc = state(hdlc)->first_pvc;
1029
1030	while (pvc) {		/* Shutdown all PVCs for this FRAD */
1031		if (pvc->main)
1032			dev_close(pvc->main);
1033		if (pvc->ether)
1034			dev_close(pvc->ether);
1035		pvc = pvc->next;
1036	}
1037}
1038
1039
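/*
 * Initialise a plain point-to-point PVC interface: DLCI hardware type,
 * 10 bytes of header room (enough for the Q.922 + SNAP encapsulation
 * handled above) and a 2-byte (DLCI) hardware address.
 */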
1040static void pvc_setup(struct net_device *dev)
1041{
1042	dev->type = ARPHRD_DLCI;
1043	dev->flags = IFF_POINTOPOINT;
1044	dev->hard_header_len = 10;
1045	dev->addr_len = 2;
1046	netif_keep_dst(dev);
1047}
1048
1049static const struct net_device_ops pvc_ops = {
1050	.ndo_open       = pvc_open,
1051	.ndo_stop       = pvc_close,
1052	.ndo_start_xmit = pvc_xmit,
1053	.ndo_do_ioctl   = pvc_ioctl,
1054};
1055
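/*
 * Create the user-visible interface for a DLCI: a point-to-point "pvc%d"
 * device or, for type ARPHRD_ETHER, a bridged-Ethernet "pvceth%d" device.
 * Newly used PVCs are counted so that DCE mode can advertise them in the
 * next full status report.
 */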
1056static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1057{
1058	hdlc_device *hdlc = dev_to_hdlc(frad);
1059	struct pvc_device *pvc;
1060	struct net_device *dev;
1061	int used;
1062
1063	if ((pvc = add_pvc(frad, dlci)) == NULL) {
1064		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
1065		return -ENOBUFS;
1066	}
1067
1068	if (*get_dev_p(pvc, type))
1069		return -EEXIST;
1070
1071	used = pvc_is_used(pvc);
1072
1073	if (type == ARPHRD_ETHER)
1074		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
1075				   ether_setup);
1076	else
1077		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
1078
1079	if (!dev) {
 1080		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
1081		delete_unused_pvcs(hdlc);
1082		return -ENOBUFS;
1083	}
1084
1085	if (type == ARPHRD_ETHER) {
1086		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1087		eth_hw_addr_random(dev);
1088	} else {
1089		*(__be16*)dev->dev_addr = htons(dlci);
1090		dlci_to_q922(dev->broadcast, dlci);
1091	}
1092	dev->netdev_ops = &pvc_ops;
1093	dev->mtu = HDLC_MAX_MTU;
1094	dev->min_mtu = 68;
1095	dev->max_mtu = HDLC_MAX_MTU;
1096	dev->priv_flags |= IFF_NO_QUEUE;
1097	dev->ml_priv = pvc;
1098
1099	if (register_netdevice(dev) != 0) {
1100		free_netdev(dev);
1101		delete_unused_pvcs(hdlc);
1102		return -EIO;
1103	}
1104
1105	dev->needs_free_netdev = true;
1106	*get_dev_p(pvc, type) = dev;
1107	if (!used) {
1108		state(hdlc)->dce_changed = 1;
1109		state(hdlc)->dce_pvc_count++;
1110	}
1111	return 0;
1112}
1113
1114
1115
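/*
 * Remove the interface bound to a DLCI.  Fails with -EBUSY while the
 * interface is still up; the PVC entry itself is freed once no interface
 * of either type uses it any more.
 */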
1116static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
1117{
1118	struct pvc_device *pvc;
1119	struct net_device *dev;
1120
1121	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
1122		return -ENOENT;
1123
1124	if ((dev = *get_dev_p(pvc, type)) == NULL)
1125		return -ENOENT;
1126
1127	if (dev->flags & IFF_UP)
1128		return -EBUSY;		/* PVC in use */
1129
1130	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
1131	*get_dev_p(pvc, type) = NULL;
1132
1133	if (!pvc_is_used(pvc)) {
1134		state(hdlc)->dce_pvc_count--;
1135		state(hdlc)->dce_changed = 1;
1136	}
1137	delete_unused_pvcs(hdlc);
1138	return 0;
1139}
1140
1141
1142
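/*
 * Protocol detach: unregister every PVC interface and free the whole
 * PVC list.
 */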
1143static void fr_destroy(struct net_device *frad)
1144{
1145	hdlc_device *hdlc = dev_to_hdlc(frad);
1146	struct pvc_device *pvc = state(hdlc)->first_pvc;
1147	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
1148	state(hdlc)->dce_pvc_count = 0;
1149	state(hdlc)->dce_changed = 1;
1150
1151	while (pvc) {
1152		struct pvc_device *next = pvc->next;
1153		/* destructors will free_netdev() main and ether */
1154		if (pvc->main)
1155			unregister_netdevice(pvc->main);
1156
1157		if (pvc->ether)
1158			unregister_netdevice(pvc->ether);
1159
1160		kfree(pvc);
1161		pvc = next;
1162	}
1163}
1164
1165
1166static struct hdlc_proto proto = {
1167	.close		= fr_close,
1168	.start		= fr_start,
1169	.stop		= fr_stop,
1170	.detach		= fr_destroy,
1171	.ioctl		= fr_ioctl,
1172	.netif_rx	= fr_rx,
1173	.module		= THIS_MODULE,
1174};
1175
1176
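/*
 * Protocol ioctl handler: IF_GET_PROTO returns the current settings,
 * IF_PROTO_FR validates the LMI type, timer and counter parameters and
 * (re)attaches this protocol, and the *_ADD_PVC / *_DEL_PVC requests
 * create or remove per-DLCI interfaces (plain or bridged Ethernet).
 */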
1177static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1178{
1179	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
1180	const size_t size = sizeof(fr_proto);
1181	fr_proto new_settings;
1182	hdlc_device *hdlc = dev_to_hdlc(dev);
1183	fr_proto_pvc pvc;
1184	int result;
1185
1186	switch (ifr->ifr_settings.type) {
1187	case IF_GET_PROTO:
1188		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1189			return -EINVAL;
1190		ifr->ifr_settings.type = IF_PROTO_FR;
1191		if (ifr->ifr_settings.size < size) {
1192			ifr->ifr_settings.size = size; /* data size wanted */
1193			return -ENOBUFS;
1194		}
1195		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
1196			return -EFAULT;
1197		return 0;
1198
1199	case IF_PROTO_FR:
1200		if (!capable(CAP_NET_ADMIN))
1201			return -EPERM;
1202
1203		if (dev->flags & IFF_UP)
1204			return -EBUSY;
1205
1206		if (copy_from_user(&new_settings, fr_s, size))
1207			return -EFAULT;
1208
1209		if (new_settings.lmi == LMI_DEFAULT)
1210			new_settings.lmi = LMI_ANSI;
1211
1212		if ((new_settings.lmi != LMI_NONE &&
1213		     new_settings.lmi != LMI_ANSI &&
1214		     new_settings.lmi != LMI_CCITT &&
1215		     new_settings.lmi != LMI_CISCO) ||
1216		    new_settings.t391 < 1 ||
1217		    new_settings.t392 < 2 ||
1218		    new_settings.n391 < 1 ||
1219		    new_settings.n392 < 1 ||
1220		    new_settings.n393 < new_settings.n392 ||
1221		    new_settings.n393 > 32 ||
1222		    (new_settings.dce != 0 &&
1223		     new_settings.dce != 1))
1224			return -EINVAL;
1225
 1226		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
1227		if (result)
1228			return result;
1229
1230		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
1231			result = attach_hdlc_protocol(dev, &proto,
1232						      sizeof(struct frad_state));
1233			if (result)
1234				return result;
1235			state(hdlc)->first_pvc = NULL;
1236			state(hdlc)->dce_pvc_count = 0;
1237		}
1238		memcpy(&state(hdlc)->settings, &new_settings, size);
1239		dev->type = ARPHRD_FRAD;
1240		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
1241		return 0;
1242
1243	case IF_PROTO_FR_ADD_PVC:
1244	case IF_PROTO_FR_DEL_PVC:
1245	case IF_PROTO_FR_ADD_ETH_PVC:
1246	case IF_PROTO_FR_DEL_ETH_PVC:
1247		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1248			return -EINVAL;
1249
1250		if (!capable(CAP_NET_ADMIN))
1251			return -EPERM;
1252
1253		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
1254				   sizeof(fr_proto_pvc)))
1255			return -EFAULT;
1256
1257		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
1258			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */
1259
1260		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
1261		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
1262			result = ARPHRD_ETHER; /* bridged Ethernet device */
1263		else
1264			result = ARPHRD_DLCI;
1265
1266		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
1267		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
1268			return fr_add_pvc(dev, pvc.dlci, result);
1269		else
1270			return fr_del_pvc(hdlc, pvc.dlci, result);
1271	}
1272
1273	return -EINVAL;
1274}
1275
1276
1277static int __init mod_init(void)
1278{
1279	register_hdlc_protocol(&proto);
1280	return 0;
1281}
1282
1283
1284static void __exit mod_exit(void)
1285{
1286	unregister_hdlc_protocol(&proto);
1287}
1288
1289
1290module_init(mod_init);
1291module_exit(mod_exit);
1292
1293MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
1294MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
1295MODULE_LICENSE("GPL v2");