// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Cisco HDLC support
 *
 * Copyright (C) 2000 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#undef DEBUG_HARD_HEADER

#define CISCO_MULTICAST		0x8F	/* Cisco multicast address */
#define CISCO_UNICAST		0x0F	/* Cisco unicast address */
#define CISCO_KEEPALIVE		0x8035	/* Cisco keepalive protocol */
#define CISCO_SYS_INFO		0x2000	/* Cisco interface/system info */
#define CISCO_ADDR_REQ		0	/* Cisco address request */
#define CISCO_ADDR_REPLY	1	/* Cisco address reply */
#define CISCO_KEEPALIVE_REQ	2	/* Cisco keepalive request */

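/*
 * Cisco HDLC framing prepends a 4-byte header to every frame: a one-byte
 * address (multicast for keepalives, unicast otherwise), a control byte
 * (set to zero on transmit) and a big-endian protocol field carrying an
 * Ethertype-style value such as ETH_P_IP.
 */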
struct hdlc_header {
	u8 address;
	u8 control;
	__be16 protocol;
} __packed;

struct cisco_packet {
	__be32 type;		/* code */
	__be32 par1;
	__be32 par2;
	__be16 rel;		/* reliability */
	__be32 time;
} __packed;
#define CISCO_PACKET_LEN	18
#define CISCO_BIG_PACKET_LEN	20

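/*
 * Per-device protocol state: the user-supplied keepalive settings, the
 * keepalive timer, a lock protecting the sequence numbers, the time of
 * the last valid keepalive reply and the current link state.
 */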
struct cisco_state {
	cisco_proto settings;

	struct timer_list timer;
	struct net_device *dev;
	spinlock_t lock;
	unsigned long last_poll;
	int up;
	u32 txseq; /* TX sequence number, 0 = none */
	u32 rxseq; /* RX sequence number */
};

static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs);

static inline struct cisco_state *state(hdlc_device *hdlc)
{
	return (struct cisco_state *)hdlc->state;
}

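/*
 * header_ops->create hook: prepend the 4-byte Cisco HDLC header.
 * Keepalive frames get the multicast address, everything else the
 * unicast address. Returns the number of bytes pushed.
 */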
static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
			     u16 type, const void *daddr, const void *saddr,
			     unsigned int len)
{
	struct hdlc_header *data;
#ifdef DEBUG_HARD_HEADER
	netdev_dbg(dev, "%s called\n", __func__);
#endif

	skb_push(skb, sizeof(struct hdlc_header));
	data = (struct hdlc_header *)skb->data;
	if (type == CISCO_KEEPALIVE)
		data->address = CISCO_MULTICAST;
	else
		data->address = CISCO_UNICAST;
	data->control = 0;
	data->protocol = htons(type);

	return sizeof(struct hdlc_header);
}

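/*
 * Build and transmit a keepalive packet (request or address reply).
 * par1/par2 carry the TX/RX sequence numbers for keepalive requests, or
 * the local IP address and netmask for CISCO_ADDR_REPLY.
 */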
static void cisco_keepalive_send(struct net_device *dev, u32 type,
				 __be32 par1, __be32 par2)
{
	struct sk_buff *skb;
	struct cisco_packet *data;

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cisco_packet));
	if (!skb)
		return;

	skb_reserve(skb, 4);
	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
	data = (struct cisco_packet *)(skb->data + 4);

	data->type = htonl(type);
	data->par1 = par1;
	data->par2 = par2;
	data->rel = cpu_to_be16(0xFFFF);
	/* we will need do_div here if 1000 % HZ != 0 */
	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));

	skb_put(skb, sizeof(struct cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}

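/*
 * Inspect a received frame and report its protocol. Valid IP, IPX and
 * IPv6 frames have the Cisco HDLC header stripped and their protocol
 * value returned; everything else is left as ETH_P_HDLC so that
 * cisco_rx() below can handle it.
 */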
static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct hdlc_header *data = (struct hdlc_header *)skb->data;

	if (skb->len < sizeof(struct hdlc_header))
		return cpu_to_be16(ETH_P_HDLC);

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		return cpu_to_be16(ETH_P_HDLC);

	switch (data->protocol) {
	case cpu_to_be16(ETH_P_IP):
	case cpu_to_be16(ETH_P_IPX):
	case cpu_to_be16(ETH_P_IPV6):
		skb_pull(skb, sizeof(struct hdlc_header));
		return data->protocol;
	default:
		return cpu_to_be16(ETH_P_HDLC);
	}
}

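/*
 * Handle control frames: drop system info packets, answer address
 * requests with the interface address and netmask, and process
 * keepalive requests by recording the peer's sequence number and, on a
 * matching acknowledgement, marking the link up.
 */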
static int cisco_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	struct hdlc_header *data = (struct hdlc_header *)skb->data;
	struct cisco_packet *cisco_data;
	struct in_device *in_dev;
	__be32 addr, mask;
	u32 ack;

	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		goto rx_error;

	switch (ntohs(data->protocol)) {
	case CISCO_SYS_INFO:
		/* Packet is not needed, drop it. */
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;

	case CISCO_KEEPALIVE:
		if ((skb->len != sizeof(struct hdlc_header) +
		     CISCO_PACKET_LEN) &&
		    (skb->len != sizeof(struct hdlc_header) +
		     CISCO_BIG_PACKET_LEN)) {
			netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n",
				    skb->len);
			goto rx_error;
		}

		cisco_data = (struct cisco_packet *)(skb->data + sizeof
						     (struct hdlc_header));

		switch (ntohl(cisco_data->type)) {
		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
			rcu_read_lock();
			in_dev = __in_dev_get_rcu(dev);
			addr = 0;
			mask = ~cpu_to_be32(0); /* is the mask correct? */

			if (in_dev != NULL) {
				const struct in_ifaddr *ifa;

				in_dev_for_each_ifa_rcu(ifa, in_dev) {
					if (strcmp(dev->name,
						   ifa->ifa_label) == 0) {
						addr = ifa->ifa_local;
						mask = ifa->ifa_mask;
						break;
					}
				}

				cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
						     addr, mask);
			}
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;

		case CISCO_ADDR_REPLY:
			netdev_info(dev, "Unexpected Cisco IP address reply\n");
			goto rx_error;

		case CISCO_KEEPALIVE_REQ:
			spin_lock(&st->lock);
			st->rxseq = ntohl(cisco_data->par1);
			ack = ntohl(cisco_data->par2);
			if (ack && (ack == st->txseq ||
				    /* our current REQ may be in transit */
				    ack == st->txseq - 1)) {
				st->last_poll = jiffies;
				if (!st->up) {
					u32 sec, min, hrs, days;

					sec = ntohl(cisco_data->time) / 1000;
					min = sec / 60; sec -= min * 60;
					hrs = min / 60; min -= hrs * 60;
					days = hrs / 24; hrs -= days * 24;
					netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n",
						    days, hrs, min, sec);
					netif_dormant_off(dev);
					st->up = 1;
				}
			}
			spin_unlock(&st->lock);

			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;
		} /* switch (keepalive type) */
	} /* switch (protocol) */

	netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol));
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;

rx_error:
	dev->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}

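/*
 * Periodic keepalive timer: declare the link down if no valid reply
 * arrived within the configured timeout, then send the next keepalive
 * request and re-arm the timer for the configured interval.
 */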
static void cisco_timer(struct timer_list *t)
{
	struct cisco_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;

	spin_lock(&st->lock);
	if (st->up &&
	    time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
		st->up = 0;
		netdev_info(dev, "Link down\n");
		netif_dormant_on(dev);
	}

	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
			     htonl(st->rxseq));
	spin_unlock(&st->lock);

	st->timer.expires = jiffies + st->settings.interval * HZ;
	add_timer(&st->timer);
}

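/* Called when the device goes up: reset the state and start keepalives. */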
static void cisco_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	st->up = st->txseq = st->rxseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);

	st->dev = dev;
	timer_setup(&st->timer, cisco_timer, 0);
	st->timer.expires = jiffies + HZ; /* First poll after 1 s */
	add_timer(&st->timer);
}

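/* Called when the device goes down: stop the timer and mark the link down. */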
static void cisco_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	del_timer_sync(&st->timer);

	spin_lock_irqsave(&st->lock, flags);
	netif_dormant_on(dev);
	st->up = st->txseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);
}

static struct hdlc_proto proto = {
	.start		= cisco_start,
	.stop		= cisco_stop,
	.type_trans	= cisco_type_trans,
	.ioctl		= cisco_ioctl,
	.netif_rx	= cisco_rx,
	.module		= THIS_MODULE,
};

static const struct header_ops cisco_header_ops = {
	.create = cisco_hard_header,
};

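/*
 * Protocol ioctl: IF_GET_PROTO returns the current settings;
 * IF_PROTO_CISCO validates the new settings, attaches the hardware
 * driver with NRZ/CRC16 framing and installs this protocol on the
 * device.
 */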
static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs)
{
	cisco_proto __user *cisco_s = ifs->ifs_ifsu.cisco;
	const size_t size = sizeof(cisco_proto);
	cisco_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int result;

	switch (ifs->type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifs->type = IF_PROTO_CISCO;
		if (ifs->size < size) {
			ifs->size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_CISCO:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, cisco_s, size))
			return -EFAULT;

		if (new_settings.interval < 1 ||
		    new_settings.timeout < 2)
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto,
					      sizeof(struct cisco_state));
		if (result)
			return result;

		memcpy(&state(hdlc)->settings, &new_settings, size);
		spin_lock_init(&state(hdlc)->lock);
		dev->header_ops = &cisco_header_ops;
		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->type = ARPHRD_CISCO;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}

static int __init hdlc_cisco_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit hdlc_cisco_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(hdlc_cisco_init);
module_exit(hdlc_cisco_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");