// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Cisco HDLC support
 *
 * Copyright (C) 2000 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#undef DEBUG_HARD_HEADER

#define CISCO_MULTICAST		0x8F	/* Cisco multicast address */
#define CISCO_UNICAST		0x0F	/* Cisco unicast address */
#define CISCO_KEEPALIVE		0x8035	/* Cisco keepalive protocol */
#define CISCO_SYS_INFO		0x2000	/* Cisco interface/system info */
#define CISCO_ADDR_REQ		0	/* Cisco address request */
#define CISCO_ADDR_REPLY	1	/* Cisco address reply */
#define CISCO_KEEPALIVE_REQ	2	/* Cisco keepalive request */

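/* 4-byte Cisco HDLC frame header: address, control and a big-endian
 * protocol (ethertype) field.
 */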
struct hdlc_header {
	u8 address;
	u8 control;
	__be16 protocol;
} __packed;

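/* Payload of a Cisco control packet (follows the HDLC header):
 * type code, two parameters, reliability field and timestamp.
 */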
struct cisco_packet {
	__be32 type;		/* code */
	__be32 par1;
	__be32 par2;
	__be16 rel;		/* reliability */
	__be32 time;
} __packed;
#define CISCO_PACKET_LEN	18
#define CISCO_BIG_PACKET_LEN	20

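/* Per-device protocol state: user settings, the keepalive timer and the
 * link-monitoring sequence numbers, guarded by 'lock'.
 */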
struct cisco_state {
	cisco_proto settings;

	struct timer_list timer;
	struct net_device *dev;
	spinlock_t lock;
	unsigned long last_poll;
	int up;
	u32 txseq; /* TX sequence number, 0 = none */
	u32 rxseq; /* RX sequence number */
};

static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs);

static inline struct cisco_state *state(hdlc_device *hdlc)
{
	return (struct cisco_state *)hdlc->state;
}

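/* header_ops->create hook: prepend the 4-byte Cisco HDLC header.
 * Keepalives go to the multicast address, everything else is unicast.
 */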
static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
			     u16 type, const void *daddr, const void *saddr,
			     unsigned int len)
{
	struct hdlc_header *data;
#ifdef DEBUG_HARD_HEADER
	netdev_dbg(dev, "%s called\n", __func__);
#endif

	skb_push(skb, sizeof(struct hdlc_header));
	data = (struct hdlc_header *)skb->data;
	if (type == CISCO_KEEPALIVE)
		data->address = CISCO_MULTICAST;
	else
		data->address = CISCO_UNICAST;
	data->control = 0;
	data->protocol = htons(type);

	return sizeof(struct hdlc_header);
}

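/* Build and transmit a keepalive/address-reply control packet.
 * par1/par2 carry sequence numbers or address/netmask and are already
 * in network byte order.
 */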
static void cisco_keepalive_send(struct net_device *dev, u32 type,
				 __be32 par1, __be32 par2)
{
	struct sk_buff *skb;
	struct cisco_packet *data;

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cisco_packet));
	if (!skb)
		return;

	skb_reserve(skb, 4);
	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
	data = (struct cisco_packet *)(skb->data + 4);

	data->type = htonl(type);
	data->par1 = par1;
	data->par2 = par2;
	data->rel = cpu_to_be16(0xFFFF);
	/* we will need do_div here if 1000 % HZ != 0 */
	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));

	skb_put(skb, sizeof(struct cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}

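/* Map a received frame to its skb->protocol value: known network
 * protocols have the HDLC header pulled off, anything else stays
 * ETH_P_HDLC.
 */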
static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct hdlc_header *data = (struct hdlc_header *)skb->data;

	if (skb->len < sizeof(struct hdlc_header))
		return cpu_to_be16(ETH_P_HDLC);

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		return cpu_to_be16(ETH_P_HDLC);

	switch (data->protocol) {
	case cpu_to_be16(ETH_P_IP):
	case cpu_to_be16(ETH_P_IPX):
	case cpu_to_be16(ETH_P_IPV6):
		skb_pull(skb, sizeof(struct hdlc_header));
		return data->protocol;
	default:
		return cpu_to_be16(ETH_P_HDLC);
	}
}

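/* Receive handler for Cisco control traffic: answers address requests,
 * processes keepalive requests and updates the link-up state.
 */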
static int cisco_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	struct hdlc_header *data = (struct hdlc_header *)skb->data;
	struct cisco_packet *cisco_data;
	struct in_device *in_dev;
	__be32 addr, mask;
	u32 ack;

	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		goto rx_error;

	switch (ntohs(data->protocol)) {
	case CISCO_SYS_INFO:
		/* Packet is not needed, drop it. */
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;

	case CISCO_KEEPALIVE:
		if ((skb->len != sizeof(struct hdlc_header) +
		     CISCO_PACKET_LEN) &&
		    (skb->len != sizeof(struct hdlc_header) +
		     CISCO_BIG_PACKET_LEN)) {
			netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n",
				    skb->len);
			goto rx_error;
		}

		cisco_data = (struct cisco_packet *)(skb->data + sizeof
						     (struct hdlc_header));

		switch (ntohl(cisco_data->type)) {
		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
			rcu_read_lock();
			in_dev = __in_dev_get_rcu(dev);
			addr = 0;
			mask = ~cpu_to_be32(0); /* is the mask correct? */

			if (in_dev != NULL) {
				const struct in_ifaddr *ifa;

				in_dev_for_each_ifa_rcu(ifa, in_dev) {
					if (strcmp(dev->name,
						   ifa->ifa_label) == 0) {
						addr = ifa->ifa_local;
						mask = ifa->ifa_mask;
						break;
					}
				}

				cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
						     addr, mask);
			}
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;

		case CISCO_ADDR_REPLY:
			netdev_info(dev, "Unexpected Cisco IP address reply\n");
			goto rx_error;

		case CISCO_KEEPALIVE_REQ:
			spin_lock(&st->lock);
			st->rxseq = ntohl(cisco_data->par1);
			ack = ntohl(cisco_data->par2);
			if (ack && (ack == st->txseq ||
				    /* our current REQ may be in transit */
				    ack == st->txseq - 1)) {
				st->last_poll = jiffies;
				if (!st->up) {
					u32 sec, min, hrs, days;

					sec = ntohl(cisco_data->time) / 1000;
					min = sec / 60; sec -= min * 60;
					hrs = min / 60; min -= hrs * 60;
					days = hrs / 24; hrs -= days * 24;
					netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n",
						    days, hrs, min, sec);
					netif_dormant_off(dev);
					st->up = 1;
				}
			}
			spin_unlock(&st->lock);

			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;
		} /* switch (keepalive type) */
	} /* switch (protocol) */

	netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol));
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;

rx_error:
	dev->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}

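/* Periodic keepalive timer: declares the link down if the peer has not
 * acknowledged us within the configured timeout, then sends the next
 * keepalive request and re-arms itself.
 */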
static void cisco_timer(struct timer_list *t)
{
	struct cisco_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;

	spin_lock(&st->lock);
	if (st->up &&
	    time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
		st->up = 0;
		netdev_info(dev, "Link down\n");
		netif_dormant_on(dev);
	}

	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
			     htonl(st->rxseq));
	spin_unlock(&st->lock);

	st->timer.expires = jiffies + st->settings.interval * HZ;
	add_timer(&st->timer);
}

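/* .start hook: reset the link state and sequence numbers, then arm the
 * first keepalive poll one second out.
 */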
static void cisco_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	st->up = st->txseq = st->rxseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);

	st->dev = dev;
	timer_setup(&st->timer, cisco_timer, 0);
	st->timer.expires = jiffies + HZ; /* First poll after 1 s */
	add_timer(&st->timer);
}

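/* .stop hook: kill the keepalive timer, mark the interface dormant and
 * clear the link state.
 */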
static void cisco_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	del_timer_sync(&st->timer);

	spin_lock_irqsave(&st->lock, flags);
	netif_dormant_on(dev);
	st->up = st->txseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);
}

static struct hdlc_proto proto = {
	.start		= cisco_start,
	.stop		= cisco_stop,
	.type_trans	= cisco_type_trans,
	.ioctl		= cisco_ioctl,
	.netif_rx	= cisco_rx,
	.module		= THIS_MODULE,
};

static const struct header_ops cisco_header_ops = {
	.create = cisco_hard_header,
};

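/* Protocol ioctl handler (reached through generic HDLC's SIOCWANDEV
 * path): IF_GET_PROTO copies the current settings to user space,
 * IF_PROTO_CISCO validates new settings and attaches this protocol to
 * the device.
 */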
static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs)
{
	cisco_proto __user *cisco_s = ifs->ifs_ifsu.cisco;
	const size_t size = sizeof(cisco_proto);
	cisco_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int result;

	switch (ifs->type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifs->type = IF_PROTO_CISCO;
		if (ifs->size < size) {
			ifs->size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_CISCO:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, cisco_s, size))
			return -EFAULT;

		if (new_settings.interval < 1 ||
		    new_settings.timeout < 2)
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto,
					      sizeof(struct cisco_state));
		if (result)
			return result;

		memcpy(&state(hdlc)->settings, &new_settings, size);
		spin_lock_init(&state(hdlc)->lock);
		dev->header_ops = &cisco_header_ops;
		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->type = ARPHRD_CISCO;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}

static int __init hdlc_cisco_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit hdlc_cisco_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(hdlc_cisco_init);
module_exit(hdlc_cisco_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");