/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

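/* Serializes fragment skb allocation and socket ownership hand-off in ax25_output(). */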
static DEFINE_SPINLOCK(ax25_frag_lock);

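/*
 * Send a frame from src to dest on the given device, optionally via a
 * digipeater path.  An existing connection is reused when one matches;
 * otherwise a new control block is created and link establishment is
 * started.  A paclen of zero selects the device default.  Returns the
 * control block with a reference held for the caller (release it with
 * ax25_cb_put()), or NULL on failure.
 */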
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
        ax25_dev *ax25_dev;
        ax25_cb *ax25;

        /*
         * Take the default packet length for the device if zero is
         * specified.
         */
        if (paclen == 0) {
                if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                        return NULL;

                paclen = ax25_dev->values[AX25_VALUES_PACLEN];
        }

        /*
         * Look for an existing connection.
         */
        if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
                ax25_output(ax25, paclen, skb);
                return ax25;            /* It already existed */
        }

        if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                return NULL;

        if ((ax25 = ax25_create_cb()) == NULL)
                return NULL;

        ax25_fillin_cb(ax25, ax25_dev);

        ax25->source_addr = *src;
        ax25->dest_addr = *dest;

        if (digi != NULL) {
                ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
                if (ax25->digipeat == NULL) {
                        ax25_cb_put(ax25);
                        return NULL;
                }
        }

        switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
        case AX25_PROTO_STD_SIMPLEX:
        case AX25_PROTO_STD_DUPLEX:
                ax25_std_establish_data_link(ax25);
                break;

#ifdef CONFIG_AX25_DAMA_SLAVE
        case AX25_PROTO_DAMA_SLAVE:
                if (ax25_dev->dama.slave)
                        ax25_ds_establish_data_link(ax25);
                else
                        ax25_std_establish_data_link(ax25);
                break;
#endif
        }

        /*
         * There is one ref for the state machine; a caller needs
         * one more to put it back, just like with the existing one.
         */
        ax25_cb_hold(ax25);

        ax25_cb_add(ax25);

        ax25->state = AX25_STATE_1;

        ax25_start_heartbeat(ax25);

        ax25_output(ax25, paclen, skb);

        return ax25;                    /* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);

/*
 * All outgoing AX.25 I frames pass via this routine, so this is where
 * fragmentation takes place.  Frames longer than paclen are split up,
 * either with KA9Q segmentation headers or, for AX25_P_TEXT frames, by
 * plain splitting.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
        struct sk_buff *skbn;
        unsigned char *p;
        int frontlen, len, fragno, ka9qfrag, first = 1;

        if (paclen < 16) {
                WARN_ON_ONCE(1);
                kfree_skb(skb);
                return;
        }

        if ((skb->len - 1) > paclen) {
                if (*skb->data == AX25_P_TEXT) {
                        skb_pull(skb, 1);       /* skip PID */
                        ka9qfrag = 0;
                } else {
                        paclen -= 2;            /* Allow for fragment control info */
                        ka9qfrag = 1;
                }

                fragno = skb->len / paclen;
                if (skb->len % paclen == 0) fragno--;

                frontlen = skb_headroom(skb);   /* Address space + CTRL */

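                /*
                 * Build one fragment per loop iteration.  A KA9Q segment
                 * carries a two-byte header: the AX25_P_SEGMENT PID followed
                 * by a count of the fragments still to come, with
                 * AX25_SEG_FIRST set on the first one (so three fragments are
                 * numbered 2|FIRST, 1, 0).  AX25_P_TEXT frames are simply
                 * split and each piece gets the AX25_P_TEXT PID back.
                 * frontlen keeps headroom for the address and control fields
                 * added at transmit time.
                 */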
                while (skb->len > 0) {
                        spin_lock_bh(&ax25_frag_lock);
                        if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
                                spin_unlock_bh(&ax25_frag_lock);
                                printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
                                return;
                        }

                        if (skb->sk != NULL)
                                skb_set_owner_w(skbn, skb->sk);

                        spin_unlock_bh(&ax25_frag_lock);

                        len = (paclen > skb->len) ? skb->len : paclen;

                        if (ka9qfrag == 1) {
                                skb_reserve(skbn, frontlen + 2);
                                skb_set_network_header(skbn,
                                                       skb_network_offset(skb));
                                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 2);

                                *p++ = AX25_P_SEGMENT;

                                *p = fragno--;
                                if (first) {
                                        *p |= AX25_SEG_FIRST;
                                        first = 0;
                                }
                        } else {
                                skb_reserve(skbn, frontlen + 1);
                                skb_set_network_header(skbn,
                                                       skb_network_offset(skb));
                                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 1);
                                *p = AX25_P_TEXT;
                        }

                        skb_pull(skb, len);
                        skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
                }

                kfree_skb(skb);
        } else {
                skb_queue_tail(&ax25->write_queue, skb);          /* Throw it on the queue */
        }

        switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
        case AX25_PROTO_STD_SIMPLEX:
        case AX25_PROTO_STD_DUPLEX:
                ax25_kick(ax25);
                break;

#ifdef CONFIG_AX25_DAMA_SLAVE
        /*
         * A DAMA slave is _required_ to work as normal AX.25L2V2
         * if no DAMA master is available.
         */
        case AX25_PROTO_DAMA_SLAVE:
                if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
                break;
#endif
        }
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
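/*
 * Control field layout: with the standard modulus a single octet holds
 * N(S) in bits 1-3, the poll/final flag in bit 4 and N(R) in bits 5-7;
 * with the extended modulus two octets are used, N(S) in the first and
 * N(R) plus the poll/final flag in the second.
 */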
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
        unsigned char *frame;

        if (skb == NULL)
                return;

        skb_reset_network_header(skb);

        if (ax25->modulus == AX25_MODULUS) {
                frame = skb_push(skb, 1);

                *frame = AX25_I;
                *frame |= (poll_bit) ? AX25_PF : 0;
                *frame |= (ax25->vr << 5);
                *frame |= (ax25->vs << 1);
        } else {
                frame = skb_push(skb, 2);

                frame[0] = AX25_I;
                frame[0] |= (ax25->vs << 1);
                frame[1] = (poll_bit) ? AX25_EPF : 0;
                frame[1] |= (ax25->vr << 1);
        }

        ax25_start_idletimer(ax25);

        ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

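/*
 * Push queued I frames out to the peer: clone each frame from the write
 * queue, transmit the clone while the send window has room, and move the
 * original onto the ack queue until it is acknowledged.  Nothing is sent
 * unless the link is up (state 3 or 4) and the peer is not busy.
 */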
void ax25_kick(ax25_cb *ax25)
{
        struct sk_buff *skb, *skbn;
        int last = 1;
        unsigned short start, end, next;

        if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
                return;

        if (ax25->condition & AX25_COND_PEER_RX_BUSY)
                return;

        if (skb_peek(&ax25->write_queue) == NULL)
                return;

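        /*
         * start is the first sequence number to send next: V(S) if frames
         * are already outstanding on the ack queue, otherwise V(A).  end is
         * the first sequence number outside the transmit window, so
         * start == end means the window is already full.
         */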
        start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
        end = (ax25->va + ax25->window) % ax25->modulus;

        if (start == end)
                return;

        /*
         * Transmit data until either we're out of data to send or
         * the window is full. Send a poll on the final I frame if
         * the window is filled.
         */

        /*
         * Dequeue the frame and copy it.
         * Check for race with ax25_clear_queues().
         */
        skb = skb_dequeue(&ax25->write_queue);
        if (!skb)
                return;

        ax25->vs = start;

        do {
                if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                        skb_queue_head(&ax25->write_queue, skb);
                        break;
                }

                if (skb->sk != NULL)
                        skb_set_owner_w(skbn, skb->sk);

                next = (ax25->vs + 1) % ax25->modulus;
                last = (next == end);

                /*
                 * Transmit the frame copy.
                 * bke 960114: do not set the Poll bit on the last frame
                 * in DAMA mode.
                 */
                switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
                case AX25_PROTO_STD_SIMPLEX:
                case AX25_PROTO_STD_DUPLEX:
                        ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
                        break;

#ifdef CONFIG_AX25_DAMA_SLAVE
                case AX25_PROTO_DAMA_SLAVE:
                        ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
                        break;
#endif
                }

                ax25->vs = next;

                /*
                 * Requeue the original data frame.
                 */
                skb_queue_tail(&ax25->ack_queue, skb);

        } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

        ax25->condition &= ~AX25_COND_ACK_PENDING;

        if (!ax25_t1timer_running(ax25)) {
                ax25_stop_t3timer(ax25);
                ax25_calculate_t1(ax25);
                ax25_start_t1timer(ax25);
        }
}

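/*
 * Prepend the AX.25 address field (source, destination and any digipeater
 * path) and the command/response type to the frame, growing the headroom
 * if there is not enough, then pass it to ax25_queue_xmit() on the
 * underlying device.
 */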
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
        struct sk_buff *skbn;
        unsigned char *ptr;
        int headroom;

        if (ax25->ax25_dev == NULL) {
                ax25_disconnect(ax25, ENETUNREACH);
                return;
        }

        headroom = ax25_addr_size(ax25->digipeat);

        if (skb_headroom(skb) < headroom) {
                if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
                        printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
                        kfree_skb(skb);
                        return;
                }

                if (skb->sk != NULL)
                        skb_set_owner_w(skbn, skb->sk);

                consume_skb(skb);
                skb = skbn;
        }

        ptr = skb_push(skb, headroom);

        ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

        ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim to dev_queue_xmit to add the KISS control byte, and do
 * any packet forwarding in operation.
 */
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned char *ptr;

        skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

        ptr = skb_push(skb, 1);
        *ptr = 0x00;                    /* KISS */

        dev_queue_xmit(skb);
}

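/*
 * Process an acknowledgement N(R).  If it acknowledges every outstanding
 * I frame (N(R) == V(S)) the round-trip time is sampled and T1 gives way
 * to T3; if it acknowledges only some of them (N(R) != V(A)) T1 is
 * recalculated and restarted.  Returns 1 if any frames were acked, 0
 * otherwise.
 */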
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
        if (ax25->vs == nr) {
                ax25_frames_acked(ax25, nr);
                ax25_calculate_rtt(ax25);
                ax25_stop_t1timer(ax25);
                ax25_start_t3timer(ax25);
                return 1;
        } else {
                if (ax25->va != nr) {
                        ax25_frames_acked(ax25, nr);
                        ax25_calculate_t1(ax25);
                        ax25_start_t1timer(ax25);
                        return 1;
                }
        }
        return 0;
}