/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(ax25_frag_lock);

ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *ax25;

	/*
	 * Take the default packet length for the device if zero is
	 * specified.
	 */
	if (paclen == 0) {
		if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
			return NULL;

		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
	}

	/*
	 * Look for an existing connection.
	 */
	if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
		ax25_output(ax25, paclen, skb);
		return ax25;		/* It already existed */
	}

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return NULL;

	if ((ax25 = ax25_create_cb()) == NULL)
		return NULL;

	ax25_fillin_cb(ax25, ax25_dev);

	ax25->source_addr = *src;
	ax25->dest_addr   = *dest;

	if (digi != NULL) {
		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			ax25_cb_put(ax25);
			return NULL;
		}
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	/*
	 * There is one ref for the state machine; a caller needs
	 * one more to put it back, just like with the existing one.
	 */
	ax25_cb_hold(ax25);

	ax25_cb_add(ax25);

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	ax25_output(ax25, paclen, skb);

	return ax25;			/* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);
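
/*
 * Illustrative caller sketch (not part of this file; everything except
 * ax25_send_frame(), ax25_cb_put() and kfree_skb() is a hypothetical
 * name).  The extra reference taken above is the caller's to drop:
 *
 *	ax25_cb *ax25;
 *
 *	ax25 = ax25_send_frame(skb, 0, &my_src, &my_dest, NULL, my_dev);
 *	if (ax25 == NULL) {
 *		kfree_skb(skb);		// skb is not consumed on failure
 *		return -ENETUNREACH;	// hypothetical error handling
 *	}
 *	...
 *	ax25_cb_put(ax25);		// release the caller's reference
 */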

/*
 * All outgoing AX.25 I frames pass through this routine, so this is
 * where the fragmentation of frames takes place.  Frames whose data,
 * excluding the PID byte, is longer than paclen are split into
 * segments before being queued for transmission.
 */
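/*
 * Worked example (illustrative figures): with paclen = 256 and a 601
 * byte skb (1 PID byte plus 600 data bytes) carrying a non-text PID,
 * the KA9Q branch below is taken: paclen drops to 254, fragno becomes
 * 601 / 254 = 2, and three segments of 254, 254 and 93 bytes are
 * queued with counters 2, 1 and 0; the first segment also carries
 * AX25_SEG_FIRST.  Each segment is prefixed with the AX25_P_SEGMENT
 * PID and the one byte counter.
 */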
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0)
			fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave)
			ax25_kick(ax25);
		break;
#endif
	}
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
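/*
 * Control field layout produced below (AX.25 I frame):
 *
 *   modulo 8, one octet:     N(R) in bits 7-5, P/F in bit 4,
 *                            N(S) in bits 3-1, bit 0 clear
 *   modulo 128, two octets:  first octet carries N(S) in bits 7-1 with
 *                            bit 0 clear, second octet carries N(R) in
 *                            bits 7-1 with P/F in bit 0
 */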
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *frame;

	if (skb == NULL)
		return;

	skb_reset_network_header(skb);

	if (ax25->modulus == AX25_MODULUS) {
		frame = skb_push(skb, 1);

		*frame = AX25_I;
		*frame |= (poll_bit) ? AX25_PF : 0;
		*frame |= (ax25->vr << 5);
		*frame |= (ax25->vs << 1);
	} else {
		frame = skb_push(skb, 2);

		frame[0] = AX25_I;
		frame[0] |= (ax25->vs << 1);
		frame[1] = (poll_bit) ? AX25_EPF : 0;
		frame[1] |= (ax25->vr << 1);
	}

	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

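/*
 * Push queued I frames out up to the flow control window: 'start' is
 * V(S) while frames are still awaiting acknowledgement, otherwise V(A),
 * and 'end' is V(A) plus the window size, modulo 8 or 128.  Sending
 * stops when the write queue empties or the window fills; the frame
 * that fills the window is sent with the poll bit set, except in DAMA
 * slave mode.
 */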
void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

	if (start == end)
		return;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 * Check for race with ax25_clear_queues().
	 */
	skb = skb_dequeue(&ax25->write_queue);
	if (!skb)
		return;

	ax25->vs = start;

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}

void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		kfree_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim to dev_queue_xmit to add the KISS control byte, and do
 * any packet forwarding in operation.
 */
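/*
 * The 0x00 byte prepended below is the KISS command byte: low nibble 0
 * selects a data frame, high nibble 0 selects TNC port 0.  Any further
 * KISS framing (FEND/FESC escaping) is left to the device driver.
 */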
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;

	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

	ptr = skb_push(skb, 1);
	*ptr = 0x00;			/* KISS */

	dev_queue_xmit(skb);
}

int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	if (ax25->vs == nr) {
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		return 1;
	} else {
		if (ax25->va != nr) {
			ax25_frames_acked(ax25, nr);
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			return 1;
		}
	}
	return 0;
}

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(ax25_frag_lock);

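/*
 * In ax25_send_frame() below the ax25_dev lookups are done under
 * rcu_read_lock(); the read side lock is dropped again as soon as the
 * values needed from the device have been read.
 */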
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *ax25;

	/*
	 * Take the default packet length for the device if zero is
	 * specified.
	 */
	if (paclen == 0) {
		rcu_read_lock();
		ax25_dev = ax25_dev_ax25dev(dev);
		if (!ax25_dev) {
			rcu_read_unlock();
			return NULL;
		}
		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
		rcu_read_unlock();
	}

	/*
	 * Look for an existing connection.
	 */
	if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
		ax25_output(ax25, paclen, skb);
		return ax25;		/* It already existed */
	}

	rcu_read_lock();
	ax25_dev = ax25_dev_ax25dev(dev);
	if (!ax25_dev) {
		rcu_read_unlock();
		return NULL;
	}

	if ((ax25 = ax25_create_cb()) == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	ax25_fillin_cb(ax25, ax25_dev);
	rcu_read_unlock();

	ax25->source_addr = *src;
	ax25->dest_addr   = *dest;

	if (digi != NULL) {
		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			ax25_cb_put(ax25);
			return NULL;
		}
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	/*
	 * There is one ref for the state machine; a caller needs
	 * one more to put it back, just like with the existing one.
	 */
	ax25_cb_hold(ax25);

	ax25_cb_add(ax25);

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	ax25_output(ax25, paclen, skb);

	return ax25;			/* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);
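
/*
 * Illustrative caller sketch (not part of this file; everything except
 * ax25_send_frame(), ax25_cb_put() and kfree_skb() is a hypothetical
 * name).  The extra reference taken above is the caller's to drop:
 *
 *	ax25_cb *ax25;
 *
 *	ax25 = ax25_send_frame(skb, 0, &my_src, &my_dest, NULL, my_dev);
 *	if (ax25 == NULL) {
 *		kfree_skb(skb);		// skb is not consumed on failure
 *		return -ENETUNREACH;	// hypothetical error handling
 *	}
 *	...
 *	ax25_cb_put(ax25);		// release the caller's reference
 */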

/*
 * All outgoing AX.25 I frames pass through this routine, so this is
 * where the fragmentation of frames takes place.  Frames whose data,
 * excluding the PID byte, is longer than paclen are split into
 * segments before being queued for transmission.
 */
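/*
 * Worked example (illustrative figures): with paclen = 256 and a 601
 * byte skb (1 PID byte plus 600 data bytes) carrying a non-text PID,
 * the KA9Q branch below is taken: paclen drops to 254, fragno becomes
 * 601 / 254 = 2, and three segments of 254, 254 and 93 bytes are
 * queued with counters 2, 1 and 0; the first segment also carries
 * AX25_SEG_FIRST.  Each segment is prefixed with the AX25_P_SEGMENT
 * PID and the one byte counter.
 */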
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0)
			fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave)
			ax25_kick(ax25);
		break;
#endif
	}
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
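/*
 * Control field layout produced below (AX.25 I frame):
 *
 *   modulo 8, one octet:     N(R) in bits 7-5, P/F in bit 4,
 *                            N(S) in bits 3-1, bit 0 clear
 *   modulo 128, two octets:  first octet carries N(S) in bits 7-1 with
 *                            bit 0 clear, second octet carries N(R) in
 *                            bits 7-1 with P/F in bit 0
 */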
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *frame;

	if (skb == NULL)
		return;

	skb_reset_network_header(skb);

	if (ax25->modulus == AX25_MODULUS) {
		frame = skb_push(skb, 1);

		*frame = AX25_I;
		*frame |= (poll_bit) ? AX25_PF : 0;
		*frame |= (ax25->vr << 5);
		*frame |= (ax25->vs << 1);
	} else {
		frame = skb_push(skb, 2);

		frame[0] = AX25_I;
		frame[0] |= (ax25->vs << 1);
		frame[1] = (poll_bit) ? AX25_EPF : 0;
		frame[1] |= (ax25->vr << 1);
	}

	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

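/*
 * Push queued I frames out up to the flow control window: 'start' is
 * V(S) while frames are still awaiting acknowledgement, otherwise V(A),
 * and 'end' is V(A) plus the window size, modulo 8 or 128.  Sending
 * stops when the write queue empties or the window fills; the frame
 * that fills the window is sent with the poll bit set, except in DAMA
 * slave mode.
 */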
void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

	if (start == end)
		return;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 * Check for race with ax25_clear_queues().
	 */
	skb = skb_dequeue(&ax25->write_queue);
	if (!skb)
		return;

	ax25->vs = start;

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}

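/*
 * Note on the headroom handling below: skb_expand_head() consumes the
 * skb it is given; on allocation failure it frees it and returns NULL,
 * which is why there is no explicit kfree_skb() on that error path.
 */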
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (unlikely(skb_headroom(skb) < headroom)) {
		skb = skb_expand_head(skb, headroom);
		if (!skb) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			return;
		}
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim to dev_queue_xmit to add the KISS control byte, and do
 * any packet forwarding in operation.
 */
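/*
 * The 0x00 byte prepended below is the KISS command byte: low nibble 0
 * selects a data frame, high nibble 0 selects TNC port 0.  Any further
 * KISS framing (FEND/FESC escaping) is left to the device driver.
 */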
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;

	rcu_read_lock();
	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
	rcu_read_unlock();

	ptr = skb_push(skb, 1);
	*ptr = 0x00;			/* KISS */

	dev_queue_xmit(skb);
}

int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	if (ax25->vs == nr) {
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		return 1;
	} else {
		if (ax25->va != nr) {
			ax25_frames_acked(ax25, nr);
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			return 1;
		}
	}
	return 0;
}