/* net/ax25/ax25_in.c — Linux kernel v6.8 copy follows. */
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
 
 
 
 
  3 *
  4 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
  5 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
  6 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
  7 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
  8 */
  9#include <linux/errno.h>
 10#include <linux/types.h>
 11#include <linux/socket.h>
 12#include <linux/in.h>
 13#include <linux/kernel.h>
 14#include <linux/timer.h>
 15#include <linux/string.h>
 16#include <linux/sockios.h>
 17#include <linux/net.h>
 18#include <linux/slab.h>
 19#include <net/ax25.h>
 20#include <linux/inet.h>
 21#include <linux/netdevice.h>
 22#include <linux/skbuff.h>
 
 23#include <net/sock.h>
 24#include <net/tcp_states.h>
 25#include <linux/uaccess.h>
 
 26#include <linux/fcntl.h>
 27#include <linux/mm.h>
 28#include <linux/interrupt.h>
 29
/*
 *	Given a fragment, queue it on the fragment queue and if the fragment
 *	is complete, send it back to ax25_rx_iframe.
 *
 *	Returns 1 when the skb has been consumed (queued on the fragment
 *	queue, or the whole queue purged on allocation failure); returns 0
 *	when the skb was not taken and the caller must free it.
 */
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
	struct sk_buff *skbn, *skbo;

	if (ax25->fragno != 0) {
		/* Reassembly already in progress: only continuation
		 * fragments (FIRST bit clear) are acceptable here. */
		if (!(*skb->data & AX25_SEG_FIRST)) {
			/* The remaining-segments counter must count down by
			 * exactly one; anything else falls through to the
			 * "not consumed" return below. */
			if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
				/* Enqueue fragment */
				ax25->fragno = *skb->data & AX25_SEG_REM;
				skb_pull(skb, 1);	/* skip fragno */
				ax25->fraglen += skb->len;
				skb_queue_tail(&ax25->frag_queue, skb);

				/* Last fragment received ? */
				if (ax25->fragno == 0) {
					/* One linear buffer big enough for
					 * the whole reassembled frame. */
					skbn = alloc_skb(AX25_MAX_HEADER_LEN +
							 ax25->fraglen,
							 GFP_ATOMIC);
					if (!skbn) {
						/* Out of memory: drop all
						 * collected fragments. */
						skb_queue_purge(&ax25->frag_queue);
						return 1;
					}

					skb_reserve(skbn, AX25_MAX_HEADER_LEN);

					skbn->dev   = ax25->ax25_dev->dev;
					skb_reset_network_header(skbn);
					skb_reset_transport_header(skbn);

					/* Copy data from the fragments */
					while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
						skb_copy_from_linear_data(skbo,
							  skb_put(skbn, skbo->len),
									  skbo->len);
						kfree_skb(skbo);
					}

					ax25->fraglen = 0;

					/* Hand the reassembled frame back up;
					 * free it if nobody queued it. */
					if (ax25_rx_iframe(ax25, skbn) == 0)
						kfree_skb(skbn);
				}

				return 1;
			}
		}
	} else {
		/* First fragment received */
		if (*skb->data & AX25_SEG_FIRST) {
			/* Discard any stale partial reassembly before
			 * starting a new one. */
			skb_queue_purge(&ax25->frag_queue);
			ax25->fragno = *skb->data & AX25_SEG_REM;
			skb_pull(skb, 1);		/* skip fragno */
			ax25->fraglen = skb->len;
			skb_queue_tail(&ax25->frag_queue, skb);
			return 1;
		}
	}

	return 0;
}
 94
/*
 *	This is where all valid I frames are sent to, to be dispatched to
 *	whichever protocol requires them.
 *
 *	Returns nonzero when the skb was consumed (queued or handed on to
 *	another layer), 0 when the caller remains responsible for it.
 */
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	unsigned char pid;
	int queued = 0;

	if (skb == NULL) return 0;

	/* Data activity: restart the idle-disconnect timer. */
	ax25_start_idletimer(ax25);

	/* First byte of the payload is the AX.25 protocol ID. */
	pid = *skb->data;

	if (pid == AX25_P_IP) {
		/* working around a TCP bug to keep additional listeners
		 * happy. TCP re-uses the buffer and destroys the original
		 * content.
		 */
		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
		if (skbn != NULL) {
			/* If the copy failed we just hand the original up. */
			kfree_skb(skb);
			skb = skbn;
		}

		skb_pull(skb, 1);	/* Remove PID */
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->dev      = ax25->ax25_dev->dev;
		skb->pkt_type = PACKET_HOST;
		skb->protocol = htons(ETH_P_IP);
		netif_rx(skb);
		return 1;
	}
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		/* Fragmented frame: reassemble (consumes skb on success). */
		return ax25_rx_fragment(ax25, skb);
	}

	/* A registered layer 3/4 protocol handler for this PID? */
	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

	/* Otherwise deliver to the connected-mode socket, if one exists
	 * and the device runs full connected mode. */
	if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
		if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
		    ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				/* Socket receive buffer full: flow off. */
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}
153
154/*
155 *	Higher level upcall for a LAPB frame
156 */
157static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
158{
159	int queued = 0;
160
161	if (ax25->state == AX25_STATE_0)
162		return 0;
163
164	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
165	case AX25_PROTO_STD_SIMPLEX:
166	case AX25_PROTO_STD_DUPLEX:
167		queued = ax25_std_frame_in(ax25, skb, type);
168		break;
169
170#ifdef CONFIG_AX25_DAMA_SLAVE
171	case AX25_PROTO_DAMA_SLAVE:
172		if (dama || ax25->ax25_dev->dama.slave)
173			queued = ax25_ds_frame_in(ax25, skb, type);
174		else
175			queued = ax25_std_frame_in(ax25, skb, type);
176		break;
177#endif
178	}
179
180	return queued;
181}
182
/*
 *	Main AX.25 receive routine: parse the address field, deliver UI
 *	frames directly, and feed everything else through the LAPB state
 *	machines, accepting new SABM(E) connections as needed.
 *	Always returns 0; the skb is consumed on every path.
 */
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
		    const ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb_reset_transport_header(skb);

	/* Drop frames from devices with no AX.25 state attached. */
	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		goto free;

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
		goto free;

	/*
	 *	Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull of the AX.25 headers leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2); /* skip control and pid */

		/* Give raw sockets a copy of every UI frame first. */
		ax25_send_to_raw(&dest, skb, skb->data[1]);

		/* Not addressed to us and not broadcast: nothing more. */
		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
			goto free;

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				/* Manual rcvbuf check: drop when full. */
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device ?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore them. For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
		goto free;

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	/* Existing connection for this address pair and path? */
	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally it
		 *	returns one otherwise we free it immediately. This
		 *	routine itself wakes the user context layers so we do
		 *	no further work
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		goto free;
	}

	/* b) received SABM(E) */

	/* Look for a listening socket on the final destination, or on the
	 * next digipeater hop if the path is not yet complete. */
	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		/* Refuse the connection if the accept queue is full or a
		 * child socket cannot be created. */
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = sk_to_ax25(make);
		skb_set_owner_r(skb, make);
		/* Park the SABM skb on the listener so accept() finds it. */
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk_acceptq_added(sk);
		bh_unlock_sock(sk);
	} else {
		if (!mine)
			goto free;

		/* No listener: accept into a socketless control block. */
		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			goto free;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	/* We answer with our address as source, the peer as destination. */
	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	/* SABME selects extended (mod-128) operation, SABM mod-8. */
	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	/* Acknowledge the connection request. */
	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	/* Connection established. */
	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		/* Wake the listener so it can accept() the new child. */
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk);
		sock_put(sk);
	} else {
free:
		kfree_skb(skb);
	}
	return 0;
}
429
430/*
431 *	Receive an AX.25 frame via a SLIP interface.
432 */
433int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
434		  struct packet_type *ptype, struct net_device *orig_dev)
435{
436	skb_orphan(skb);
437
438	if (!net_eq(dev_net(dev), &init_net)) {
439		kfree_skb(skb);
440		return 0;
441	}
442
443	if ((*skb->data & 0x0F) != 0) {
444		kfree_skb(skb);	/* Not a KISS data frame */
445		return 0;
446	}
447
448	skb_pull(skb, AX25_KISS_HEADER_LEN);	/* Remove the KISS byte */
449
450	return ax25_rcv(skb, dev, (const ax25_address *)dev->dev_addr, ptype);
451}
/* net/ax25/ax25_in.c — older Linux kernel v3.1 copy follows. */
  1/*
  2 * This program is free software; you can redistribute it and/or modify
  3 * it under the terms of the GNU General Public License as published by
  4 * the Free Software Foundation; either version 2 of the License, or
  5 * (at your option) any later version.
  6 *
  7 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
  8 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
  9 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 10 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
 11 */
 12#include <linux/errno.h>
 13#include <linux/types.h>
 14#include <linux/socket.h>
 15#include <linux/in.h>
 16#include <linux/kernel.h>
 17#include <linux/timer.h>
 18#include <linux/string.h>
 19#include <linux/sockios.h>
 20#include <linux/net.h>
 21#include <linux/slab.h>
 22#include <net/ax25.h>
 23#include <linux/inet.h>
 24#include <linux/netdevice.h>
 25#include <linux/skbuff.h>
 26#include <linux/netfilter.h>
 27#include <net/sock.h>
 28#include <net/tcp_states.h>
 29#include <asm/uaccess.h>
 30#include <asm/system.h>
 31#include <linux/fcntl.h>
 32#include <linux/mm.h>
 33#include <linux/interrupt.h>
 34
/*
 *	Given a fragment, queue it on the fragment queue and if the fragment
 *	is complete, send it back to ax25_rx_iframe.
 *
 *	Returns 1 when the skb has been consumed (queued on the fragment
 *	queue, or the whole queue purged on allocation failure); returns 0
 *	when the skb was not taken and the caller must free it.
 */
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
	struct sk_buff *skbn, *skbo;

	if (ax25->fragno != 0) {
		/* Reassembly already in progress: only continuation
		 * fragments (FIRST bit clear) are acceptable here. */
		if (!(*skb->data & AX25_SEG_FIRST)) {
			/* The remaining-segments counter must count down by
			 * exactly one; anything else falls through to the
			 * "not consumed" return below. */
			if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
				/* Enqueue fragment */
				ax25->fragno = *skb->data & AX25_SEG_REM;
				skb_pull(skb, 1);	/* skip fragno */
				ax25->fraglen += skb->len;
				skb_queue_tail(&ax25->frag_queue, skb);

				/* Last fragment received ? */
				if (ax25->fragno == 0) {
					/* One linear buffer big enough for
					 * the whole reassembled frame. */
					skbn = alloc_skb(AX25_MAX_HEADER_LEN +
							 ax25->fraglen,
							 GFP_ATOMIC);
					if (!skbn) {
						/* Out of memory: drop all
						 * collected fragments. */
						skb_queue_purge(&ax25->frag_queue);
						return 1;
					}

					skb_reserve(skbn, AX25_MAX_HEADER_LEN);

					skbn->dev   = ax25->ax25_dev->dev;
					skb_reset_network_header(skbn);
					skb_reset_transport_header(skbn);

					/* Copy data from the fragments */
					while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
						skb_copy_from_linear_data(skbo,
							  skb_put(skbn, skbo->len),
									  skbo->len);
						kfree_skb(skbo);
					}

					ax25->fraglen = 0;

					/* Hand the reassembled frame back up;
					 * free it if nobody queued it. */
					if (ax25_rx_iframe(ax25, skbn) == 0)
						kfree_skb(skbn);
				}

				return 1;
			}
		}
	} else {
		/* First fragment received */
		if (*skb->data & AX25_SEG_FIRST) {
			/* Discard any stale partial reassembly before
			 * starting a new one. */
			skb_queue_purge(&ax25->frag_queue);
			ax25->fragno = *skb->data & AX25_SEG_REM;
			skb_pull(skb, 1);		/* skip fragno */
			ax25->fraglen = skb->len;
			skb_queue_tail(&ax25->frag_queue, skb);
			return 1;
		}
	}

	return 0;
}
 99
/*
 *	This is where all valid I frames are sent to, to be dispatched to
 *	whichever protocol requires them.
 *
 *	Returns nonzero when the skb was consumed (queued or handed on to
 *	another layer), 0 when the caller remains responsible for it.
 */
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	unsigned char pid;
	int queued = 0;

	if (skb == NULL) return 0;

	/* Data activity: restart the idle-disconnect timer. */
	ax25_start_idletimer(ax25);

	/* First byte of the payload is the AX.25 protocol ID. */
	pid = *skb->data;

	if (pid == AX25_P_IP) {
		/* working around a TCP bug to keep additional listeners
		 * happy. TCP re-uses the buffer and destroys the original
		 * content.
		 */
		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
		if (skbn != NULL) {
			/* If the copy failed we just hand the original up. */
			kfree_skb(skb);
			skb = skbn;
		}

		skb_pull(skb, 1);	/* Remove PID */
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->dev      = ax25->ax25_dev->dev;
		skb->pkt_type = PACKET_HOST;
		skb->protocol = htons(ETH_P_IP);
		netif_rx(skb);
		return 1;
	}
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		/* Fragmented frame: reassemble (consumes skb on success). */
		return ax25_rx_fragment(ax25, skb);
	}

	/* A registered layer 3/4 protocol handler for this PID? */
	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

	/* Otherwise deliver to the connected-mode socket, if one exists
	 * and the device runs full connected mode. */
	if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
		if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
		    ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				/* Socket receive buffer full: flow off. */
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}
158
/*
 *	Higher level upcall for a LAPB frame.
 *
 *	Dispatches an incoming frame to the standard or DAMA slave state
 *	machine, depending on the device's configured protocol.  Returns
 *	nonzero when the skb was queued, 0 when the caller must free it.
 */
static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
{
	int queued = 0;

	/* Disconnected control blocks never accept frames. */
	if (ax25->state == AX25_STATE_0)
		return 0;

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		queued = ax25_std_frame_in(ax25, skb, type);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		/* Use the DAMA state machine only once the master has
		 * polled us (or the device is already in slave mode). */
		if (dama || ax25->ax25_dev->dama.slave)
			queued = ax25_ds_frame_in(ax25, skb, type);
		else
			queued = ax25_std_frame_in(ax25, skb, type);
		break;
#endif
	}

	return queued;
}
187
/*
 *	Main AX.25 receive routine: parse the address field, deliver UI
 *	frames directly, and feed everything else through the LAPB state
 *	machines, accepting new SABM(E) connections as needed.
 *	Always returns 0; the skb is consumed on every path.
 */
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb_reset_transport_header(skb);

	/* Drop frames from devices with no AX.25 state attached. */
	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		goto free;

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
		goto free;

	/*
	 *	Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull of the AX.25 headers leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2); /* skip control and pid */

		/* Give raw sockets a copy of every UI frame first. */
		ax25_send_to_raw(&dest, skb, skb->data[1]);

		/* Not addressed to us and not broadcast: nothing more. */
		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
			goto free;

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				/* Manual rcvbuf check: drop when full. */
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device ?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore them. For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
		goto free;

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	/* Existing connection for this address pair and path? */
	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally it
		 *	returns one otherwise we free it immediately. This
		 *	routine itself wakes the user context layers so we do
		 *	no further work
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		goto free;
	}

	/* b) received SABM(E) */

	/* Look for a listening socket on the final destination, or on the
	 * next digipeater hop if the path is not yet complete. */
	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		/* Refuse the connection if the accept queue is full or a
		 * child socket cannot be created. */
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		/* Park the SABM skb on the listener so accept() finds it. */
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		/* Pre-3.x API: accept backlog bumped by hand. */
		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine)
			goto free;

		/* No listener: accept into a socketless control block. */
		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			goto free;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	/* We answer with our address as source, the peer as destination. */
	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	/* SABME selects extended (mod-128) operation, SABM mod-8. */
	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	/* Acknowledge the connection request. */
	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	/* Connection established. */
	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		/* Wake the listener so it can accept() the new child
		 * (old two-argument sk_data_ready API). */
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else {
free:
		kfree_skb(skb);
	}
	return 0;
}
434
/*
 *	Receive an AX.25 frame via a SLIP interface.
 *
 *	Validates the namespace and the KISS command byte, strips the KISS
 *	header, and hands the frame to the common AX.25 receive path.
 *	Always returns 0; the skb is consumed on every path.
 */
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	skb_orphan(skb);

	/* AX.25 runs only in the initial network namespace. */
	if (!net_eq(dev_net(dev), &init_net)) {
		kfree_skb(skb);
		return 0;
	}

	/* A zero low nibble marks a KISS data frame; drop anything else. */
	if ((*skb->data & 0x0F) != 0) {
		kfree_skb(skb);	/* Not a KISS data frame */
		return 0;
	}

	skb_pull(skb, AX25_KISS_HEADER_LEN);	/* Remove the KISS byte */

	return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
}