net/ax25/ax25_in.c (Linux v3.15)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

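/*
 *	Receive path in this file, as implemented below: ax25_kiss_rcv()
 *	drops the KISS command byte and hands the frame to ax25_rcv(), which
 *	parses the address field, delivers UI frames straight to raw, IP,
 *	ARP or datagram sockets, and runs everything else through the LAPB
 *	state machines via ax25_process_rx_frame(). Payloads of valid I
 *	frames end up in ax25_rx_iframe(), which dispatches on the PID:
 *	IP, the segmentation protocol, a registered layer 3 handler, or the
 *	owning connected socket.
 */
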
/*
 *	Given a fragment, queue it on the fragment queue and, once the whole
 *	frame is complete, send it back to ax25_rx_iframe.
 */
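/*
 *	Note on the segmentation header (background, per the AX25_SEG_*
 *	definitions used below): the octet following the AX25_P_SEGMENT PID
 *	has AX25_SEG_FIRST set on the first fragment, and its AX25_SEG_REM
 *	bits carry the number of fragments still to come, counting down to
 *	zero on the last one.
 */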
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
	struct sk_buff *skbn, *skbo;

	if (ax25->fragno != 0) {
		if (!(*skb->data & AX25_SEG_FIRST)) {
			if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
				/* Enqueue fragment */
				ax25->fragno = *skb->data & AX25_SEG_REM;
				skb_pull(skb, 1);	/* skip fragno */
				ax25->fraglen += skb->len;
				skb_queue_tail(&ax25->frag_queue, skb);

				/* Last fragment received ? */
				if (ax25->fragno == 0) {
					skbn = alloc_skb(AX25_MAX_HEADER_LEN +
							 ax25->fraglen,
							 GFP_ATOMIC);
					if (!skbn) {
						skb_queue_purge(&ax25->frag_queue);
						return 1;
					}

					skb_reserve(skbn, AX25_MAX_HEADER_LEN);

					skbn->dev   = ax25->ax25_dev->dev;
					skb_reset_network_header(skbn);
					skb_reset_transport_header(skbn);

					/* Copy data from the fragments */
					while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
						skb_copy_from_linear_data(skbo,
							  skb_put(skbn, skbo->len),
									  skbo->len);
						kfree_skb(skbo);
					}

					ax25->fraglen = 0;

					if (ax25_rx_iframe(ax25, skbn) == 0)
						kfree_skb(skbn);
				}

				return 1;
			}
		}
	} else {
		/* First fragment received */
		if (*skb->data & AX25_SEG_FIRST) {
			skb_queue_purge(&ax25->frag_queue);
			ax25->fragno = *skb->data & AX25_SEG_REM;
			skb_pull(skb, 1);		/* skip fragno */
			ax25->fraglen = skb->len;
			skb_queue_tail(&ax25->frag_queue, skb);
			return 1;
		}
	}

	return 0;
}
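
/*
 *	Worked example of the checks above: a frame split into three
 *	segments arrives as REM=2 with AX25_SEG_FIRST set, then REM=1, then
 *	REM=0. The first call primes ax25->fragno and ax25->fraglen and
 *	queues the fragment; each later fragment is accepted only if its
 *	REM value equals ax25->fragno - 1; once fragno reaches zero the
 *	queued payloads are copied into a single freshly allocated skb and
 *	handed back to ax25_rx_iframe().
 */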

/*
 *	This is where all valid I frames are sent, to be dispatched to
 *	whichever protocol requires them.
 */
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	unsigned char pid;
	int queued = 0;

	if (skb == NULL) return 0;

	ax25_start_idletimer(ax25);

	pid = *skb->data;

	if (pid == AX25_P_IP) {
		/* working around a TCP bug to keep additional listeners
		 * happy. TCP re-uses the buffer and destroys the original
		 * content.
		 */
		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
		if (skbn != NULL) {
			kfree_skb(skb);
			skb = skbn;
		}

		skb_pull(skb, 1);	/* Remove PID */
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->dev      = ax25->ax25_dev->dev;
		skb->pkt_type = PACKET_HOST;
		skb->protocol = htons(ETH_P_IP);
		netif_rx(skb);
		return 1;
	}
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		return ax25_rx_fragment(ax25, skb);
	}

	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

	if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
		if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
		    ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}
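
/*
 *	Illustrative sketch, not part of this file: how a layer 3 protocol
 *	hooks into the ax25_protocol_function() dispatch above. It assumes
 *	the struct ax25_protocol / ax25_register_pid() interface exported by
 *	net/ax25/ax25_iface.c (NET/ROM registers AX25_P_NETROM this way);
 *	the "example" names are made up. A handler that returns nonzero has
 *	consumed the skb; returning zero lets the caller free it, as the
 *	ax25_rx_iframe() == 0 check in ax25_rx_fragment above shows.
 */
#if 0	/* example only, not built */
static int example_pid_rx(struct sk_buff *skb, ax25_cb *ax25)
{
	/* take ownership of the frame and report it as consumed */
	kfree_skb(skb);
	return 1;
}

static struct ax25_protocol example_pid = {
	.pid	= AX25_P_TEXT,	/* the PID this handler claims */
	.func	= example_pid_rx,
};

/* from the protocol's init code: ax25_register_pid(&example_pid); */
#endif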

/*
 *	Higher level upcall for a LAPB frame
 */
static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
{
	int queued = 0;

	if (ax25->state == AX25_STATE_0)
		return 0;

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		queued = ax25_std_frame_in(ax25, skb, type);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (dama || ax25->ax25_dev->dama.slave)
			queued = ax25_ds_frame_in(ax25, skb, type);
		else
			queued = ax25_std_frame_in(ax25, skb, type);
		break;
#endif
	}

	return queued;
}

static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb_reset_transport_header(skb);

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		goto free;

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
		goto free;

	/*
	 *	Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull off the AX.25 headers, leaving the CTRL/PID bytes.
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

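	/*
	 *	At this point the address field has been pulled, so for a UI
	 *	frame skb->data[0] is the control octet and skb->data[1] the
	 *	PID. Raw sockets see both via ax25_send_to_raw(); the IP, ARP
	 *	and text cases below strip them (skb_pull(skb, 2)) before
	 *	queueing the payload.
	 */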
	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2); /* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
			goto free;

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore it? For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
		goto free;

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally it
		 *	returns one, otherwise we free it immediately. This
		 *	routine itself wakes the user context layers so we do
		 *	no further work.
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		goto free;
	}

	/* b) received SABM(E) */

	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine)
			goto free;

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			goto free;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk);
		sock_put(sk);
	} else {
free:
		kfree_skb(skb);
	}
	return 0;
}

/*
 *	Receive an AX.25 frame via a SLIP interface.
 */
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	skb_orphan(skb);

	if (!net_eq(dev_net(dev), &init_net)) {
		kfree_skb(skb);
		return 0;
	}

	if ((*skb->data & 0x0F) != 0) {
		kfree_skb(skb);	/* Not a KISS data frame */
		return 0;
	}

	skb_pull(skb, AX25_KISS_HEADER_LEN);	/* Remove the KISS byte */

	return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
}
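
/*
 *	Illustrative sketch, not part of this file: ax25_kiss_rcv() above is
 *	wired up as the handler for the ETH_P_AX25 packet type by the AX.25
 *	socket code (net/ax25/af_ax25.c), roughly as below. Shown only as
 *	context for how frames reach this file.
 */
#if 0	/* example only, not built */
static struct packet_type ax25_packet_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_AX25),
	.func	= ax25_kiss_rcv,
};

/* from ax25_init(): dev_add_pack(&ax25_packet_type); */
#endif
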
net/ax25/ax25_in.c (Linux v4.17)

The v4.17 copy of this file differs from the v3.15 listing above in only three places: the #include <linux/netfilter.h> line is gone, #include <asm/uaccess.h> has become #include <linux/uaccess.h>, and the control block of the newly accepted socket is obtained with sk_to_ax25(make) instead of ax25_sk(make). Everything else is unchanged.