Linux Audio

Check our new training course

Loading...
v4.6
 
  1/*
  2 *	X.25 Packet Layer release 002
  3 *
  4 *	This is ALPHA test software. This code may break your machine,
  5 *	randomly fail to work with new releases, misbehave and/or generally
  6 *	screw up. It might even work.
  7 *
  8 *	This code REQUIRES 2.1.15 or higher
  9 *
 10 *	This module:
 11 *		This module is free software; you can redistribute it and/or
 12 *		modify it under the terms of the GNU General Public License
 13 *		as published by the Free Software Foundation; either version
 14 *		2 of the License, or (at your option) any later version.
 15 *
 16 *	History
 17 *	X.25 001	Jonathan Naylor	  Started coding.
 18 *	X.25 002	Jonathan Naylor	  New timer architecture.
 19 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 20 *					  negotiation.
 21 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 22 */
 23
 24#define pr_fmt(fmt) "X25: " fmt
 25
 26#include <linux/kernel.h>
 27#include <linux/jiffies.h>
 28#include <linux/timer.h>
 29#include <linux/slab.h>
 30#include <linux/netdevice.h>
 31#include <linux/skbuff.h>
 32#include <asm/uaccess.h>
 33#include <linux/init.h>
 34#include <net/x25.h>
 35
 36LIST_HEAD(x25_neigh_list);
 37DEFINE_RWLOCK(x25_neigh_list_lock);
 38
 39static void x25_t20timer_expiry(unsigned long);
 40
 41static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
 42static void x25_transmit_restart_request(struct x25_neigh *nb);
 43
 44/*
 45 *	Linux set/reset timer routines
 46 */
/* (Re)arm the restart-request timer T20 to fire nb->t20 jiffies from now. */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}
 51
/*
 * T20 expired without a Restart Confirmation arriving: retransmit the
 * Restart Request and rearm T20, retrying indefinitely until confirmed.
 */
static void x25_t20timer_expiry(unsigned long param)
{
	/* Timer data is the neighbour pointer installed by setup_timer()
	 * in x25_link_device_up().
	 */
	struct x25_neigh *nb = (struct x25_neigh *)param;

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}
 60
/* Stop T20; safe to call even when the timer is not currently pending. */
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}
 65
/* Non-zero while T20 runs, i.e. our own Restart Request is outstanding. */
static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}
 70
 71/*
 72 *	This handles all restart and diagnostic frames.
 73 */
/*
 *	Handle a link-level (LCI 000) frame from neighbour @nb: restart
 *	request/confirmation and diagnostic packets.  Once the link reaches
 *	state 3 (up), any packets queued while it was down are flushed out.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	/* NOTE(review): frames are acted on regardless of nb->state; confirm
	 * against the X.25 link state tables whether per-state filtering is
	 * required here.
	 */
	switch (frametype) {
	case X25_RESTART_REQUEST:
		/* Only send a confirmation if we had no restart of our own
		 * outstanding (T20 not running); if we did, the peer's
		 * request crossed ours and no explicit confirmation is sent.
		 */
		confirm = !x25_t20timer_pending(nb);
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		if (confirm)
			x25_transmit_restart_confirmation(nb);
		break;

	case X25_RESTART_CONFIRMATION:
		/* Peer confirmed our restart: link is up. */
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		break;

	case X25_DIAGNOSTIC:
		/* Need header + 4 bytes (diag code + 3 explanation bytes). */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
		       skb->data[3], skb->data[4],
		       skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
		       frametype);
		break;
	}

	/* Link now up: drain everything queued via x25_transmit_link(). */
	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}
113
114/*
115 *	This routine is called when a Restart Request is needed
116 */
117static void x25_transmit_restart_request(struct x25_neigh *nb)
118{
119	unsigned char *dptr;
120	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
121	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
122
123	if (!skb)
124		return;
125
126	skb_reserve(skb, X25_MAX_L2_LEN);
127
128	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
129
130	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
131	*dptr++ = 0x00;
132	*dptr++ = X25_RESTART_REQUEST;
133	*dptr++ = 0x00;
134	*dptr++ = 0;
135
136	skb->sk = NULL;
137
138	x25_send_frame(skb, nb);
139}
140
141/*
142 * This routine is called when a Restart Confirmation is needed
143 */
144static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
145{
146	unsigned char *dptr;
147	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
148	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
149
150	if (!skb)
151		return;
152
153	skb_reserve(skb, X25_MAX_L2_LEN);
154
155	dptr = skb_put(skb, X25_STD_MIN_LEN);
156
157	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
158	*dptr++ = 0x00;
159	*dptr++ = X25_RESTART_CONFIRMATION;
160
161	skb->sk = NULL;
162
163	x25_send_frame(skb, nb);
164}
165
166/*
167 *	This routine is called when a Clear Request is needed outside of the context
168 *	of a connected socket.
169 */
170void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
171				unsigned char cause)
172{
173	unsigned char *dptr;
174	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
175	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
176
177	if (!skb)
178		return;
179
180	skb_reserve(skb, X25_MAX_L2_LEN);
181
182	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
183
184	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
185					 X25_GFI_EXTSEQ :
186					 X25_GFI_STDSEQ);
187	*dptr++ = (lci >> 0) & 0xFF;
188	*dptr++ = X25_CLEAR_REQUEST;
189	*dptr++ = cause;
190	*dptr++ = 0x00;
191
192	skb->sk = NULL;
193
194	x25_send_frame(skb, nb);
195}
196
197void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
198{
199	switch (nb->state) {
200	case X25_LINK_STATE_0:
201		skb_queue_tail(&nb->queue, skb);
202		nb->state = X25_LINK_STATE_1;
203		x25_establish_link(nb);
204		break;
205	case X25_LINK_STATE_1:
206	case X25_LINK_STATE_2:
207		skb_queue_tail(&nb->queue, skb);
208		break;
209	case X25_LINK_STATE_3:
210		x25_send_frame(skb, nb);
211		break;
212	}
213}
214
215/*
216 *	Called when the link layer has become established.
217 */
218void x25_link_established(struct x25_neigh *nb)
219{
220	switch (nb->state) {
221	case X25_LINK_STATE_0:
222		nb->state = X25_LINK_STATE_2;
223		break;
224	case X25_LINK_STATE_1:
225		x25_transmit_restart_request(nb);
226		nb->state = X25_LINK_STATE_2;
227		x25_start_t20timer(nb);
228		break;
229	}
230}
231
232/*
233 *	Called when the link layer has terminated, or an establishment
234 *	request has failed.
235 */
236
void x25_link_terminated(struct x25_neigh *nb)
{
	/* Link is gone: drop back to the disconnected state. */
	nb->state = X25_LINK_STATE_0;
	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}
243
244/*
245 *	Add a new device.
246 */
/*
 *	Create an x25_neigh for a newly registered device and link it into
 *	x25_neigh_list.  Allocation failure is silently ignored.
 */
void x25_link_device_up(struct net_device *dev)
{
	/* GFP_ATOMIC: may be called from a non-sleeping notifier context. */
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);

	/* The neighbour holds a device reference until
	 * x25_link_device_down() drops it.
	 */
	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				       X25_MASK_THROUGHPUT |
				       X25_MASK_PACKET_SIZE |
				       X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	/* Initial reference owned by the list; dropped in __x25_remove_neigh(). */
	atomic_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}
275
276/**
277 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neigh to remove
279 *
280 *	Remove neighbour from x25_neigh_list. If it was there.
281 *	Caller must hold x25_neigh_list_lock.
282 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	/* Drop any packets still queued for this neighbour and stop T20. */
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* node.next is non-NULL only while the neigh is linked into
	 * x25_neigh_list; this guards against a double removal.
	 */
	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}
293
294/*
295 *	A device has been removed, remove its links.
296 */
297void x25_link_device_down(struct net_device *dev)
298{
299	struct x25_neigh *nb;
300	struct list_head *entry, *tmp;
301
302	write_lock_bh(&x25_neigh_list_lock);
303
304	list_for_each_safe(entry, tmp, &x25_neigh_list) {
305		nb = list_entry(entry, struct x25_neigh, node);
306
307		if (nb->dev == dev) {
308			__x25_remove_neigh(nb);
309			dev_put(dev);
310		}
311	}
312
313	write_unlock_bh(&x25_neigh_list_lock);
314}
315
316/*
317 *	Given a device, return the neighbour address.
318 */
319struct x25_neigh *x25_get_neigh(struct net_device *dev)
320{
321	struct x25_neigh *nb, *use = NULL;
322	struct list_head *entry;
323
324	read_lock_bh(&x25_neigh_list_lock);
325	list_for_each(entry, &x25_neigh_list) {
326		nb = list_entry(entry, struct x25_neigh, node);
327
328		if (nb->dev == dev) {
329			use = nb;
330			break;
331		}
332	}
333
334	if (use)
335		x25_neigh_hold(use);
336	read_unlock_bh(&x25_neigh_list_lock);
337	return use;
338}
339
340/*
341 *	Handle the ioctls that control the subscription functions.
342 */
/*
 *	Handle SIOCX25GSUBSCRIP / SIOCX25SSUBSCRIP: get or set a device's
 *	extended-sequencing flag and global facilities mask.
 *	Returns 0 on success, -EINVAL for a bad command/device/value, or
 *	-EFAULT when the user pointer cannot be copied.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	/* The neigh reference keeps nb alive; the device ref can go now. */
	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		/* Snapshot both fields under the list lock for consistency. */
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended	     = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		/* extended must be exactly 0 or 1. */
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended	     = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}
390
391
392/*
393 *	Release all memory associated with X.25 neighbour structures.
394 */
395void __exit x25_link_free(void)
396{
397	struct x25_neigh *nb;
398	struct list_head *entry, *tmp;
399
400	write_lock_bh(&x25_neigh_list_lock);
401
402	list_for_each_safe(entry, tmp, &x25_neigh_list) {
403		struct net_device *dev;
404
405		nb = list_entry(entry, struct x25_neigh, node);
406		dev = nb->dev;
407		__x25_remove_neigh(nb);
408		dev_put(dev);
409	}
410	write_unlock_bh(&x25_neigh_list_lock);
411}
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *	X.25 Packet Layer release 002
  4 *
  5 *	This is ALPHA test software. This code may break your machine,
  6 *	randomly fail to work with new releases, misbehave and/or generally
  7 *	screw up. It might even work.
  8 *
  9 *	This code REQUIRES 2.1.15 or higher
 10 *
 
 
 
 
 
 
 11 *	History
 12 *	X.25 001	Jonathan Naylor	  Started coding.
 13 *	X.25 002	Jonathan Naylor	  New timer architecture.
 14 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 15 *					  negotiation.
 16 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 17 */
 18
 19#define pr_fmt(fmt) "X25: " fmt
 20
 21#include <linux/kernel.h>
 22#include <linux/jiffies.h>
 23#include <linux/timer.h>
 24#include <linux/slab.h>
 25#include <linux/netdevice.h>
 26#include <linux/skbuff.h>
 27#include <linux/uaccess.h>
 28#include <linux/init.h>
 29#include <net/x25.h>
 30
 31LIST_HEAD(x25_neigh_list);
 32DEFINE_RWLOCK(x25_neigh_list_lock);
 33
 34static void x25_t20timer_expiry(struct timer_list *);
 35
 36static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
 37static void x25_transmit_restart_request(struct x25_neigh *nb);
 38
 39/*
 40 *	Linux set/reset timer routines
 41 */
/* (Re)arm the restart-request timer T20 to fire nb->t20 jiffies from now. */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}
 46
/*
 * T20 expired without a Restart Confirmation arriving: retransmit the
 * Restart Request and rearm T20, retrying indefinitely until confirmed.
 */
static void x25_t20timer_expiry(struct timer_list *t)
{
	/* Recover the neighbour from its embedded timer_list member. */
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}
 55
/* Stop T20; safe to call even when the timer is not currently pending. */
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}
 60
/* Non-zero while T20 runs, i.e. our own Restart Request is outstanding. */
static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}
 65
 66/*
 67 *	This handles all restart and diagnostic frames.
 68 */
/*
 *	Handle a link-level (LCI 000) frame from neighbour @nb: restart
 *	request/confirmation and diagnostic packets.  Once the link reaches
 *	state 3 (up), any packets queued while it was down are flushed out.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	/* NOTE(review): frames are acted on regardless of nb->state; confirm
	 * against the X.25 link state tables whether per-state filtering is
	 * required here.
	 */
	switch (frametype) {
	case X25_RESTART_REQUEST:
		/* Only send a confirmation if we had no restart of our own
		 * outstanding (T20 not running); if we did, the peer's
		 * request crossed ours and no explicit confirmation is sent.
		 */
		confirm = !x25_t20timer_pending(nb);
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		if (confirm)
			x25_transmit_restart_confirmation(nb);
		break;

	case X25_RESTART_CONFIRMATION:
		/* Peer confirmed our restart: link is up. */
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		break;

	case X25_DIAGNOSTIC:
		/* Need header + 4 bytes (diag code + 3 explanation bytes). */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
		       skb->data[3], skb->data[4],
		       skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
		       frametype);
		break;
	}

	/* Link now up: drain everything queued via x25_transmit_link(). */
	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}
108
109/*
110 *	This routine is called when a Restart Request is needed
111 */
112static void x25_transmit_restart_request(struct x25_neigh *nb)
113{
114	unsigned char *dptr;
115	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
116	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
117
118	if (!skb)
119		return;
120
121	skb_reserve(skb, X25_MAX_L2_LEN);
122
123	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
124
125	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
126	*dptr++ = 0x00;
127	*dptr++ = X25_RESTART_REQUEST;
128	*dptr++ = 0x00;
129	*dptr++ = 0;
130
131	skb->sk = NULL;
132
133	x25_send_frame(skb, nb);
134}
135
136/*
137 * This routine is called when a Restart Confirmation is needed
138 */
139static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
140{
141	unsigned char *dptr;
142	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
143	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
144
145	if (!skb)
146		return;
147
148	skb_reserve(skb, X25_MAX_L2_LEN);
149
150	dptr = skb_put(skb, X25_STD_MIN_LEN);
151
152	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
153	*dptr++ = 0x00;
154	*dptr++ = X25_RESTART_CONFIRMATION;
155
156	skb->sk = NULL;
157
158	x25_send_frame(skb, nb);
159}
160
161/*
162 *	This routine is called when a Clear Request is needed outside of the context
163 *	of a connected socket.
164 */
165void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
166				unsigned char cause)
167{
168	unsigned char *dptr;
169	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
170	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
171
172	if (!skb)
173		return;
174
175	skb_reserve(skb, X25_MAX_L2_LEN);
176
177	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
178
179	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
180					 X25_GFI_EXTSEQ :
181					 X25_GFI_STDSEQ);
182	*dptr++ = (lci >> 0) & 0xFF;
183	*dptr++ = X25_CLEAR_REQUEST;
184	*dptr++ = cause;
185	*dptr++ = 0x00;
186
187	skb->sk = NULL;
188
189	x25_send_frame(skb, nb);
190}
191
192void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
193{
194	switch (nb->state) {
195	case X25_LINK_STATE_0:
196		skb_queue_tail(&nb->queue, skb);
197		nb->state = X25_LINK_STATE_1;
198		x25_establish_link(nb);
199		break;
200	case X25_LINK_STATE_1:
201	case X25_LINK_STATE_2:
202		skb_queue_tail(&nb->queue, skb);
203		break;
204	case X25_LINK_STATE_3:
205		x25_send_frame(skb, nb);
206		break;
207	}
208}
209
210/*
211 *	Called when the link layer has become established.
212 */
213void x25_link_established(struct x25_neigh *nb)
214{
215	switch (nb->state) {
216	case X25_LINK_STATE_0:
217		nb->state = X25_LINK_STATE_2;
218		break;
219	case X25_LINK_STATE_1:
220		x25_transmit_restart_request(nb);
221		nb->state = X25_LINK_STATE_2;
222		x25_start_t20timer(nb);
223		break;
224	}
225}
226
227/*
228 *	Called when the link layer has terminated, or an establishment
229 *	request has failed.
230 */
231
void x25_link_terminated(struct x25_neigh *nb)
{
	/* Link is gone: drop back to the disconnected state. */
	nb->state = X25_LINK_STATE_0;
	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}
238
239/*
240 *	Add a new device.
241 */
/*
 *	Create an x25_neigh for a newly registered device and link it into
 *	x25_neigh_list.  Allocation failure is silently ignored.
 */
void x25_link_device_up(struct net_device *dev)
{
	/* GFP_ATOMIC: may be called from a non-sleeping notifier context. */
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	/* The neighbour holds a device reference until
	 * x25_link_device_down() drops it.
	 */
	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				       X25_MASK_THROUGHPUT |
				       X25_MASK_PACKET_SIZE |
				       X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	/* Initial reference owned by the list; dropped in __x25_remove_neigh(). */
	refcount_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}
270
271/**
272 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neigh to remove
274 *
275 *	Remove neighbour from x25_neigh_list. If it was there.
276 *	Caller must hold x25_neigh_list_lock.
277 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	/* Drop any packets still queued for this neighbour and stop T20. */
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* node.next is non-NULL only while the neigh is linked into
	 * x25_neigh_list; this guards against a double removal.
	 */
	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}
288
289/*
290 *	A device has been removed, remove its links.
291 */
292void x25_link_device_down(struct net_device *dev)
293{
294	struct x25_neigh *nb;
295	struct list_head *entry, *tmp;
296
297	write_lock_bh(&x25_neigh_list_lock);
298
299	list_for_each_safe(entry, tmp, &x25_neigh_list) {
300		nb = list_entry(entry, struct x25_neigh, node);
301
302		if (nb->dev == dev) {
303			__x25_remove_neigh(nb);
304			dev_put(dev);
305		}
306	}
307
308	write_unlock_bh(&x25_neigh_list_lock);
309}
310
311/*
312 *	Given a device, return the neighbour address.
313 */
314struct x25_neigh *x25_get_neigh(struct net_device *dev)
315{
316	struct x25_neigh *nb, *use = NULL;
317	struct list_head *entry;
318
319	read_lock_bh(&x25_neigh_list_lock);
320	list_for_each(entry, &x25_neigh_list) {
321		nb = list_entry(entry, struct x25_neigh, node);
322
323		if (nb->dev == dev) {
324			use = nb;
325			break;
326		}
327	}
328
329	if (use)
330		x25_neigh_hold(use);
331	read_unlock_bh(&x25_neigh_list_lock);
332	return use;
333}
334
335/*
336 *	Handle the ioctls that control the subscription functions.
337 */
/*
 *	Handle SIOCX25GSUBSCRIP / SIOCX25SSUBSCRIP: get or set a device's
 *	extended-sequencing flag and global facilities mask.
 *	Returns 0 on success, -EINVAL for a bad command/device/value, or
 *	-EFAULT when the user pointer cannot be copied.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	/* The neigh reference keeps nb alive; the device ref can go now. */
	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		/* Snapshot both fields under the list lock for consistency. */
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended	     = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		/* extended must be exactly 0 or 1. */
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended	     = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}
385
386
387/*
388 *	Release all memory associated with X.25 neighbour structures.
389 */
390void __exit x25_link_free(void)
391{
392	struct x25_neigh *nb;
393	struct list_head *entry, *tmp;
394
395	write_lock_bh(&x25_neigh_list_lock);
396
397	list_for_each_safe(entry, tmp, &x25_neigh_list) {
398		struct net_device *dev;
399
400		nb = list_entry(entry, struct x25_neigh, node);
401		dev = nb->dev;
402		__x25_remove_neigh(nb);
403		dev_put(dev);
404	}
405	write_unlock_bh(&x25_neigh_list_lock);
406}