v4.6
 
/*
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	03-01-2007	Added forwarding for x.25	Andrew Hendry
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>

LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

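/*
 * x25_forward_call - forward an incoming X.25 Call Request towards its
 * destination.  The route for dest_addr is looked up, the Call Request
 * skb is cloned onto the next-hop neighbour, and the (lci, dev1, dev2)
 * triple is recorded on x25_forward_list so that later data packets on
 * this logical channel can be relayed in either direction.
 *
 * Returns 1 if the call was forwarded, -ENOMEM if the forwarding entry
 * could not be allocated, and 0 otherwise (no route, no neighbour,
 * would-be loop, or clone failure).
 */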
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
			struct sk_buff *skb, int lci)
{
	struct x25_route *rt;
	struct x25_neigh *neigh_new = NULL;
	struct list_head *entry;
	struct x25_forward *x25_frwd, *new_frwd;
	struct sk_buff *skbn;
	short same_lci = 0;
	int rc = 0;

	if ((rt = x25_get_route(dest_addr)) == NULL)
		goto out_no_route;

	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
		/* This shouldn't happen, if it occurs somehow
		 * do something sensible
		 */
		goto out_put_route;
	}

	/* Avoid a loop. This is the normal exit path for a
	 * system with only one x.25 iface and default route
	 */
	if (rt->dev == from->dev) {
		goto out_put_nb;
	}

	/* Remote end sending a call request on an already
	 * established LCI? It shouldn't happen, just in case..
	 */
	read_lock_bh(&x25_forward_list_lock);
	list_for_each(entry, &x25_forward_list) {
		x25_frwd = list_entry(entry, struct x25_forward, node);
		if (x25_frwd->lci == lci) {
			pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
			same_lci = 1;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	/* Save the forwarding details for future traffic */
	if (!same_lci){
		if ((new_frwd = kmalloc(sizeof(struct x25_forward),
						GFP_ATOMIC)) == NULL){
			rc = -ENOMEM;
			goto out_put_nb;
		}
		new_frwd->lci = lci;
		new_frwd->dev1 = rt->dev;
		new_frwd->dev2 = from->dev;
		write_lock_bh(&x25_forward_list_lock);
		list_add(&new_frwd->node, &x25_forward_list);
		write_unlock_bh(&x25_forward_list_lock);
	}

	/* Forward the call request */
	if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
		goto out_put_nb;
	}
	x25_transmit_link(skbn, neigh_new);
	rc = 1;


out_put_nb:
	x25_neigh_put(neigh_new);

out_put_route:
	x25_route_put(rt);

out_no_route:
	return rc;
}

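/*
 * x25_forward_data - relay a data packet on an already forwarded call.
 * The forwarding entry for lci identifies the two devices involved; the
 * packet is copied and transmitted on the neighbour of whichever device
 * it did not arrive on.  Returns 1 if the packet was forwarded, else 0.
 */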
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {

	struct x25_forward *frwd;
	struct list_head *entry;
	struct net_device *peer = NULL;
	struct x25_neigh *nb;
	struct sk_buff *skbn;
	int rc = 0;

	read_lock_bh(&x25_forward_list_lock);
	list_for_each(entry, &x25_forward_list) {
		frwd = list_entry(entry, struct x25_forward, node);
		if (frwd->lci == lci) {
			/* The call is established, either side can send */
			if (from->dev == frwd->dev1) {
				peer = frwd->dev2;
			} else {
				peer = frwd->dev1;
			}
			break;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	if ( (nb = x25_get_neigh(peer)) == NULL)
		goto out;

	if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
		goto output;

	}
	x25_transmit_link(skbn, nb);

	rc = 1;
output:
	x25_neigh_put(nb);
out:
	return rc;
}

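/*
 * x25_clear_forward_by_lci - remove the forwarding entry (if any) for a
 * logical channel, used once the call on that LCI has been cleared.
 */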
void x25_clear_forward_by_lci(unsigned int lci)
{
	struct x25_forward *fwd;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_safe(entry, tmp, &x25_forward_list) {
		fwd = list_entry(entry, struct x25_forward, node);
		if (fwd->lci == lci) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}

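/*
 * x25_clear_forward_by_dev - remove every forwarding entry that refers
 * to dev, e.g. when the device is going down or being unregistered.
 */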
void x25_clear_forward_by_dev(struct net_device *dev)
{
	struct x25_forward *fwd;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_safe(entry, tmp, &x25_forward_list) {
		fwd = list_entry(entry, struct x25_forward, node);
		if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}

v6.13.7
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	History
 *	03-01-2007	Added forwarding for x.25	Andrew Hendry
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>

LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
			struct sk_buff *skb, int lci)
{
	struct x25_route *rt;
	struct x25_neigh *neigh_new = NULL;
	struct x25_forward *x25_frwd, *new_frwd;
	struct sk_buff *skbn;
	short same_lci = 0;
	int rc = 0;

	if ((rt = x25_get_route(dest_addr)) == NULL)
		goto out_no_route;

	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
		/* This shouldn't happen, if it occurs somehow
		 * do something sensible
		 */
		goto out_put_route;
	}

	/* Avoid a loop. This is the normal exit path for a
	 * system with only one x.25 iface and default route
	 */
	if (rt->dev == from->dev) {
		goto out_put_nb;
	}

	/* Remote end sending a call request on an already
	 * established LCI? It shouldn't happen, just in case..
	 */
	read_lock_bh(&x25_forward_list_lock);
	list_for_each_entry(x25_frwd, &x25_forward_list, node) {
		if (x25_frwd->lci == lci) {
			pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
			same_lci = 1;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	/* Save the forwarding details for future traffic */
	if (!same_lci){
		if ((new_frwd = kmalloc(sizeof(struct x25_forward),
						GFP_ATOMIC)) == NULL){
			rc = -ENOMEM;
			goto out_put_nb;
		}
		new_frwd->lci = lci;
		new_frwd->dev1 = rt->dev;
		new_frwd->dev2 = from->dev;
		write_lock_bh(&x25_forward_list_lock);
		list_add(&new_frwd->node, &x25_forward_list);
		write_unlock_bh(&x25_forward_list_lock);
	}

	/* Forward the call request */
	if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
		goto out_put_nb;
	}
	x25_transmit_link(skbn, neigh_new);
	rc = 1;


out_put_nb:
	x25_neigh_put(neigh_new);

out_put_route:
	x25_route_put(rt);

out_no_route:
	return rc;
}

int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {

	struct x25_forward *frwd;
	struct net_device *peer = NULL;
	struct x25_neigh *nb;
	struct sk_buff *skbn;
	int rc = 0;

	read_lock_bh(&x25_forward_list_lock);
	list_for_each_entry(frwd, &x25_forward_list, node) {
		if (frwd->lci == lci) {
			/* The call is established, either side can send */
			if (from->dev == frwd->dev1) {
				peer = frwd->dev2;
			} else {
				peer = frwd->dev1;
			}
			break;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	if ( (nb = x25_get_neigh(peer)) == NULL)
		goto out;

	if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
		goto output;

	}
	x25_transmit_link(skbn, nb);

	rc = 1;
output:
	x25_neigh_put(nb);
out:
	return rc;
}

void x25_clear_forward_by_lci(unsigned int lci)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if (fwd->lci == lci) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}


void x25_clear_forward_by_dev(struct net_device *dev)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}