Linux Audio

Check our new training course

Embedded Linux training

Mar 31-Apr 8, 2025
Register
Loading...
v4.6
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  5 */
  6
  7#ifndef _NET_INETPEER_H
  8#define _NET_INETPEER_H
  9
 10#include <linux/types.h>
 11#include <linux/init.h>
 12#include <linux/jiffies.h>
 13#include <linux/spinlock.h>
 14#include <linux/rtnetlink.h>
 15#include <net/ipv6.h>
 16#include <linux/atomic.h>
 17
 18/* IPv4 address key for cache lookups */
  19struct ipv4_addr_key {
  20	__be32	addr;	/* peer's IPv4 address, network byte order */
  21	int	vif;	/* scoping ifindex; 0 = unscoped (see inetpeer_set_addr_v4) —
			 * presumably the bound L3/VRF device, confirm against callers */

  22};
 23
 24#define INETPEER_MAXKEYSZ   (sizeof(struct in6_addr) / sizeof(u32))
 25
/* Generic peer lookup key: the union overlays the v4 key (addr+vif),
 * the full v6 address, and a raw u32 word view used by the generic
 * word-wise comparison in inetpeer_addr_cmp().
 */
  26struct inetpeer_addr {
  27	union {
  28		struct ipv4_addr_key	a4;	/* IPv4 view */
  29		struct in6_addr		a6;	/* IPv6 view */
  30		u32			key[INETPEER_MAXKEYSZ];	/* raw word view for compares */
  31	};
  32	__u16				family;	/* AF_INET or AF_INET6: selects the view */
  33};
 34
/* Long-lived per-destination state shared across sockets (AVL tree node).
 * Field layout is deliberate: tree links and key are grouped at the front
 * for lookup cache locality; frequently-dirtied fields sit at the end.
 */
  35struct inet_peer {
  36	/* group together avl_left,avl_right,v4daddr to speedup lookups */
  37	struct inet_peer __rcu	*avl_left, *avl_right;
  38	struct inetpeer_addr	daddr;	/* lookup key (destination address) */
  39	__u32			avl_height;

  40
  41	u32			metrics[RTAX_MAX];	/* cached routing metrics */
  42	u32			rate_tokens;	/* rate limiting for ICMP */
  43	unsigned long		rate_last;	/* jiffies of last rate-limit refill */




	/* gc bookkeeping: list linkage and RCU head share storage since a
	 * peer is never on the gc list and in an RCU grace period for
	 * different purposes at once.
	 */
  44	union {
  45		struct list_head	gc_list;
  46		struct rcu_head     gc_rcu;
  47	};
  48	/*
  49	 * Once inet_peer is queued for deletion (refcnt == -1), following field
  50	 * is not available: rid
  51	 * We can share memory with rcu_head to help keep inet_peer small.
  52	 */
  53	union {
  54		struct {
  55			atomic_t			rid;		/* Frag reception counter */



  56		};
  57		struct rcu_head         rcu;
  58		struct inet_peer	*gc_next;
  59	};
  60
  61	/* following fields might be frequently dirtied */
  62	__u32			dtime;	/* the time of last use of not referenced entries */
  63	atomic_t		refcnt;	/* -1 once queued for deletion (see comment above) */
  64};
 65
/* Root of one peer AVL tree (e.g. per-netns v4/v6 bases). */
  66struct inet_peer_base {
  67	struct inet_peer __rcu	*root;	/* AVL tree root, RCU-protected */
  68	seqlock_t		lock;	/* serializes writers; readers retry on seq change */
  69	int			total;	/* number of nodes in the tree */
  70};
 71
 72void inet_peer_base_init(struct inet_peer_base *);
 73
 74void inet_initpeers(void) __init;
 75
 76#define INETPEER_METRICS_NEW	(~(u32) 0)
 77
 78static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
 79{
 80	iaddr->a4.addr = ip;
 81	iaddr->a4.vif = 0;
 82	iaddr->family = AF_INET;
 83}
 84
/* Return the IPv4 address stored in @iaddr (caller must know family == AF_INET). */
  85static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
  86{
  87	return iaddr->a4.addr;
  88}
 89
 90static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
 91					struct in6_addr *in6)
 92{
 93	iaddr->a6 = *in6;
 94	iaddr->family = AF_INET6;
 95}
 96
/* Return a pointer to the IPv6 address stored in @iaddr
 * (caller must know family == AF_INET6; pointer aliases @iaddr's storage).
 */
  97static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
  98{
  99	return &iaddr->a6;
100}
101
102/* can be called with or without local BH being disabled */
103struct inet_peer *inet_getpeer(struct inet_peer_base *base,
104			       const struct inetpeer_addr *daddr,
105			       int create);
106
107static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
108						__be32 v4daddr,
109						int vif, int create)
110{
111	struct inetpeer_addr daddr;
112
113	daddr.a4.addr = v4daddr;
114	daddr.a4.vif = vif;
115	daddr.family = AF_INET;
116	return inet_getpeer(base, &daddr, create);
117}
118
119static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
120						const struct in6_addr *v6daddr,
121						int create)
122{
123	struct inetpeer_addr daddr;
124
125	daddr.a6 = *v6daddr;
126	daddr.family = AF_INET6;
127	return inet_getpeer(base, &daddr, create);
128}
129
130static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
131				    const struct inetpeer_addr *b)
132{
133	int i, n;
134
135	if (a->family == AF_INET)
136		n = sizeof(a->a4) / sizeof(u32);
137	else
138		n = sizeof(a->a6) / sizeof(u32);
139
140	for (i = 0; i < n; i++) {
141		if (a->key[i] == b->key[i])
142			continue;
143		if (a->key[i] < b->key[i])
144			return -1;
145		return 1;
146	}
147
148	return 0;
 
 
 
 
 
 
149}
150
151/* can be called from BH context or outside */
152void inet_putpeer(struct inet_peer *p);
153bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
154
155void inetpeer_invalidate_tree(struct inet_peer_base *);
 
 
 
 
 
 
 
 
 
 
 
 
 
156
157#endif /* _NET_INETPEER_H */
v3.5.6
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  5 */
  6
  7#ifndef _NET_INETPEER_H
  8#define _NET_INETPEER_H
  9
 10#include <linux/types.h>
 11#include <linux/init.h>
 12#include <linux/jiffies.h>
 13#include <linux/spinlock.h>
 14#include <linux/rtnetlink.h>
 15#include <net/ipv6.h>
 16#include <linux/atomic.h>
 17
/* Raw peer address storage: one word for IPv4 or four words for IPv6,
 * overlaid in a union (family is tracked separately in inetpeer_addr).
 */
  18struct inetpeer_addr_base {
  19	union {
  20		__be32			a4;	/* IPv4 address, network byte order */
  21		__be32			a6[4];	/* IPv6 address as four be32 words */
  22	};
  23};
 24
 
 
/* Peer lookup key: raw address words plus the family that selects
 * which union member of the base is valid.
 */
  25struct inetpeer_addr {
  26	struct inetpeer_addr_base	addr;	/* v4 or v6 address storage */




  27	__u16				family;	/* AF_INET or AF_INET6 */
  28};
 29
/* Long-lived per-destination state shared across sockets (AVL tree node).
 * Tree links and key are grouped at the front for lookup cache locality;
 * frequently-dirtied fields sit at the end.
 */
  30struct inet_peer {
  31	/* group together avl_left,avl_right,v4daddr to speedup lookups */
  32	struct inet_peer __rcu	*avl_left, *avl_right;
  33	struct inetpeer_addr	daddr;	/* lookup key (destination address) */
  34	__u32			avl_height;
  35
  36	u32			metrics[RTAX_MAX];	/* cached routing metrics */
  37	u32			rate_tokens;	/* rate limiting for ICMP */
  38	unsigned long		rate_last;	/* jiffies of last rate-limit refill */
  39	unsigned long		pmtu_expires;	/* when the learned path MTU lapses */
  40	u32			pmtu_orig;	/* MTU before PMTU discovery lowered it */
  41	u32			pmtu_learned;	/* MTU learned from ICMP frag-needed */
  42	struct inetpeer_addr_base redirect_learned;	/* gateway learned via redirect */
  43	union {
  44		struct list_head	gc_list;
  45		struct rcu_head     gc_rcu;
  46	};
  47	/*
  48	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
  49	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
  50	 * We can share memory with rcu_head to help keep inet_peer small.
  51	 */
  52	union {
  53		struct {
  54			atomic_t			rid;		/* Frag reception counter */
  55			atomic_t			ip_id_count;	/* IP ID for the next packet */
  56			__u32				tcp_ts;		/* cached peer TCP timestamp */
  57			__u32				tcp_ts_stamp;	/* when tcp_ts was recorded */
  58		};
  59		struct rcu_head         rcu;
  60		struct inet_peer	*gc_next;
  61	};
  62
  63	/* following fields might be frequently dirtied */
  64	__u32			dtime;	/* the time of last use of not referenced entries */
  65	atomic_t		refcnt;	/* -1 once queued for deletion (see comment above) */
  66};
 67
 68void			inet_initpeers(void) __init;
 
 
 
 
 
 
 
 
 69
 70#define INETPEER_METRICS_NEW	(~(u32) 0)
 71
/* True if @p's metrics array is still in its freshly-created state,
 * i.e. the RTAX_LOCK slot holds the INETPEER_METRICS_NEW sentinel and
 * no real metrics have been populated yet.
 */
  72static inline bool inet_metrics_new(const struct inet_peer *p)







  73{
  74	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;












  75}
 76
 77/* can be called with or without local BH being disabled */
 78struct inet_peer	*inet_getpeer(const struct inetpeer_addr *daddr, int create);
 79
 80static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
 
 
 
 
 81{
 82	struct inetpeer_addr daddr;
 83
 84	daddr.addr.a4 = v4daddr;
 
 85	daddr.family = AF_INET;
 86	return inet_getpeer(&daddr, create);
 87}
 88
/* Look up (and, if @create, instantiate) the peer entry for an IPv6
 * destination address.
 */
  89static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, int create)


  90{
  91	struct inetpeer_addr daddr;
  92
	/* NOTE(review): the cast reinterprets the __be32 a6[4] storage as a
	 * struct in6_addr to do a struct copy; presumably safe here because
	 * the kernel builds with -fno-strict-aliasing — confirm.
	 */
  93	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
  94	daddr.family = AF_INET6;
  95	return inet_getpeer(&daddr, create);
  96}
 97
 98/* can be called from BH context or outside */
 99extern void inet_putpeer(struct inet_peer *p);
100extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
101
102extern void inetpeer_invalidate_tree(int family);
 
 
 
 
 
 
 
 
 
 
 
103
104/*
105 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
106 * tcp_ts_stamp if no refcount is taken on inet_peer
107 */
/* Warn (once) if @p is accessed without a reference held; see the
 * comment above — the rid/ip_id_count/tcp_ts fields alias rcu_head
 * once refcnt drops, so unreferenced access would read garbage.
 */
 108static inline void inet_peer_refcheck(const struct inet_peer *p)
 109{
 110	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
 111}
112
 
 
 
113
114/* can be called with or without local BH being disabled */
115static inline int inet_getid(struct inet_peer *p, int more)
116{
117	int old, new;
118	more++;
119	inet_peer_refcheck(p);
120	do {
121		old = atomic_read(&p->ip_id_count);
122		new = old + more;
123		if (!new)
124			new = 1;
125	} while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
126	return new;
127}
128
129#endif /* _NET_INETPEER_H */