v4.6
/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>

/* IPv4 address key for cache lookups */
struct ipv4_addr_key {
	__be32	addr;
	int	vif;
};

#define INETPEER_MAXKEYSZ   (sizeof(struct in6_addr) / sizeof(u32))

struct inetpeer_addr {
	union {
		struct ipv4_addr_key	a4;
		struct in6_addr		a6;
		u32			key[INETPEER_MAXKEYSZ];
	};
	__u16				family;
};

struct inet_peer {
	/* group together avl_left,avl_right,v4daddr to speedup lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;
	__u32			avl_height;

	u32			metrics[RTAX_MAX];
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;
	union {
		struct list_head	gc_list;
		struct rcu_head     gc_rcu;
	};
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), following field
	 * is not available: rid
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t			rid;		/* Frag reception counter */
		};
		struct rcu_head         rcu;
		struct inet_peer	*gc_next;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* the time of last use of not referenced entries */
	atomic_t		refcnt;
};

struct inet_peer_base {
	struct inet_peer __rcu	*root;
	seqlock_t		lock;
	int			total;
};

void inet_peer_base_init(struct inet_peer_base *);

void inet_initpeers(void) __init;

#define INETPEER_METRICS_NEW	(~(u32) 0)

static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
	iaddr->a4.addr = ip;
	iaddr->a4.vif = 0;
	iaddr->family = AF_INET;
}

static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
{
	return iaddr->a4.addr;
}

static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
					struct in6_addr *in6)
{
	iaddr->a6 = *in6;
	iaddr->family = AF_INET6;
}

static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
{
	return &iaddr->a6;
}

/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create);

static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
						__be32 v4daddr,
						int vif, int create)
{
	struct inetpeer_addr daddr;

	daddr.a4.addr = v4daddr;
	daddr.a4.vif = vif;
	daddr.family = AF_INET;
	return inet_getpeer(base, &daddr, create);
}

static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
						const struct in6_addr *v6daddr,
						int create)
{
	struct inetpeer_addr daddr;

	daddr.a6 = *v6daddr;
	daddr.family = AF_INET6;
	return inet_getpeer(base, &daddr, create);
}

static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
				    const struct inetpeer_addr *b)
{
	int i, n;

	if (a->family == AF_INET)
		n = sizeof(a->a4) / sizeof(u32);
	else
		n = sizeof(a->a6) / sizeof(u32);

	for (i = 0; i < n; i++) {
		if (a->key[i] == b->key[i])
			continue;
		if (a->key[i] < b->key[i])
			return -1;
		return 1;
	}

	return 0;
}

/* can be called from BH context or outside */
void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

void inetpeer_invalidate_tree(struct inet_peer_base *);

#endif /* _NET_INETPEER_H */
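
Usage note (not part of the header): in v4.6 peers are keyed by (address, VIF) and handed out with a reference held. Below is a minimal, hypothetical sketch of rate-limiting ICMP toward an IPv4 destination with these helpers; the function name example_icmp_allowed, the HZ timeout, and the assumption that base is an already-initialized inet_peer_base are illustrative, not taken from the header.

/* Hypothetical example: rate-limit ICMP toward v4daddr (illustrative only). */
static bool example_icmp_allowed(struct inet_peer_base *base,
				 __be32 v4daddr, int vif)
{
	struct inet_peer *peer;
	bool allowed = true;

	/* Look up the peer for this destination, creating it if needed. */
	peer = inet_getpeer_v4(base, v4daddr, vif, 1);
	if (peer) {
		/* Token-bucket check; HZ (roughly one second) is an arbitrary timeout. */
		allowed = inet_peer_xrlim_allow(peer, HZ);
		/* Release the reference taken by inet_getpeer_v4(). */
		inet_putpeer(peer);
	}
	return allowed;
}
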
v3.15
/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>

struct inetpeer_addr_base {
	union {
		__be32			a4;
		__be32			a6[4];
	};
};

struct inetpeer_addr {
	struct inetpeer_addr_base	addr;
	__u16				family;
};

struct inet_peer {
	/* group together avl_left,avl_right,v4daddr to speedup lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;
	__u32			avl_height;

	u32			metrics[RTAX_MAX];
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;
	union {
		struct list_head	gc_list;
		struct rcu_head     gc_rcu;
	};
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
	 * are not available: rid, ip_id_count
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t			rid;		/* Frag reception counter */
			atomic_t			ip_id_count;	/* IP ID for the next packet */
		};
		struct rcu_head         rcu;
		struct inet_peer	*gc_next;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* the time of last use of not referenced entries */
	atomic_t		refcnt;
};

struct inet_peer_base {
	struct inet_peer __rcu	*root;
	seqlock_t		lock;
	u32			flush_seq;
	int			total;
};

#define INETPEER_BASE_BIT	0x1UL

static inline struct inet_peer *inetpeer_ptr(unsigned long val)
{
	BUG_ON(val & INETPEER_BASE_BIT);
	return (struct inet_peer *) val;
}

static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
{
	if (!(val & INETPEER_BASE_BIT))
		return NULL;
	val &= ~INETPEER_BASE_BIT;
	return (struct inet_peer_base *) val;
}

static inline bool inetpeer_ptr_is_peer(unsigned long val)
{
	return !(val & INETPEER_BASE_BIT);
}

static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
{
	/* This implicitly clears INETPEER_BASE_BIT */
	*val = (unsigned long) peer;
}

static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
{
	unsigned long val = (unsigned long) peer;
	unsigned long orig = *ptr;

	if (!(orig & INETPEER_BASE_BIT) ||
	    cmpxchg(ptr, orig, val) != orig)
		return false;
	return true;
}

static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
{
	*ptr = (unsigned long) base | INETPEER_BASE_BIT;
}

static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
{
	unsigned long val = *from;

	*to = val;
	if (inetpeer_ptr_is_peer(val)) {
		struct inet_peer *peer = inetpeer_ptr(val);
		atomic_inc(&peer->refcnt);
	}
}

void inet_peer_base_init(struct inet_peer_base *);

void inet_initpeers(void) __init;

#define INETPEER_METRICS_NEW	(~(u32) 0)

static inline bool inet_metrics_new(const struct inet_peer *p)
{
	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
}

/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create);

static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
						__be32 v4daddr,
						int create)
{
	struct inetpeer_addr daddr;

	daddr.addr.a4 = v4daddr;
	daddr.family = AF_INET;
	return inet_getpeer(base, &daddr, create);
}

static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
						const struct in6_addr *v6daddr,
						int create)
{
	struct inetpeer_addr daddr;

	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
	daddr.family = AF_INET6;
	return inet_getpeer(base, &daddr, create);
}

/* can be called from BH context or outside */
void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

void inetpeer_invalidate_tree(struct inet_peer_base *);

/*
 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
 * tcp_ts_stamp if no refcount is taken on inet_peer
 */
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}

/* can be called with or without local BH being disabled */
static inline int inet_getid(struct inet_peer *p, int more)
{
	more++;
	inet_peer_refcheck(p);
	return atomic_add_return(more, &p->ip_id_count) - more;
}

#endif /* _NET_INETPEER_H */
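
Usage note (not part of the header): the v3.15 variant also carries the tagged-pointer helpers used by the routing code, where a single unsigned long holds either an inet_peer pointer (INETPEER_BASE_BIT clear) or an inet_peer_base pointer (bit set). Below is a minimal sketch of driving such a slot; rt_slot is a hypothetical caller-owned field, base and daddr are assumed valid, and the code mirrors, but is not copied from, the in-kernel callers.

/* Hypothetical example: bind a peer into a base-or-peer slot (illustrative only). */
static void example_attach_peer(unsigned long *rt_slot,
				struct inet_peer_base *base,
				const struct inetpeer_addr *daddr)
{
	struct inet_peer *peer;

	/* The slot starts out tagged with the base pointer. */
	inetpeer_init_ptr(rt_slot, base);

	peer = inet_getpeer(base, daddr, 1);
	if (!peer)
		return;

	/* Swap the base pointer for the peer pointer; if another path
	 * already installed a peer, drop the reference we just took.
	 */
	if (!inetpeer_ptr_set_peer(rt_slot, peer))
		inet_putpeer(peer);
}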