(Scraped kernel source listing: include/net/inetpeer.h — site navigation text removed.)
v3.1
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  5 */
  6
  7#ifndef _NET_INETPEER_H
  8#define _NET_INETPEER_H
  9
 10#include <linux/types.h>
 11#include <linux/init.h>
 12#include <linux/jiffies.h>
 13#include <linux/spinlock.h>
 14#include <linux/rtnetlink.h>
 15#include <net/ipv6.h>
 16#include <linux/atomic.h>
 17
/* Raw peer address storage.  An IPv4 address aliases the first word of
 * the IPv6 array through the anonymous union, so both families share a
 * single 16-byte layout.
 */
struct inetpeer_addr_base {
	union {
		__be32			a4;	/* IPv4 address, network byte order */
		__be32			a6[4];	/* IPv6 address as four 32-bit words */
	};
};
 24
/* Lookup key: raw address plus the family tag that selects which union
 * member of 'addr' is meaningful (AF_INET -> a4, AF_INET6 -> a6).
 */
struct inetpeer_addr {
	struct inetpeer_addr_base	addr;
	__u16				family;	/* AF_INET or AF_INET6 */
};
 29
/* Refcounted per-destination cache entry, kept in an AVL tree keyed by
 * 'daddr' (see avl_left/avl_right/avl_height below).
 */
struct inet_peer {
	/* group together avl_left,avl_right,v4daddr to speedup lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;		/* lookup key */
	__u32			avl_height;

	u32			metrics[RTAX_MAX];	/* cached routing metrics */
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;	/* NOTE(review): presumably jiffies of last rate event — confirm in icmp code */
	unsigned long		pmtu_expires;	/* NOTE(review): presumably jiffies deadline for learned PMTU — confirm */
	u32			pmtu_orig;
	u32			pmtu_learned;
	struct inetpeer_addr_base redirect_learned;	/* NOTE(review): presumably gateway learned via ICMP redirect — confirm */
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t			rid;		/* Frag reception counter */
			atomic_t			ip_id_count;	/* IP ID for the next packet */
			__u32				tcp_ts;
			__u32				tcp_ts_stamp;
		};
		struct rcu_head         rcu;
		struct inet_peer	*gc_next;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* the time of last use of not referenced entries */
	atomic_t		refcnt;
};
 63
 64void			inet_initpeers(void) __init;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 65
 66#define INETPEER_METRICS_NEW	(~(u32) 0)
 67
 68static inline bool inet_metrics_new(const struct inet_peer *p)
 69{
 70	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
 71}
 72
 73/* can be called with or without local BH being disabled */
 74struct inet_peer	*inet_getpeer(const struct inetpeer_addr *daddr, int create);
 75
 76static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
 
 
 
 
 77{
 78	struct inetpeer_addr daddr;
 79
 80	daddr.addr.a4 = v4daddr;
 81	daddr.family = AF_INET;
 82	return inet_getpeer(&daddr, create);
 83}
 84
 85static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, int create)
 
 
 86{
 87	struct inetpeer_addr daddr;
 88
 89	ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
 90	daddr.family = AF_INET6;
 91	return inet_getpeer(&daddr, create);
 92}
 93
 94/* can be called from BH context or outside */
 95extern void inet_putpeer(struct inet_peer *p);
 96extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
 
 97
 98/*
 99 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
100 * tcp_ts_stamp if no refcount is taken on inet_peer
101 */
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
	/* A non-positive refcount here means the caller is reading
	 * refcount-protected fields without holding a reference.
	 */
	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}
106
107
/* can be called with or without local BH being disabled */
static inline int inet_getid(struct inet_peer *p, int more)
{
	int old, new;
	more++;			/* always consume at least one ID */
	inet_peer_refcheck(p);
	/* Lock-free reserve of 'more' IDs via a cmpxchg retry loop. */
	do {
		old = atomic_read(&p->ip_id_count);
		new = old + more;
		if (!new)
			new = 1;	/* skip 0 so the counter never lands on it */
	} while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
	return new;
}
122
123#endif /* _NET_INETPEER_H */
v3.15
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  5 */
  6
  7#ifndef _NET_INETPEER_H
  8#define _NET_INETPEER_H
  9
 10#include <linux/types.h>
 11#include <linux/init.h>
 12#include <linux/jiffies.h>
 13#include <linux/spinlock.h>
 14#include <linux/rtnetlink.h>
 15#include <net/ipv6.h>
 16#include <linux/atomic.h>
 17
/* Raw peer address storage.  An IPv4 address aliases the first word of
 * the IPv6 array through the anonymous union, so both families share a
 * single 16-byte layout.
 */
struct inetpeer_addr_base {
	union {
		__be32			a4;	/* IPv4 address, network byte order */
		__be32			a6[4];	/* IPv6 address as four 32-bit words */
	};
};
 24
/* Lookup key: raw address plus the family tag that selects which union
 * member of 'addr' is meaningful (AF_INET -> a4, AF_INET6 -> a6).
 */
struct inetpeer_addr {
	struct inetpeer_addr_base	addr;
	__u16				family;	/* AF_INET or AF_INET6 */
};
 29
/* Refcounted per-destination cache entry, kept in an AVL tree keyed by
 * 'daddr' (see avl_left/avl_right/avl_height below).
 */
struct inet_peer {
	/* group together avl_left,avl_right,v4daddr to speedup lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;		/* lookup key */
	__u32			avl_height;

	u32			metrics[RTAX_MAX];	/* cached routing metrics */
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;	/* NOTE(review): presumably jiffies of last rate event — confirm in icmp code */
	union {
		struct list_head	gc_list;	/* NOTE(review): presumably GC worklist linkage — see inetpeer.c */
		struct rcu_head     gc_rcu;		/* for RCU-deferred teardown */
	};
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
	 * are not available: rid, ip_id_count
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t			rid;		/* Frag reception counter */
			atomic_t			ip_id_count;	/* IP ID for the next packet */
		};
		struct rcu_head         rcu;
		struct inet_peer	*gc_next;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* the time of last use of not referenced entries */
	atomic_t		refcnt;
};
 61
/* Root of one AVL tree of inet_peer entries. */
struct inet_peer_base {
	struct inet_peer __rcu	*root;		/* AVL tree root */
	seqlock_t		lock;		/* guards tree rebalancing/lookups */
	u32			flush_seq;	/* NOTE(review): presumably invalidation generation — confirm in inetpeer.c */
	int			total;		/* NOTE(review): presumably node count — confirm in inetpeer.c */
};
 68
/* The low bit of an encoded pointer word distinguishes "points at a
 * peer" (bit clear) from "points at the tree base" (bit set).
 */
#define INETPEER_BASE_BIT	0x1UL

/* Decode a word known to hold a peer pointer; trips BUG if the word
 * actually encodes a base (base bit set).
 */
static inline struct inet_peer *inetpeer_ptr(unsigned long val)
{
	BUG_ON(val & INETPEER_BASE_BIT);
	return (struct inet_peer *) val;
}
 76
 77static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
 78{
 79	if (!(val & INETPEER_BASE_BIT))
 80		return NULL;
 81	val &= ~INETPEER_BASE_BIT;
 82	return (struct inet_peer_base *) val;
 83}
 84
 85static inline bool inetpeer_ptr_is_peer(unsigned long val)
 86{
 87	return !(val & INETPEER_BASE_BIT);
 88}
 89
/* Unconditionally store a peer pointer into an encoded slot. */
static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
{
	/* This implicitly clears INETPEER_BASE_BIT */
	*val = (unsigned long) peer;
}
 95
 96static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
 97{
 98	unsigned long val = (unsigned long) peer;
 99	unsigned long orig = *ptr;
100
101	if (!(orig & INETPEER_BASE_BIT) ||
102	    cmpxchg(ptr, orig, val) != orig)
103		return false;
104	return true;
105}
106
/* Initialize an encoded slot to reference 'base', tagging it with the
 * base bit so decoders know it is not a peer pointer.
 */
static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
{
	*ptr = (unsigned long) base | INETPEER_BASE_BIT;
}
111
112static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
113{
114	unsigned long val = *from;
115
116	*to = val;
117	if (inetpeer_ptr_is_peer(val)) {
118		struct inet_peer *peer = inetpeer_ptr(val);
119		atomic_inc(&peer->refcnt);
120	}
121}
122
123void inet_peer_base_init(struct inet_peer_base *);
124
125void inet_initpeers(void) __init;
126
127#define INETPEER_METRICS_NEW	(~(u32) 0)
128
129static inline bool inet_metrics_new(const struct inet_peer *p)
130{
131	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
132}
133
134/* can be called with or without local BH being disabled */
135struct inet_peer *inet_getpeer(struct inet_peer_base *base,
136			       const struct inetpeer_addr *daddr,
137			       int create);
138
139static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
140						__be32 v4daddr,
141						int create)
142{
143	struct inetpeer_addr daddr;
144
145	daddr.addr.a4 = v4daddr;
146	daddr.family = AF_INET;
147	return inet_getpeer(base, &daddr, create);
148}
149
150static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
151						const struct in6_addr *v6daddr,
152						int create)
153{
154	struct inetpeer_addr daddr;
155
156	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
157	daddr.family = AF_INET6;
158	return inet_getpeer(base, &daddr, create);
159}
160
161/* can be called from BH context or outside */
162void inet_putpeer(struct inet_peer *p);
163bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
164
165void inetpeer_invalidate_tree(struct inet_peer_base *);
166
/*
 * temporary check to make sure we dont access rid or ip_id_count
 * if no refcount is taken on inet_peer
 */
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
	/* A non-positive refcount here means the caller is reading
	 * refcount-protected fields without holding a reference.
	 */
	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}
175
176
177/* can be called with or without local BH being disabled */
178static inline int inet_getid(struct inet_peer *p, int more)
179{
 
180	more++;
181	inet_peer_refcheck(p);
182	return atomic_add_return(more, &p->ip_id_count) - more;
 
 
 
 
 
 
183}
184
185#endif /* _NET_INETPEER_H */