Linux Audio

Check our new training course

Loading...
Linux kernel v6.13.7 — include/net/inetpeer.h
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 *		INETPEER - A storage for permanent information about peers
  4 *
  5 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  6 */
  7
  8#ifndef _NET_INETPEER_H
  9#define _NET_INETPEER_H
 10
 11#include <linux/types.h>
 12#include <linux/init.h>
 13#include <linux/jiffies.h>
 14#include <linux/spinlock.h>
 15#include <linux/rtnetlink.h>
 16#include <net/ipv6.h>
 17#include <linux/atomic.h>
 18
/* IPv4 address key for cache lookups */
struct ipv4_addr_key {
	__be32	addr;	/* peer's IPv4 address, network byte order */
	int	vif;	/* VRF/L3-master ifindex scoping the key; 0 when unscoped — TODO confirm */
};
 24
/* Maximum key size in 32-bit words: an IPv6 address (the largest key stored) */
#define INETPEER_MAXKEYSZ   (sizeof(struct in6_addr) / sizeof(u32))

/* Family-agnostic peer key: v4 (addr + vif) or v6, also viewable as raw words */
struct inetpeer_addr {
	union {
		struct ipv4_addr_key	a4;
		struct in6_addr		a6;
		u32			key[INETPEER_MAXKEYSZ];	/* raw view used by inetpeer_addr_cmp() */
	};
	__u16				family;	/* AF_INET or AF_INET6 */
};
 35
/* One cached entry of long-lived per-destination state */
struct inet_peer {
	struct rb_node		rb_node;	/* linkage in the per-base rb-tree, keyed by daddr */
	struct inetpeer_addr	daddr;		/* peer address this entry describes */

	u32			metrics[RTAX_MAX];	/* cached routing metrics */
	u32			rate_tokens;	/* rate limiting for ICMP */
	u32			n_redirects;	/* count of ICMP redirects issued — TODO confirm against route.c */
	unsigned long		rate_last;	/* jiffies timestamp of last rate-limited event — TODO confirm */

	/*
	 * Once inet_peer is queued for deletion (refcnt == 0), following field
	 * is not available: rid
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t			rid;		/* Frag reception counter */
		};
		struct rcu_head         rcu;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* the time of last use of not referenced entries */
	refcount_t		refcnt;	/* usage count; 0 means queued for deletion (see union above) */
};
 60
/* Root of one peer cache: an rb-tree of inet_peer entries */
struct inet_peer_base {
	struct rb_root		rb_root;	/* tree of peers, ordered by inetpeer_addr_cmp() — TODO confirm */
	seqlock_t		lock;		/* serializes writers; presumably lockless readers use the seqcount */
	int			total;		/* number of entries in the tree */
};
 66
/* Initialize an empty peer cache (tree and lock). */
void inet_peer_base_init(struct inet_peer_base *);

/* Boot-time initialization of the inetpeer subsystem. */
void inet_initpeers(void) __init;

/* Sentinel meaning "no cached value" in a metrics slot — TODO confirm usage */
#define INETPEER_METRICS_NEW	(~(u32) 0)
 
 
 
 
 
 
 
 
 
 
 
 72
 73static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
 74{
 75	iaddr->a4.addr = ip;
 76	iaddr->a4.vif = 0;
 77	iaddr->family = AF_INET;
 78}
 79
 80static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
 81{
 82	return iaddr->a4.addr;
 
 
 
 
 
 
 83}
 84
 85static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
 86					struct in6_addr *in6)
 87{
 88	iaddr->a6 = *in6;
 89	iaddr->family = AF_INET6;
 
 
 
 
 
 
 
 
 
 
 90}
 91
 92static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
 
 
 
 
 
 
 93{
 94	return &iaddr->a6;
 95}
 96
/* can be called with or without local BH being disabled */
/*
 * Look up @daddr in @base, presumably creating the entry when absent and
 * returning it with a reference held (released via inet_putpeer()) —
 * TODO confirm against net/ipv4/inetpeer.c.
 */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr);
 
100
101static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
102						__be32 v4daddr,
103						int vif)
104{
105	struct inetpeer_addr daddr;
106
107	daddr.a4.addr = v4daddr;
108	daddr.a4.vif = vif;
109	daddr.family = AF_INET;
110	return inet_getpeer(base, &daddr);
111}
112
113static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
114						const struct in6_addr *v6daddr)
 
115{
116	struct inetpeer_addr daddr;
117
118	daddr.a6 = *v6daddr;
119	daddr.family = AF_INET6;
120	return inet_getpeer(base, &daddr);
121}
122
123static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
124				    const struct inetpeer_addr *b)
125{
126	int i, n;
127
128	if (a->family == AF_INET)
129		n = sizeof(a->a4) / sizeof(u32);
130	else
131		n = sizeof(a->a6) / sizeof(u32);
132
133	for (i = 0; i < n; i++) {
134		if (a->key[i] == b->key[i])
135			continue;
136		if (a->key[i] < b->key[i])
137			return -1;
138		return 1;
139	}
140
141	return 0;
142}
143
/* can be called from BH context or outside */
/* Release one reference on @p taken by inet_getpeer*(). */
void inet_putpeer(struct inet_peer *p);
/*
 * ICMP send rate limiter — presumably a token bucket over rate_tokens /
 * rate_last; returns true when sending is allowed. TODO confirm against
 * net/ipv4/inetpeer.c.
 */
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

/* Discard every entry in @base's tree. */
void inetpeer_invalidate_tree(struct inet_peer_base *);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
150#endif /* _NET_INETPEER_H */
Linux kernel v3.15 — include/net/inetpeer.h (older revision of the same header)
 
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  5 */
  6
  7#ifndef _NET_INETPEER_H
  8#define _NET_INETPEER_H
  9
 10#include <linux/types.h>
 11#include <linux/init.h>
 12#include <linux/jiffies.h>
 13#include <linux/spinlock.h>
 14#include <linux/rtnetlink.h>
 15#include <net/ipv6.h>
 16#include <linux/atomic.h>
 17
/* Raw peer address payload: IPv4 or IPv6, without family information */
struct inetpeer_addr_base {
	union {
		__be32			a4;	/* IPv4 address, network byte order */
		__be32			a6[4];	/* IPv6 address as four 32-bit words */
	};
};
 24
 
 
/* Peer address plus the family that discriminates the union in addr */
struct inetpeer_addr {
	struct inetpeer_addr_base	addr;
	__u16				family;	/* AF_INET or AF_INET6 */
};
 29
/* One cached entry of long-lived per-destination state (AVL-tree era) */
struct inet_peer {
	/* group together avl_left,avl_right,v4daddr to speedup lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;	/* AVL tree children */
	struct inetpeer_addr	daddr;
	__u32			avl_height;

	u32			metrics[RTAX_MAX];	/* cached routing metrics */
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;	/* jiffies timestamp of last rate-limited event — TODO confirm */
	union {
		struct list_head	gc_list;	/* linkage on the garbage-collection list — TODO confirm */
		struct rcu_head     gc_rcu;		/* RCU callback head used during GC */
	};
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
	 * are not available: rid, ip_id_count
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t			rid;		/* Frag reception counter */
			atomic_t			ip_id_count;	/* IP ID for the next packet */
		};
		struct rcu_head         rcu;
		struct inet_peer	*gc_next;	/* next entry on the GC work chain — TODO confirm */
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* the time of last use of not referenced entries */
	atomic_t		refcnt;	/* usage count; -1 means queued for deletion (see comment above) */
};
 61
/* Root of one peer cache: an AVL tree of inet_peer entries */
struct inet_peer_base {
	struct inet_peer __rcu	*root;	/* AVL tree root, RCU-dereferenced by readers */
	seqlock_t		lock;	/* serializes writers — TODO confirm reader scheme */
	u32			flush_seq;	/* presumably a flush generation counter; verify against inetpeer.c */
	int			total;	/* number of entries in the tree */
};
 68
 69#define INETPEER_BASE_BIT	0x1UL
 70
 71static inline struct inet_peer *inetpeer_ptr(unsigned long val)
 72{
 73	BUG_ON(val & INETPEER_BASE_BIT);
 74	return (struct inet_peer *) val;
 75}
 76
 77static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
 78{
 79	if (!(val & INETPEER_BASE_BIT))
 80		return NULL;
 81	val &= ~INETPEER_BASE_BIT;
 82	return (struct inet_peer_base *) val;
 83}
 84
 85static inline bool inetpeer_ptr_is_peer(unsigned long val)
 86{
 87	return !(val & INETPEER_BASE_BIT);
 88}
 89
 90static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
 91{
 92	/* This implicitly clears INETPEER_BASE_BIT */
 93	*val = (unsigned long) peer;
 
 94}
 95
/*
 * Atomically transition a tagged slot from its base marker to @peer.
 * Returns false when the slot no longer holds a base marker (another CPU
 * already installed a peer) or when the cmpxchg loses the race.
 */
static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
{
	unsigned long val = (unsigned long) peer;
	unsigned long orig = *ptr;

	/* Only ever replace a base marker; never overwrite an installed peer. */
	if (!(orig & INETPEER_BASE_BIT) ||
	    cmpxchg(ptr, orig, val) != orig)
		return false;
	return true;
}
106
107static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
 
108{
109	*ptr = (unsigned long) base | INETPEER_BASE_BIT;
110}
111
/*
 * Copy the tagged slot @from into @to. When the value encodes a peer,
 * take an extra reference on it for the new slot. Note @from is read
 * exactly once so the test and the refcount bump act on the same value.
 */
static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
{
	unsigned long val = *from;

	*to = val;
	if (inetpeer_ptr_is_peer(val)) {
		struct inet_peer *peer = inetpeer_ptr(val);
		atomic_inc(&peer->refcnt);
	}
}
122
/* Initialize an empty peer cache (tree and lock). */
void inet_peer_base_init(struct inet_peer_base *);

/* Boot-time initialization of the inetpeer subsystem. */
void inet_initpeers(void) __init;

/* Sentinel meaning "no cached value yet" in a metrics slot (see inet_metrics_new) */
#define INETPEER_METRICS_NEW	(~(u32) 0)
128
129static inline bool inet_metrics_new(const struct inet_peer *p)
130{
131	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
132}
133
/* can be called with or without local BH being disabled */
/*
 * Look up @daddr in @base; when @create is non-zero a missing entry is
 * presumably allocated. Returns a referenced peer (released via
 * inet_putpeer()) or NULL — TODO confirm against net/ipv4/inetpeer.c.
 */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create);
138
139static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
140						__be32 v4daddr,
141						int create)
142{
143	struct inetpeer_addr daddr;
144
145	daddr.addr.a4 = v4daddr;
 
146	daddr.family = AF_INET;
147	return inet_getpeer(base, &daddr, create);
148}
149
150static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
151						const struct in6_addr *v6daddr,
152						int create)
153{
154	struct inetpeer_addr daddr;
155
156	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
157	daddr.family = AF_INET6;
158	return inet_getpeer(base, &daddr, create);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159}
160
/* can be called from BH context or outside */
/* Release one reference on @p taken by inet_getpeer*(). */
void inet_putpeer(struct inet_peer *p);
/*
 * ICMP send rate limiter — presumably a token bucket over rate_tokens /
 * rate_last; returns true when sending is allowed. TODO confirm.
 */
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

/* Discard every entry in @base's tree. */
void inetpeer_invalidate_tree(struct inet_peer_base *);
166
/*
 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
 * tcp_ts_stamp if no refcount is taken on inet_peer
 */
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
	/* Warn (once) if the caller holds no reference on @p. */
	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}
175
176
177/* can be called with or without local BH being disabled */
178static inline int inet_getid(struct inet_peer *p, int more)
179{
180	more++;
181	inet_peer_refcheck(p);
182	return atomic_add_return(more, &p->ip_id_count) - more;
183}
184
185#endif /* _NET_INETPEER_H */