include/net/neighbour.h (Linux v3.15)
 
  1#ifndef _NET_NEIGHBOUR_H
  2#define _NET_NEIGHBOUR_H
  3
  4#include <linux/neighbour.h>
  5
  6/*
  7 *	Generic neighbour manipulation
  8 *
  9 *	Authors:
 10 *	Pedro Roque		<roque@di.fc.ul.pt>
 11 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 12 *
 13 * 	Changes:
 14 *
 15 *	Harald Welte:		<laforge@gnumonks.org>
 16 *		- Add neighbour cache statistics like rtstat
 17 */
 18
 19#include <linux/atomic.h>
 20#include <linux/netdevice.h>
 21#include <linux/skbuff.h>
 22#include <linux/rcupdate.h>
 23#include <linux/seq_file.h>
 24#include <linux/bitmap.h>
 25
 26#include <linux/err.h>
 27#include <linux/sysctl.h>
 28#include <linux/workqueue.h>
 29#include <net/rtnetlink.h>
 30
 31/*
 32 * NUD stands for "neighbor unreachability detection"
 33 */
 34
 35#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
 36#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
 37#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
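/*
 * Example (editor's sketch, not part of the header): callers normally test
 * nud_state against the composite masks above rather than individual bits.
 * NUD_VALID means the entry carries a usable link-layer address (possibly
 * stale); NUD_CONNECTED additionally means reachability is confirmed and no
 * probing is pending.  The helper name below is made up.
 */
static inline bool example_neigh_usable(const struct neighbour *n)
{
	return n->nud_state & NUD_VALID;
}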
 38
 39struct neighbour;
 40
 41enum {
 42	NEIGH_VAR_MCAST_PROBES,
 43	NEIGH_VAR_UCAST_PROBES,
 44	NEIGH_VAR_APP_PROBES,
 45	NEIGH_VAR_RETRANS_TIME,
 46	NEIGH_VAR_BASE_REACHABLE_TIME,
 47	NEIGH_VAR_DELAY_PROBE_TIME,
 48	NEIGH_VAR_GC_STALETIME,
 49	NEIGH_VAR_QUEUE_LEN_BYTES,
 50	NEIGH_VAR_PROXY_QLEN,
 51	NEIGH_VAR_ANYCAST_DELAY,
 52	NEIGH_VAR_PROXY_DELAY,
 53	NEIGH_VAR_LOCKTIME,
 54#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
 55	/* Following are used as a second way to access one of the above */
 56	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
 57	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
 58	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
 59	/* Following are used by "default" only */
 60	NEIGH_VAR_GC_INTERVAL,
 61	NEIGH_VAR_GC_THRESH1,
 62	NEIGH_VAR_GC_THRESH2,
 63	NEIGH_VAR_GC_THRESH3,
 64	NEIGH_VAR_MAX
 65};
 66
 67struct neigh_parms {
 68#ifdef CONFIG_NET_NS
 69	struct net *net;
 70#endif
 71	struct net_device *dev;
 72	struct neigh_parms *next;
 73	int	(*neigh_setup)(struct neighbour *);
 74	void	(*neigh_cleanup)(struct neighbour *);
 75	struct neigh_table *tbl;
 76
 77	void	*sysctl_table;
 78
 79	int dead;
 80	atomic_t refcnt;
 81	struct rcu_head rcu_head;
 82
 83	int	reachable_time;
 84	int	data[NEIGH_VAR_DATA_MAX];
 85	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 86};
 87
 88static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
 89{
 90	set_bit(index, p->data_state);
 91	p->data[index] = val;
 92}
 93
 94#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
 95
 96/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
 97 * In other cases, NEIGH_VAR_SET should be used.
 98 */
 99#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
100#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
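/*
 * Example (editor's sketch, not part of the header): per the comment above,
 * a driver's ndo_neigh_setup() uses NEIGH_VAR_INIT, which only stores the
 * value, while run-time tuning goes through NEIGH_VAR_SET, which also marks
 * the value in data_state so the core can tell explicitly configured values
 * apart from defaults.  The function names below are made up.
 */
static int example_ndo_neigh_setup(struct net_device *dev,
				   struct neigh_parms *p)
{
	NEIGH_VAR_INIT(p, UCAST_PROBES, 5);	/* plain default, no state bit */
	return 0;
}

static void example_tune(struct neigh_parms *p)
{
	NEIGH_VAR_SET(p, UCAST_PROBES, 5);	/* stored and flagged as set */
}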
101
102static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
103{
104	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
105}
106
107static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
108{
109	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
110}
111
112struct neigh_statistics {
113	unsigned long allocs;		/* number of allocated neighs */
114	unsigned long destroys;		/* number of destroyed neighs */
115	unsigned long hash_grows;	/* number of hash resizes */
116
117	unsigned long res_failed;	/* number of failed resolutions */
118
119	unsigned long lookups;		/* number of lookups */
120	unsigned long hits;		/* number of hits (among lookups) */
121
122	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
123	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */
124
125	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
126	unsigned long forced_gc_runs;	/* number of forced GC runs */
127
128	unsigned long unres_discards;	/* number of unresolved drops */
129};
130
131#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
132
133struct neighbour {
134	struct neighbour __rcu	*next;
135	struct neigh_table	*tbl;
136	struct neigh_parms	*parms;
137	unsigned long		confirmed;
138	unsigned long		updated;
139	rwlock_t		lock;
140	atomic_t		refcnt;
141	struct sk_buff_head	arp_queue;
142	unsigned int		arp_queue_len_bytes;
143	struct timer_list	timer;
144	unsigned long		used;
145	atomic_t		probes;
146	__u8			flags;
147	__u8			nud_state;
148	__u8			type;
149	__u8			dead;
150	seqlock_t		ha_lock;
151	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
152	struct hh_cache		hh;
153	int			(*output)(struct neighbour *, struct sk_buff *);
154	const struct neigh_ops	*ops;
155	struct rcu_head		rcu;
156	struct net_device	*dev;
157	u8			primary_key[0];
158};
159
160struct neigh_ops {
161	int			family;
162	void			(*solicit)(struct neighbour *, struct sk_buff *);
163	void			(*error_report)(struct neighbour *, struct sk_buff *);
164	int			(*output)(struct neighbour *, struct sk_buff *);
165	int			(*connected_output)(struct neighbour *, struct sk_buff *);
166};
167
168struct pneigh_entry {
169	struct pneigh_entry	*next;
170#ifdef CONFIG_NET_NS
171	struct net		*net;
172#endif
173	struct net_device	*dev;
174	u8			flags;
175	u8			key[0];
176};
177
178/*
179 *	neighbour table manipulation
180 */
181
182#define NEIGH_NUM_HASH_RND	4
183
184struct neigh_hash_table {
185	struct neighbour __rcu	**hash_buckets;
186	unsigned int		hash_shift;
187	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
188	struct rcu_head		rcu;
189};
190
191
192struct neigh_table {
193	struct neigh_table	*next;
194	int			family;
195	int			entry_size;
196	int			key_len;
197	__u32			(*hash)(const void *pkey,
198					const struct net_device *dev,
199					__u32 *hash_rnd);
200	int			(*constructor)(struct neighbour *);
201	int			(*pconstructor)(struct pneigh_entry *);
202	void			(*pdestructor)(struct pneigh_entry *);
203	void			(*proxy_redo)(struct sk_buff *skb);
204	char			*id;
205	struct neigh_parms	parms;
206	/* HACK. gc_* should follow parms without a gap! */
207	int			gc_interval;
208	int			gc_thresh1;
209	int			gc_thresh2;
210	int			gc_thresh3;
211	unsigned long		last_flush;
212	struct delayed_work	gc_work;
213	struct timer_list 	proxy_timer;
214	struct sk_buff_head	proxy_queue;
215	atomic_t		entries;
216	rwlock_t		lock;
217	unsigned long		last_rand;
218	struct neigh_statistics	__percpu *stats;
219	struct neigh_hash_table __rcu *nht;
220	struct pneigh_entry	**phash_buckets;
221};
222
223static inline int neigh_parms_family(struct neigh_parms *p)
224{
225	return p->tbl->family;
226}
227
228#define NEIGH_PRIV_ALIGN	sizeof(long long)
229#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)
230
231static inline void *neighbour_priv(const struct neighbour *n)
232{
233	return (char *)n + n->tbl->entry_size;
234}
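/*
 * Example (editor's sketch, not part of the header): entry_size covers
 * struct neighbour plus the key, so per-neighbour driver data lives right
 * behind it and is reached through neighbour_priv().  A device requests the
 * extra space via dev->neigh_priv_len in its setup code; the struct and
 * function names below are made up.
 */
struct example_neigh_priv {
	u32 hw_handle;
};

static int example_neigh_construct(struct neighbour *n)
{
	struct example_neigh_priv *priv = neighbour_priv(n);

	priv->hw_handle = 0;	/* storage sits right after the entry */
	return 0;
}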
235
236/* flags for neigh_update() */
237#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
238#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
239#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
240#define NEIGH_UPDATE_F_ISROUTER			0x40000000
241#define NEIGH_UPDATE_F_ADMIN			0x80000000
242
243void neigh_table_init(struct neigh_table *tbl);
244int neigh_table_clear(struct neigh_table *tbl);
245struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
246			       struct net_device *dev);
247struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
248				     const void *pkey);
249struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
250				 struct net_device *dev, bool want_ref);
251static inline struct neighbour *neigh_create(struct neigh_table *tbl,
252					     const void *pkey,
253					     struct net_device *dev)
254{
255	return __neigh_create(tbl, pkey, dev, true);
256}
257void neigh_destroy(struct neighbour *neigh);
258int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
259int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
260void __neigh_set_probe_once(struct neighbour *neigh);
261void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
262int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
263int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
264int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
265int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
266int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
267struct neighbour *neigh_event_ns(struct neigh_table *tbl,
268						u8 *lladdr, void *saddr,
269						struct net_device *dev);
270
271struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
272				      struct neigh_table *tbl);
273void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
274
275static inline
276struct net *neigh_parms_net(const struct neigh_parms *parms)
277{
278	return read_pnet(&parms->net);
279}
280
281unsigned long neigh_rand_reach_time(unsigned long base);
282
283void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
284		    struct sk_buff *skb);
285struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
286				   const void *key, struct net_device *dev,
287				   int creat);
288struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
289				     const void *key, struct net_device *dev);
290int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
291		  struct net_device *dev);
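/*
 * Example (editor's sketch, not part of the header): proxy entries are
 * created by looking them up with creat != 0; this is essentially how the
 * ARP code installs a proxy entry for an address.  example_add_proxy() is
 * made up and arp_tbl comes from <net/arp.h>.
 */
static int example_add_proxy(struct net *net, __be32 addr,
			     struct net_device *dev)
{
	return pneigh_lookup(&arp_tbl, net, &addr, dev, 1) ? 0 : -ENOBUFS;
}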
292
293static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
294{
295	return read_pnet(&pneigh->net);
296}
297
298void neigh_app_ns(struct neighbour *n);
299void neigh_for_each(struct neigh_table *tbl,
300		    void (*cb)(struct neighbour *, void *), void *cookie);
301void __neigh_for_each_release(struct neigh_table *tbl,
302			      int (*cb)(struct neighbour *));
303void pneigh_for_each(struct neigh_table *tbl,
304		     void (*cb)(struct pneigh_entry *));
305
306struct neigh_seq_state {
307	struct seq_net_private p;
308	struct neigh_table *tbl;
309	struct neigh_hash_table *nht;
310	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
311				struct neighbour *n, loff_t *pos);
312	unsigned int bucket;
313	unsigned int flags;
314#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
315#define NEIGH_SEQ_IS_PNEIGH	0x00000002
316#define NEIGH_SEQ_SKIP_NOARP	0x00000004
317};
318void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
319		      unsigned int);
320void *neigh_seq_next(struct seq_file *, void *, loff_t *);
321void neigh_seq_stop(struct seq_file *, void *);
322
323int neigh_proc_dointvec(struct ctl_table *ctl, int write,
324			void __user *buffer, size_t *lenp, loff_t *ppos);
325int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
326				void __user *buffer,
327				size_t *lenp, loff_t *ppos);
328int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
329				   void __user *buffer,
330				   size_t *lenp, loff_t *ppos);
331
332int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
333			  proc_handler *proc_handler);
334void neigh_sysctl_unregister(struct neigh_parms *p);
335
336static inline void __neigh_parms_put(struct neigh_parms *parms)
337{
338	atomic_dec(&parms->refcnt);
339}
340
341static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
342{
343	atomic_inc(&parms->refcnt);
344	return parms;
345}
346
347/*
348 *	Neighbour references
349 */
350
351static inline void neigh_release(struct neighbour *neigh)
352{
353	if (atomic_dec_and_test(&neigh->refcnt))
354		neigh_destroy(neigh);
355}
356
357static inline struct neighbour * neigh_clone(struct neighbour *neigh)
358{
359	if (neigh)
360		atomic_inc(&neigh->refcnt);
361	return neigh;
362}
363
364#define neigh_hold(n)	atomic_inc(&(n)->refcnt)
365
366static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
367{
368	unsigned long now = jiffies;
369	
370	if (neigh->used != now)
371		neigh->used = now;
372	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
373		return __neigh_event_send(neigh, skb);
374	return 0;
375}
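/*
 * Example (editor's sketch, not part of the header): neigh_event_send() both
 * kicks the resolution state machine and answers "may I transmit now?".  A
 * zero return means the entry is usable and the caller still owns the skb; a
 * non-zero return means __neigh_event_send() has queued (or dropped) the skb
 * while resolution proceeds.  example_try_xmit() is made up.
 */
static int example_try_xmit(struct neighbour *n, struct sk_buff *skb)
{
	if (!neigh_event_send(n, skb))
		return n->output(n, skb);	/* usable: transmit via cached method */

	return 0;				/* skb taken over by the neighbour core */
}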
376
377#ifdef CONFIG_BRIDGE_NETFILTER
378static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
379{
380	unsigned int seq, hh_alen;
381
382	do {
383		seq = read_seqbegin(&hh->hh_lock);
384		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
385		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
386	} while (read_seqretry(&hh->hh_lock, seq));
387	return 0;
388}
389#endif
390
391static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
392{
393	unsigned int seq;
394	int hh_len;
395
396	do {
397		seq = read_seqbegin(&hh->hh_lock);
398		hh_len = hh->hh_len;
399		if (likely(hh_len <= HH_DATA_MOD)) {
400			/* this is inlined by gcc */
401			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
402		} else {
403			int hh_alen = HH_DATA_ALIGN(hh_len);
404
405			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
406		}
407	} while (read_seqretry(&hh->hh_lock, seq));
408
409	skb_push(skb, hh_len);
410	return dev_queue_xmit(skb);
411}
412
413static inline struct neighbour *
414__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
415{
416	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
417
418	if (n || !creat)
419		return n;
420
421	n = neigh_create(tbl, pkey, dev);
422	return IS_ERR(n) ? NULL : n;
423}
424
425static inline struct neighbour *
426__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
427  struct net_device *dev)
428{
429	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
430
431	if (n)
432		return n;
433
434	return neigh_create(tbl, pkey, dev);
435}
436
437struct neighbour_cb {
438	unsigned long sched_next;
439	unsigned int flags;
440};
441
442#define LOCALLY_ENQUEUED 0x1
443
444#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
445
446static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
447				     const struct net_device *dev)
448{
449	unsigned int seq;
450
451	do {
452		seq = read_seqbegin(&n->ha_lock);
453		memcpy(dst, n->ha, dev->addr_len);
454	} while (read_seqretry(&n->ha_lock, seq));
455}
456#endif
include/net/neighbour.h (Linux v6.8)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _NET_NEIGHBOUR_H
  3#define _NET_NEIGHBOUR_H
  4
  5#include <linux/neighbour.h>
  6
  7/*
  8 *	Generic neighbour manipulation
  9 *
 10 *	Authors:
 11 *	Pedro Roque		<roque@di.fc.ul.pt>
 12 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 13 *
 14 * 	Changes:
 15 *
 16 *	Harald Welte:		<laforge@gnumonks.org>
 17 *		- Add neighbour cache statistics like rtstat
 18 */
 19
 20#include <linux/atomic.h>
 21#include <linux/refcount.h>
 22#include <linux/netdevice.h>
 23#include <linux/skbuff.h>
 24#include <linux/rcupdate.h>
 25#include <linux/seq_file.h>
 26#include <linux/bitmap.h>
 27
 28#include <linux/err.h>
 29#include <linux/sysctl.h>
 30#include <linux/workqueue.h>
 31#include <net/rtnetlink.h>
 32
 33/*
 34 * NUD stands for "neighbor unreachability detection"
 35 */
 36
 37#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
 38#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
 39#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
 40
 41struct neighbour;
 42
 43enum {
 44	NEIGH_VAR_MCAST_PROBES,
 45	NEIGH_VAR_UCAST_PROBES,
 46	NEIGH_VAR_APP_PROBES,
 47	NEIGH_VAR_MCAST_REPROBES,
 48	NEIGH_VAR_RETRANS_TIME,
 49	NEIGH_VAR_BASE_REACHABLE_TIME,
 50	NEIGH_VAR_DELAY_PROBE_TIME,
 51	NEIGH_VAR_INTERVAL_PROBE_TIME_MS,
 52	NEIGH_VAR_GC_STALETIME,
 53	NEIGH_VAR_QUEUE_LEN_BYTES,
 54	NEIGH_VAR_PROXY_QLEN,
 55	NEIGH_VAR_ANYCAST_DELAY,
 56	NEIGH_VAR_PROXY_DELAY,
 57	NEIGH_VAR_LOCKTIME,
 58#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
 59	/* Following are used as a second way to access one of the above */
 60	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
 61	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
 62	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
 63	/* Following are used by "default" only */
 64	NEIGH_VAR_GC_INTERVAL,
 65	NEIGH_VAR_GC_THRESH1,
 66	NEIGH_VAR_GC_THRESH2,
 67	NEIGH_VAR_GC_THRESH3,
 68	NEIGH_VAR_MAX
 69};
 70
 71struct neigh_parms {
 72	possible_net_t net;
 73	struct net_device *dev;
 74	netdevice_tracker dev_tracker;
 75	struct list_head list;
 76	int	(*neigh_setup)(struct neighbour *);
 77	struct neigh_table *tbl;
 78
 79	void	*sysctl_table;
 80
 81	int dead;
 82	refcount_t refcnt;
 83	struct rcu_head rcu_head;
 84
 85	int	reachable_time;
 86	u32	qlen;
 87	int	data[NEIGH_VAR_DATA_MAX];
 88	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 89};
 90
 91static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
 92{
 93	set_bit(index, p->data_state);
 94	p->data[index] = val;
 95}
 96
 97#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
 98
 99/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
100 * In other cases, NEIGH_VAR_SET should be used.
101 */
102#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
103#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
104
105static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
106{
107	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
108}
109
110static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
111{
112	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
113}
114
115struct neigh_statistics {
116	unsigned long allocs;		/* number of allocated neighs */
117	unsigned long destroys;		/* number of destroyed neighs */
118	unsigned long hash_grows;	/* number of hash resizes */
119
120	unsigned long res_failed;	/* number of failed resolutions */
121
122	unsigned long lookups;		/* number of lookups */
123	unsigned long hits;		/* number of hits (among lookups) */
124
125	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
126	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */
127
128	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
129	unsigned long forced_gc_runs;	/* number of forced GC runs */
130
131	unsigned long unres_discards;	/* number of unresolved drops */
132	unsigned long table_fulls;      /* times even gc couldn't help */
133};
134
135#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
136
137struct neighbour {
138	struct neighbour __rcu	*next;
139	struct neigh_table	*tbl;
140	struct neigh_parms	*parms;
141	unsigned long		confirmed;
142	unsigned long		updated;
143	rwlock_t		lock;
144	refcount_t		refcnt;
145	unsigned int		arp_queue_len_bytes;
146	struct sk_buff_head	arp_queue;
147	struct timer_list	timer;
148	unsigned long		used;
149	atomic_t		probes;
150	u8			nud_state;
151	u8			type;
152	u8			dead;
153	u8			protocol;
154	u32			flags;
155	seqlock_t		ha_lock;
156	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
157	struct hh_cache		hh;
158	int			(*output)(struct neighbour *, struct sk_buff *);
159	const struct neigh_ops	*ops;
160	struct list_head	gc_list;
161	struct list_head	managed_list;
162	struct rcu_head		rcu;
163	struct net_device	*dev;
164	netdevice_tracker	dev_tracker;
165	u8			primary_key[];
166} __randomize_layout;
167
168struct neigh_ops {
169	int			family;
170	void			(*solicit)(struct neighbour *, struct sk_buff *);
171	void			(*error_report)(struct neighbour *, struct sk_buff *);
172	int			(*output)(struct neighbour *, struct sk_buff *);
173	int			(*connected_output)(struct neighbour *, struct sk_buff *);
174};
175
176struct pneigh_entry {
177	struct pneigh_entry	*next;
178	possible_net_t		net;
179	struct net_device	*dev;
180	netdevice_tracker	dev_tracker;
181	u32			flags;
182	u8			protocol;
183	u32			key[];
184};
185
186/*
187 *	neighbour table manipulation
188 */
189
190#define NEIGH_NUM_HASH_RND	4
191
192struct neigh_hash_table {
193	struct neighbour __rcu	**hash_buckets;
194	unsigned int		hash_shift;
195	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
196	struct rcu_head		rcu;
197};
198
199
200struct neigh_table {
201	int			family;
202	unsigned int		entry_size;
203	unsigned int		key_len;
204	__be16			protocol;
205	__u32			(*hash)(const void *pkey,
206					const struct net_device *dev,
207					__u32 *hash_rnd);
208	bool			(*key_eq)(const struct neighbour *, const void *pkey);
209	int			(*constructor)(struct neighbour *);
210	int			(*pconstructor)(struct pneigh_entry *);
211	void			(*pdestructor)(struct pneigh_entry *);
212	void			(*proxy_redo)(struct sk_buff *skb);
213	int			(*is_multicast)(const void *pkey);
214	bool			(*allow_add)(const struct net_device *dev,
215					     struct netlink_ext_ack *extack);
216	char			*id;
217	struct neigh_parms	parms;
218	struct list_head	parms_list;
219	int			gc_interval;
220	int			gc_thresh1;
221	int			gc_thresh2;
222	int			gc_thresh3;
223	unsigned long		last_flush;
224	struct delayed_work	gc_work;
225	struct delayed_work	managed_work;
226	struct timer_list 	proxy_timer;
227	struct sk_buff_head	proxy_queue;
228	atomic_t		entries;
229	atomic_t		gc_entries;
230	struct list_head	gc_list;
231	struct list_head	managed_list;
232	rwlock_t		lock;
233	unsigned long		last_rand;
234	struct neigh_statistics	__percpu *stats;
235	struct neigh_hash_table __rcu *nht;
236	struct pneigh_entry	**phash_buckets;
237};
238
239enum {
240	NEIGH_ARP_TABLE = 0,
241	NEIGH_ND_TABLE = 1,
242	NEIGH_DN_TABLE = 2,
243	NEIGH_NR_TABLES,
244	NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
245};
246
247static inline int neigh_parms_family(struct neigh_parms *p)
248{
249	return p->tbl->family;
250}
251
252#define NEIGH_PRIV_ALIGN	sizeof(long long)
253#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)
254
255static inline void *neighbour_priv(const struct neighbour *n)
256{
257	return (char *)n + n->tbl->entry_size;
258}
259
260/* flags for neigh_update() */
261#define NEIGH_UPDATE_F_OVERRIDE			BIT(0)
262#define NEIGH_UPDATE_F_WEAK_OVERRIDE		BIT(1)
263#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	BIT(2)
264#define NEIGH_UPDATE_F_USE			BIT(3)
265#define NEIGH_UPDATE_F_MANAGED			BIT(4)
266#define NEIGH_UPDATE_F_EXT_LEARNED		BIT(5)
267#define NEIGH_UPDATE_F_ISROUTER			BIT(6)
268#define NEIGH_UPDATE_F_ADMIN			BIT(7)
269
270/* In-kernel representation for NDA_FLAGS_EXT flags: */
271#define NTF_OLD_MASK		0xff
272#define NTF_EXT_SHIFT		8
273#define NTF_EXT_MASK		(NTF_EXT_MANAGED)
274
275#define NTF_MANAGED		(NTF_EXT_MANAGED << NTF_EXT_SHIFT)
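/*
 * Example (editor's sketch, not part of the header): user space passes the
 * low eight flag bits in NDA_FLAGS and the extended bits in NDA_FLAGS_EXT;
 * folding them into one u32 view is a shift by NTF_EXT_SHIFT, which is how
 * NTF_EXT_MANAGED becomes NTF_MANAGED above.  The helper name is made up and
 * the masking is illustrative only.
 */
static inline u32 example_fold_ndm_flags(u8 ndm_flags, u32 ext_flags)
{
	return ndm_flags | ((ext_flags & NTF_EXT_MASK) << NTF_EXT_SHIFT);
}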
276
277extern const struct nla_policy nda_policy[];
278
279static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
280{
281	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
282}
283
284static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
285{
286	const u32 *n32 = (const u32 *)n->primary_key;
287	const u32 *p32 = pkey;
288
289	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
290		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
291}
292
293static inline struct neighbour *___neigh_lookup_noref(
294	struct neigh_table *tbl,
295	bool (*key_eq)(const struct neighbour *n, const void *pkey),
296	__u32 (*hash)(const void *pkey,
297		      const struct net_device *dev,
298		      __u32 *hash_rnd),
299	const void *pkey,
300	struct net_device *dev)
301{
302	struct neigh_hash_table *nht = rcu_dereference(tbl->nht);
303	struct neighbour *n;
304	u32 hash_val;
305
306	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
307	for (n = rcu_dereference(nht->hash_buckets[hash_val]);
308	     n != NULL;
309	     n = rcu_dereference(n->next)) {
310		if (n->dev == dev && key_eq(n, pkey))
311			return n;
312	}
313
314	return NULL;
315}
316
317static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
318						     const void *pkey,
319						     struct net_device *dev)
320{
321	return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
322}
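/*
 * Example (editor's sketch, not part of the header): a table wires up ->hash
 * and ->key_eq (the ARP and NDISC tables wrap neigh_key_eq32() and
 * neigh_key_eq128() respectively), after which fast paths can look entries up
 * locklessly under RCU.  The sketch below roughly mirrors what neigh_lookup()
 * does internally; example_lookup_hold() is made up.
 */
static struct neighbour *example_lookup_hold(struct neigh_table *tbl,
					     const void *pkey,
					     struct net_device *dev)
{
	struct neighbour *n;

	rcu_read_lock();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;		/* entry already being destroyed */
	rcu_read_unlock();
	return n;
}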
323
324static inline void neigh_confirm(struct neighbour *n)
325{
326	if (n) {
327		unsigned long now = jiffies;
328
329		/* avoid dirtying neighbour */
330		if (READ_ONCE(n->confirmed) != now)
331			WRITE_ONCE(n->confirmed, now);
332	}
333}
334
335void neigh_table_init(int index, struct neigh_table *tbl);
336int neigh_table_clear(int index, struct neigh_table *tbl);
337struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
338			       struct net_device *dev);
339struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
340				 struct net_device *dev, bool want_ref);
341static inline struct neighbour *neigh_create(struct neigh_table *tbl,
342					     const void *pkey,
343					     struct net_device *dev)
344{
345	return __neigh_create(tbl, pkey, dev, true);
346}
347void neigh_destroy(struct neighbour *neigh);
348int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
349		       const bool immediate_ok);
350int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
351		 u32 nlmsg_pid);
352void __neigh_set_probe_once(struct neighbour *neigh);
353bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
354void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
355int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
356int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
357int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
358int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
359int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
360struct neighbour *neigh_event_ns(struct neigh_table *tbl,
361						u8 *lladdr, void *saddr,
362						struct net_device *dev);
363
364struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
365				      struct neigh_table *tbl);
366void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
367
368static inline
369struct net *neigh_parms_net(const struct neigh_parms *parms)
370{
371	return read_pnet(&parms->net);
372}
373
374unsigned long neigh_rand_reach_time(unsigned long base);
375
376void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
377		    struct sk_buff *skb);
378struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
379				   const void *key, struct net_device *dev,
380				   int creat);
381struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
382				     const void *key, struct net_device *dev);
383int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
384		  struct net_device *dev);
385
386static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
387{
388	return read_pnet(&pneigh->net);
389}
390
391void neigh_app_ns(struct neighbour *n);
392void neigh_for_each(struct neigh_table *tbl,
393		    void (*cb)(struct neighbour *, void *), void *cookie);
394void __neigh_for_each_release(struct neigh_table *tbl,
395			      int (*cb)(struct neighbour *));
396int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
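/*
 * Example (editor's sketch, not part of the header): despite the parameter
 * name, neigh_xmit() takes one of the NEIGH_*_TABLE indices defined earlier;
 * NEIGH_LINK_TABLE skips resolution and treats the address as a raw
 * link-layer destination (the MPLS forwarding path uses this interface).
 * example_forward() is made up.
 */
static int example_forward(struct net_device *dev, struct sk_buff *skb,
			   const __be32 *gw)
{
	/* resolve gw through the IPv4 neighbour table, then transmit */
	return neigh_xmit(NEIGH_ARP_TABLE, dev, gw, skb);
}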
397
398struct neigh_seq_state {
399	struct seq_net_private p;
400	struct neigh_table *tbl;
401	struct neigh_hash_table *nht;
402	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
403				struct neighbour *n, loff_t *pos);
404	unsigned int bucket;
405	unsigned int flags;
406#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
407#define NEIGH_SEQ_IS_PNEIGH	0x00000002
408#define NEIGH_SEQ_SKIP_NOARP	0x00000004
409};
410void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
411		      unsigned int);
412void *neigh_seq_next(struct seq_file *, void *, loff_t *);
413void neigh_seq_stop(struct seq_file *, void *);
414
415int neigh_proc_dointvec(struct ctl_table *ctl, int write,
416			void *buffer, size_t *lenp, loff_t *ppos);
417int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
418				void *buffer,
419				size_t *lenp, loff_t *ppos);
420int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
421				   void *buffer, size_t *lenp, loff_t *ppos);
422
423int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
424			  proc_handler *proc_handler);
425void neigh_sysctl_unregister(struct neigh_parms *p);
426
427static inline void __neigh_parms_put(struct neigh_parms *parms)
428{
429	refcount_dec(&parms->refcnt);
430}
431
432static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
433{
434	refcount_inc(&parms->refcnt);
435	return parms;
436}
437
438/*
439 *	Neighbour references
440 */
441
442static inline void neigh_release(struct neighbour *neigh)
443{
444	if (refcount_dec_and_test(&neigh->refcnt))
445		neigh_destroy(neigh);
446}
447
448static inline struct neighbour * neigh_clone(struct neighbour *neigh)
449{
450	if (neigh)
451		refcount_inc(&neigh->refcnt);
452	return neigh;
453}
454
455#define neigh_hold(n)	refcount_inc(&(n)->refcnt)
456
457static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
458						  struct sk_buff *skb,
459						  const bool immediate_ok)
460{
461	unsigned long now = jiffies;
462
463	if (READ_ONCE(neigh->used) != now)
464		WRITE_ONCE(neigh->used, now);
465	if (!(READ_ONCE(neigh->nud_state) & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
466		return __neigh_event_send(neigh, skb, immediate_ok);
467	return 0;
468}
469
470static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
471{
472	return neigh_event_send_probe(neigh, skb, true);
473}
474
475#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
476static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
477{
478	unsigned int seq, hh_alen;
479
480	do {
481		seq = read_seqbegin(&hh->hh_lock);
482		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
483		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
484	} while (read_seqretry(&hh->hh_lock, seq));
485	return 0;
486}
487#endif
488
489static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
490{
491	unsigned int hh_alen = 0;
492	unsigned int seq;
493	unsigned int hh_len;
494
495	do {
496		seq = read_seqbegin(&hh->hh_lock);
497		hh_len = READ_ONCE(hh->hh_len);
498		if (likely(hh_len <= HH_DATA_MOD)) {
499			hh_alen = HH_DATA_MOD;
500
501			/* skb_push() would proceed silently if we have room for
502			 * the unaligned size but not for the aligned size:
503			 * check headroom explicitly.
504			 */
505			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
506				/* this is inlined by gcc */
507				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
508				       HH_DATA_MOD);
509			}
510		} else {
511			hh_alen = HH_DATA_ALIGN(hh_len);
512
513			if (likely(skb_headroom(skb) >= hh_alen)) {
514				memcpy(skb->data - hh_alen, hh->hh_data,
515				       hh_alen);
516			}
517		}
518	} while (read_seqretry(&hh->hh_lock, seq));
519
520	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
521		kfree_skb(skb);
522		return NET_XMIT_DROP;
523	}
524
525	__skb_push(skb, hh_len);
526	return dev_queue_xmit(skb);
527}
528
529static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
530			       bool skip_cache)
531{
532	const struct hh_cache *hh = &n->hh;
533
534	/* n->nud_state and hh->hh_len could be changed under us.
535	 * neigh_hh_output() is taking care of the race later.
536	 */
537	if (!skip_cache &&
538	    (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
539	    READ_ONCE(hh->hh_len))
540		return neigh_hh_output(hh, skb);
541
542	return READ_ONCE(n->output)(n, skb);
543}
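/*
 * Example (editor's sketch, not part of the header): the protocol output
 * paths end in this pattern: look the next hop up locklessly, create it on
 * demand without taking a reference, and let neigh_output() choose between
 * the cached-header fast path and the resolving slow path.  The function
 * name is made up; arp_tbl comes from <net/arp.h>.
 */
static int example_finish_output(struct net_device *dev, struct sk_buff *skb,
				 __be32 nexthop)
{
	struct neighbour *n;
	int ret;

	rcu_read_lock();
	n = __neigh_lookup_noref(&arp_tbl, &nexthop, dev);
	if (unlikely(!n))
		n = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(n)) {
		ret = neigh_output(n, skb, false);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();
	kfree_skb(skb);
	return -EINVAL;
}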
544
545static inline struct neighbour *
546__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
547{
548	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
549
550	if (n || !creat)
551		return n;
552
553	n = neigh_create(tbl, pkey, dev);
554	return IS_ERR(n) ? NULL : n;
555}
556
557static inline struct neighbour *
558__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
559  struct net_device *dev)
560{
561	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
562
563	if (n)
564		return n;
565
566	return neigh_create(tbl, pkey, dev);
567}
568
569struct neighbour_cb {
570	unsigned long sched_next;
571	unsigned int flags;
572};
573
574#define LOCALLY_ENQUEUED 0x1
575
576#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
577
578static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
579				     const struct net_device *dev)
580{
581	unsigned int seq;
582
583	do {
584		seq = read_seqbegin(&n->ha_lock);
585		memcpy(dst, n->ha, dev->addr_len);
586	} while (read_seqretry(&n->ha_lock, seq));
587}
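/*
 * Example (editor's sketch, not part of the header): n->ha can be rewritten
 * concurrently, so callers that need a stable copy of the hardware address
 * (for instance to build a header outside the neighbour lock) take a
 * seqlock-consistent snapshot first.  example_build_header() is made up.
 */
static int example_build_header(struct sk_buff *skb, struct neighbour *n)
{
	char ha[MAX_ADDR_LEN];

	neigh_ha_snapshot(ha, n, n->dev);
	return dev_hard_header(skb, n->dev, ntohs(skb->protocol),
			       ha, NULL, skb->len);
}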
588
589static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
590					  int *notify)
591{
592	u8 ndm_flags = 0;
593
594	ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0;
595	if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) {
596		if (ndm_flags & NTF_ROUTER)
597			neigh->flags |= NTF_ROUTER;
598		else
599			neigh->flags &= ~NTF_ROUTER;
600		*notify = 1;
601	}
602}
603#endif