include/net/neighbour.h (v6.13.7)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _NET_NEIGHBOUR_H
  3#define _NET_NEIGHBOUR_H
  4
  5#include <linux/neighbour.h>
  6
  7/*
  8 *	Generic neighbour manipulation
  9 *
 10 *	Authors:
 11 *	Pedro Roque		<roque@di.fc.ul.pt>
 12 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 13 *
 14 * 	Changes:
 15 *
 16 *	Harald Welte:		<laforge@gnumonks.org>
 17 *		- Add neighbour cache statistics like rtstat
 18 */
 19
 20#include <linux/atomic.h>
 21#include <linux/refcount.h>
 22#include <linux/netdevice.h>
 23#include <linux/skbuff.h>
 24#include <linux/rcupdate.h>
 25#include <linux/seq_file.h>
 26#include <linux/bitmap.h>
 27
 28#include <linux/err.h>
 29#include <linux/sysctl.h>
 30#include <linux/workqueue.h>
 31#include <net/rtnetlink.h>
 32#include <net/neighbour_tables.h>
 33
 34/*
 35 * NUD stands for "neighbor unreachability detection"
 36 */
 37
 38#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
 39#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
 40#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
 41
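/* Editorial sketch, not part of neighbour.h: the composite masks above are
 * how consumers of this header usually test nud_state. NUD_VALID covers
 * every state that still carries a usable link-layer address, while
 * NUD_CONNECTED is the subset treated as confirmed reachable (or static).
 * The helper name below is hypothetical.
 */
static inline bool example_neigh_addr_usable(const struct neighbour *n)
{
	/* nud_state may change under us, so read it once */
	u8 state = READ_ONCE(n->nud_state);

	return state & NUD_VALID;
}
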
 42struct neighbour;
 43
 44enum {
 45	NEIGH_VAR_MCAST_PROBES,
 46	NEIGH_VAR_UCAST_PROBES,
 47	NEIGH_VAR_APP_PROBES,
 48	NEIGH_VAR_MCAST_REPROBES,
 49	NEIGH_VAR_RETRANS_TIME,
 50	NEIGH_VAR_BASE_REACHABLE_TIME,
 51	NEIGH_VAR_DELAY_PROBE_TIME,
 52	NEIGH_VAR_INTERVAL_PROBE_TIME_MS,
 53	NEIGH_VAR_GC_STALETIME,
 54	NEIGH_VAR_QUEUE_LEN_BYTES,
 55	NEIGH_VAR_PROXY_QLEN,
 56	NEIGH_VAR_ANYCAST_DELAY,
 57	NEIGH_VAR_PROXY_DELAY,
 58	NEIGH_VAR_LOCKTIME,
 59#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
 60	/* Following are used as a second way to access one of the above */
 61	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
 62	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
 63	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
 64	/* Following are used by "default" only */
 65	NEIGH_VAR_GC_INTERVAL,
 66	NEIGH_VAR_GC_THRESH1,
 67	NEIGH_VAR_GC_THRESH2,
 68	NEIGH_VAR_GC_THRESH3,
 69	NEIGH_VAR_MAX
 70};
 71
 72struct neigh_parms {
 73	possible_net_t net;
 74	struct net_device *dev;
 75	netdevice_tracker dev_tracker;
 76	struct list_head list;
 77	int	(*neigh_setup)(struct neighbour *);
 78	struct neigh_table *tbl;
 79
 80	void	*sysctl_table;
 81
 82	int dead;
 83	refcount_t refcnt;
 84	struct rcu_head rcu_head;
 85
 86	int	reachable_time;
 87	u32	qlen;
 88	int	data[NEIGH_VAR_DATA_MAX];
 89	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 90};
 91
 92static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
 93{
 94	set_bit(index, p->data_state);
 95	p->data[index] = val;
 96}
 97
 98#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
 99
100/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
101 * In other cases, NEIGH_VAR_SET should be used.
102 */
103#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
104#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
105
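/* Editorial sketch, not part of neighbour.h: NEIGH_VAR() reads a tunable,
 * NEIGH_VAR_INIT() writes it without touching p->data_state (for use from
 * an ndo_neigh_setup() callback, per the comment above), and NEIGH_VAR_SET()
 * additionally marks the index in p->data_state via neigh_var_set(). The
 * callback and value below are hypothetical.
 */
static int example_ndo_neigh_setup(struct net_device *dev,
				   struct neigh_parms *p)
{
	if (NEIGH_VAR(p, UCAST_PROBES) < 4)
		NEIGH_VAR_INIT(p, UCAST_PROBES, 4);	/* illustrative value */
	return 0;
}
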
106static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
107{
108	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
109}
110
111static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
112{
113	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
114}
115
116struct neigh_statistics {
117	unsigned long allocs;		/* number of allocated neighs */
118	unsigned long destroys;		/* number of destroyed neighs */
119	unsigned long hash_grows;	/* number of hash resizes */
120
121	unsigned long res_failed;	/* number of failed resolutions */
122
123	unsigned long lookups;		/* number of lookups */
124	unsigned long hits;		/* number of hits (among lookups) */
125
126	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
127	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */
128
129	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
130	unsigned long forced_gc_runs;	/* number of forced GC runs */
131
132	unsigned long unres_discards;	/* number of unresolved drops */
133	unsigned long table_fulls;      /* times even gc couldn't help */
134};
135
136#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
137
138struct neighbour {
139	struct hlist_node	hash;
140	struct hlist_node	dev_list;
141	struct neigh_table	*tbl;
142	struct neigh_parms	*parms;
143	unsigned long		confirmed;
144	unsigned long		updated;
145	rwlock_t		lock;
146	refcount_t		refcnt;
147	unsigned int		arp_queue_len_bytes;
148	struct sk_buff_head	arp_queue;
149	struct timer_list	timer;
150	unsigned long		used;
151	atomic_t		probes;
152	u8			nud_state;
153	u8			type;
154	u8			dead;
155	u8			protocol;
156	u32			flags;
157	seqlock_t		ha_lock;
158	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
159	struct hh_cache		hh;
160	int			(*output)(struct neighbour *, struct sk_buff *);
161	const struct neigh_ops	*ops;
162	struct list_head	gc_list;
163	struct list_head	managed_list;
164	struct rcu_head		rcu;
165	struct net_device	*dev;
166	netdevice_tracker	dev_tracker;
167	u8			primary_key[];
168} __randomize_layout;
169
170struct neigh_ops {
171	int			family;
172	void			(*solicit)(struct neighbour *, struct sk_buff *);
173	void			(*error_report)(struct neighbour *, struct sk_buff *);
174	int			(*output)(struct neighbour *, struct sk_buff *);
175	int			(*connected_output)(struct neighbour *, struct sk_buff *);
176};
177
178struct pneigh_entry {
179	struct pneigh_entry	*next;
180	possible_net_t		net;
181	struct net_device	*dev;
182	netdevice_tracker	dev_tracker;
183	u32			flags;
184	u8			protocol;
185	u32			key[];
186};
187
188/*
189 *	neighbour table manipulation
190 */
191
192#define NEIGH_NUM_HASH_RND	4
193
194struct neigh_hash_table {
195	struct hlist_head	*hash_heads;
196	unsigned int		hash_shift;
197	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
198	struct rcu_head		rcu;
199};
200
201
202struct neigh_table {
203	int			family;
204	unsigned int		entry_size;
205	unsigned int		key_len;
206	__be16			protocol;
207	__u32			(*hash)(const void *pkey,
208					const struct net_device *dev,
209					__u32 *hash_rnd);
210	bool			(*key_eq)(const struct neighbour *, const void *pkey);
211	int			(*constructor)(struct neighbour *);
212	int			(*pconstructor)(struct pneigh_entry *);
213	void			(*pdestructor)(struct pneigh_entry *);
214	void			(*proxy_redo)(struct sk_buff *skb);
215	int			(*is_multicast)(const void *pkey);
216	bool			(*allow_add)(const struct net_device *dev,
217					     struct netlink_ext_ack *extack);
218	char			*id;
219	struct neigh_parms	parms;
220	struct list_head	parms_list;
221	int			gc_interval;
222	int			gc_thresh1;
223	int			gc_thresh2;
224	int			gc_thresh3;
225	unsigned long		last_flush;
226	struct delayed_work	gc_work;
227	struct delayed_work	managed_work;
228	struct timer_list 	proxy_timer;
229	struct sk_buff_head	proxy_queue;
230	atomic_t		entries;
231	atomic_t		gc_entries;
232	struct list_head	gc_list;
233	struct list_head	managed_list;
234	rwlock_t		lock;
235	unsigned long		last_rand;
236	struct neigh_statistics	__percpu *stats;
237	struct neigh_hash_table __rcu *nht;
238	struct pneigh_entry	**phash_buckets;
239};
240
241static inline int neigh_parms_family(struct neigh_parms *p)
242{
243	return p->tbl->family;
244}
245
246#define NEIGH_PRIV_ALIGN	sizeof(long long)
247#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)
248
249static inline void *neighbour_priv(const struct neighbour *n)
250{
251	return (char *)n + n->tbl->entry_size;
252}
253
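/* Editorial sketch, not part of neighbour.h: a protocol or driver that keeps
 * per-neighbour private state reaches it through neighbour_priv(), which
 * points just past the tbl->entry_size bytes of the core entry. The structure
 * and helper below are hypothetical; how entry_size is sized to leave room
 * for the private area is handled elsewhere (net/core/neigh.c and the owning
 * protocol).
 */
struct example_neigh_priv {
	unsigned long last_tx;		/* hypothetical per-neighbour field */
};

static inline struct example_neigh_priv *
example_neigh_priv(const struct neighbour *n)
{
	return neighbour_priv(n);
}
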
254/* flags for neigh_update() */
255#define NEIGH_UPDATE_F_OVERRIDE			BIT(0)
256#define NEIGH_UPDATE_F_WEAK_OVERRIDE		BIT(1)
257#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	BIT(2)
258#define NEIGH_UPDATE_F_USE			BIT(3)
259#define NEIGH_UPDATE_F_MANAGED			BIT(4)
260#define NEIGH_UPDATE_F_EXT_LEARNED		BIT(5)
261#define NEIGH_UPDATE_F_ISROUTER			BIT(6)
262#define NEIGH_UPDATE_F_ADMIN			BIT(7)
263
264/* In-kernel representation for NDA_FLAGS_EXT flags: */
265#define NTF_OLD_MASK		0xff
266#define NTF_EXT_SHIFT		8
267#define NTF_EXT_MASK		(NTF_EXT_MANAGED)
268
269#define NTF_MANAGED		(NTF_EXT_MANAGED << NTF_EXT_SHIFT)
270
271extern const struct nla_policy nda_policy[];
272
273#define neigh_for_each_in_bucket(pos, head) hlist_for_each_entry(pos, head, hash)
274#define neigh_for_each_in_bucket_rcu(pos, head) \
275	hlist_for_each_entry_rcu(pos, head, hash)
276#define neigh_for_each_in_bucket_safe(pos, tmp, head) \
277	hlist_for_each_entry_safe(pos, tmp, head, hash)
278
279static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
280{
281	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
282}
283
284static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
285{
286	const u32 *n32 = (const u32 *)n->primary_key;
287	const u32 *p32 = pkey;
288
289	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
290		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
291}
292
293static inline struct neighbour *___neigh_lookup_noref(
294	struct neigh_table *tbl,
295	bool (*key_eq)(const struct neighbour *n, const void *pkey),
296	__u32 (*hash)(const void *pkey,
297		      const struct net_device *dev,
298		      __u32 *hash_rnd),
299	const void *pkey,
300	struct net_device *dev)
301{
302	struct neigh_hash_table *nht = rcu_dereference(tbl->nht);
303	struct neighbour *n;
304	u32 hash_val;
305
306	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
307	neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[hash_val])
308		if (n->dev == dev && key_eq(n, pkey))
309			return n;
310
311	return NULL;
312}
313
314static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
315						     const void *pkey,
316						     struct net_device *dev)
317{
318	return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
319}
320
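/* Editorial sketch, not part of neighbour.h: the *_noref lookups return a
 * pointer without taking a reference, so the caller must stay inside an RCU
 * read-side critical section while it uses the result. Hypothetical helper:
 */
static inline bool example_neigh_is_cached(struct neigh_table *tbl,
					   const void *pkey,
					   struct net_device *dev)
{
	struct neighbour *n;
	bool cached;

	rcu_read_lock();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	cached = n && (READ_ONCE(n->nud_state) & NUD_VALID);
	rcu_read_unlock();

	return cached;
}
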
321static inline void neigh_confirm(struct neighbour *n)
322{
323	if (n) {
324		unsigned long now = jiffies;
325
326		/* avoid dirtying neighbour */
327		if (READ_ONCE(n->confirmed) != now)
328			WRITE_ONCE(n->confirmed, now);
329	}
330}
331
332void neigh_table_init(int index, struct neigh_table *tbl);
333int neigh_table_clear(int index, struct neigh_table *tbl);
334struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
335			       struct net_device *dev);
336struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
337				 struct net_device *dev, bool want_ref);
338static inline struct neighbour *neigh_create(struct neigh_table *tbl,
339					     const void *pkey,
340					     struct net_device *dev)
341{
342	return __neigh_create(tbl, pkey, dev, true);
343}
344void neigh_destroy(struct neighbour *neigh);
345int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
346		       const bool immediate_ok);
347int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
348		 u32 nlmsg_pid);
349void __neigh_set_probe_once(struct neighbour *neigh);
350bool neigh_remove_one(struct neighbour *ndel);
351void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
352int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
353int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
354int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
355int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
356int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
357struct neighbour *neigh_event_ns(struct neigh_table *tbl,
358						u8 *lladdr, void *saddr,
359						struct net_device *dev);
360
361struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
362				      struct neigh_table *tbl);
363void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
364
365static inline
366struct net *neigh_parms_net(const struct neigh_parms *parms)
367{
368	return read_pnet(&parms->net);
369}
370
371unsigned long neigh_rand_reach_time(unsigned long base);
372
373void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
374		    struct sk_buff *skb);
375struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
376				   const void *key, struct net_device *dev,
377				   int creat);
378struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
379				     const void *key, struct net_device *dev);
380int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
381		  struct net_device *dev);
382
383static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
384{
385	return read_pnet(&pneigh->net);
386}
387
388void neigh_app_ns(struct neighbour *n);
389void neigh_for_each(struct neigh_table *tbl,
390		    void (*cb)(struct neighbour *, void *), void *cookie);
391void __neigh_for_each_release(struct neigh_table *tbl,
392			      int (*cb)(struct neighbour *));
393int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
394
395struct neigh_seq_state {
396	struct seq_net_private p;
397	struct neigh_table *tbl;
398	struct neigh_hash_table *nht;
399	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
400				struct neighbour *n, loff_t *pos);
401	unsigned int bucket;
402	unsigned int flags;
403#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
404#define NEIGH_SEQ_IS_PNEIGH	0x00000002
405#define NEIGH_SEQ_SKIP_NOARP	0x00000004
406};
407void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
408		      unsigned int);
409void *neigh_seq_next(struct seq_file *, void *, loff_t *);
410void neigh_seq_stop(struct seq_file *, void *);
411
412int neigh_proc_dointvec(const struct ctl_table *ctl, int write,
413			void *buffer, size_t *lenp, loff_t *ppos);
414int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write,
415				void *buffer,
416				size_t *lenp, loff_t *ppos);
417int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
418				   void *buffer, size_t *lenp, loff_t *ppos);
419
420int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
421			  proc_handler *proc_handler);
422void neigh_sysctl_unregister(struct neigh_parms *p);
423
424static inline void __neigh_parms_put(struct neigh_parms *parms)
425{
426	refcount_dec(&parms->refcnt);
427}
428
429static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
430{
431	refcount_inc(&parms->refcnt);
432	return parms;
433}
434
435/*
436 *	Neighbour references
437 */
438
439static inline void neigh_release(struct neighbour *neigh)
440{
441	if (refcount_dec_and_test(&neigh->refcnt))
442		neigh_destroy(neigh);
443}
444
445static inline struct neighbour * neigh_clone(struct neighbour *neigh)
446{
447	if (neigh)
448		refcount_inc(&neigh->refcnt);
449	return neigh;
450}
451
452#define neigh_hold(n)	refcount_inc(&(n)->refcnt)
453
454static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
455						  struct sk_buff *skb,
456						  const bool immediate_ok)
457{
458	unsigned long now = jiffies;
459
460	if (READ_ONCE(neigh->used) != now)
461		WRITE_ONCE(neigh->used, now);
462	if (!(READ_ONCE(neigh->nud_state) & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
463		return __neigh_event_send(neigh, skb, immediate_ok);
464	return 0;
465}
466
467static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
468{
469	return neigh_event_send_probe(neigh, skb, true);
470}
471
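/* Editorial sketch, not part of neighbour.h: on the output path,
 * neigh_event_send() refreshes n->used and, when the entry is not in a
 * connected/delay/probe state, hands the skb to the resolution state machine
 * via __neigh_event_send(). A zero return means the caller may transmit now;
 * non-zero means the skb has been consumed (queued until resolution
 * completes, or dropped). Hypothetical caller:
 */
static int example_try_xmit(struct neighbour *n, struct sk_buff *skb)
{
	if (neigh_event_send(n, skb) == 0)
		return READ_ONCE(n->output)(n, skb);

	return 0;	/* skb was taken over by neighbour resolution */
}
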
472#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
473static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
474{
475	unsigned int seq, hh_alen;
476
477	do {
478		seq = read_seqbegin(&hh->hh_lock);
479		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
480		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
481	} while (read_seqretry(&hh->hh_lock, seq));
482	return 0;
483}
484#endif
485
486static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
487{
488	unsigned int hh_alen = 0;
489	unsigned int seq;
490	unsigned int hh_len;
491
492	do {
493		seq = read_seqbegin(&hh->hh_lock);
494		hh_len = READ_ONCE(hh->hh_len);
495		if (likely(hh_len <= HH_DATA_MOD)) {
496			hh_alen = HH_DATA_MOD;
497
498			/* skb_push() would proceed silently if we have room for
499			 * the unaligned size but not for the aligned size:
500			 * check headroom explicitly.
501			 */
502			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
503				/* this is inlined by gcc */
504				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
505				       HH_DATA_MOD);
506			}
507		} else {
508			hh_alen = HH_DATA_ALIGN(hh_len);
509
510			if (likely(skb_headroom(skb) >= hh_alen)) {
511				memcpy(skb->data - hh_alen, hh->hh_data,
512				       hh_alen);
513			}
514		}
515	} while (read_seqretry(&hh->hh_lock, seq));
516
517	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
518		kfree_skb(skb);
519		return NET_XMIT_DROP;
520	}
521
522	__skb_push(skb, hh_len);
523	return dev_queue_xmit(skb);
524}
525
526static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
527			       bool skip_cache)
528{
529	const struct hh_cache *hh = &n->hh;
530
531	/* n->nud_state and hh->hh_len could be changed under us.
532	 * neigh_hh_output() is taking care of the race later.
533	 */
534	if (!skip_cache &&
535	    (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
536	    READ_ONCE(hh->hh_len))
537		return neigh_hh_output(hh, skb);
538
539	return READ_ONCE(n->output)(n, skb);
540}
541
542static inline struct neighbour *
543__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
544{
545	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
546
547	if (n || !creat)
548		return n;
549
550	n = neigh_create(tbl, pkey, dev);
551	return IS_ERR(n) ? NULL : n;
552}
553
554static inline struct neighbour *
555__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
556  struct net_device *dev)
557{
558	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
559
560	if (n)
561		return n;
562
563	return neigh_create(tbl, pkey, dev);
564}
565
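/* Editorial sketch, not part of neighbour.h: __neigh_lookup() folds a failed
 * neigh_create() into a NULL return, while __neigh_lookup_errno() propagates
 * the ERR_PTR. Both return a referenced entry on success, so the caller owns
 * a reference and must drop it with neigh_release(). Hypothetical caller:
 */
static int example_get_or_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev)
{
	struct neighbour *n = __neigh_lookup_errno(tbl, pkey, dev);

	if (IS_ERR(n))
		return PTR_ERR(n);

	/* ... use n ... */
	neigh_release(n);
	return 0;
}
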
566struct neighbour_cb {
567	unsigned long sched_next;
568	unsigned int flags;
569};
570
571#define LOCALLY_ENQUEUED 0x1
572
573#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
574
575static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
576				     const struct net_device *dev)
577{
578	unsigned int seq;
579
580	do {
581		seq = read_seqbegin(&n->ha_lock);
582		memcpy(dst, n->ha, dev->addr_len);
583	} while (read_seqretry(&n->ha_lock, seq));
584}
585
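/* Editorial sketch, not part of neighbour.h: neigh_ha_snapshot() retries
 * under n->ha_lock until it has copied a consistent hardware address, so a
 * caller building a frame can take a stable copy without locking n itself.
 * Hypothetical helper:
 */
static inline void example_copy_dest_addr(struct neighbour *n,
					  const struct net_device *dev,
					  char ha[MAX_ADDR_LEN])
{
	neigh_ha_snapshot(ha, n, dev);
}
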
586static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
587					  int *notify)
588{
589	u8 ndm_flags = 0;
590
591	ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0;
592	if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) {
593		if (ndm_flags & NTF_ROUTER)
594			neigh->flags |= NTF_ROUTER;
595		else
596			neigh->flags &= ~NTF_ROUTER;
597		*notify = 1;
598	}
599}
600#endif
include/net/neighbour.h (v4.17)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _NET_NEIGHBOUR_H
  3#define _NET_NEIGHBOUR_H
  4
  5#include <linux/neighbour.h>
  6
  7/*
  8 *	Generic neighbour manipulation
  9 *
 10 *	Authors:
 11 *	Pedro Roque		<roque@di.fc.ul.pt>
 12 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 13 *
 14 * 	Changes:
 15 *
 16 *	Harald Welte:		<laforge@gnumonks.org>
 17 *		- Add neighbour cache statistics like rtstat
 18 */
 19
 20#include <linux/atomic.h>
 21#include <linux/refcount.h>
 22#include <linux/netdevice.h>
 23#include <linux/skbuff.h>
 24#include <linux/rcupdate.h>
 25#include <linux/seq_file.h>
 26#include <linux/bitmap.h>
 27
 28#include <linux/err.h>
 29#include <linux/sysctl.h>
 30#include <linux/workqueue.h>
 31#include <net/rtnetlink.h>
 32
 33/*
 34 * NUD stands for "neighbor unreachability detection"
 35 */
 36
 37#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
 38#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
 39#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
 40
 41struct neighbour;
 42
 43enum {
 44	NEIGH_VAR_MCAST_PROBES,
 45	NEIGH_VAR_UCAST_PROBES,
 46	NEIGH_VAR_APP_PROBES,
 47	NEIGH_VAR_MCAST_REPROBES,
 48	NEIGH_VAR_RETRANS_TIME,
 49	NEIGH_VAR_BASE_REACHABLE_TIME,
 50	NEIGH_VAR_DELAY_PROBE_TIME,
 51	NEIGH_VAR_GC_STALETIME,
 52	NEIGH_VAR_QUEUE_LEN_BYTES,
 53	NEIGH_VAR_PROXY_QLEN,
 54	NEIGH_VAR_ANYCAST_DELAY,
 55	NEIGH_VAR_PROXY_DELAY,
 56	NEIGH_VAR_LOCKTIME,
 57#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
 58	/* Following are used as a second way to access one of the above */
 59	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
 60	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
 61	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
 62	/* Following are used by "default" only */
 63	NEIGH_VAR_GC_INTERVAL,
 64	NEIGH_VAR_GC_THRESH1,
 65	NEIGH_VAR_GC_THRESH2,
 66	NEIGH_VAR_GC_THRESH3,
 67	NEIGH_VAR_MAX
 68};
 69
 70struct neigh_parms {
 71	possible_net_t net;
 72	struct net_device *dev;
 73	struct list_head list;
 74	int	(*neigh_setup)(struct neighbour *);
 75	void	(*neigh_cleanup)(struct neighbour *);
 76	struct neigh_table *tbl;
 77
 78	void	*sysctl_table;
 79
 80	int dead;
 81	refcount_t refcnt;
 82	struct rcu_head rcu_head;
 83
 84	int	reachable_time;
 85	int	data[NEIGH_VAR_DATA_MAX];
 86	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 87};
 88
 89static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
 90{
 91	set_bit(index, p->data_state);
 92	p->data[index] = val;
 93}
 94
 95#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
 96
 97/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
 98 * In other cases, NEIGH_VAR_SET should be used.
 99 */
100#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
101#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
102
103static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
104{
105	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
106}
107
108static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
109{
110	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
111}
112
113struct neigh_statistics {
114	unsigned long allocs;		/* number of allocated neighs */
115	unsigned long destroys;		/* number of destroyed neighs */
116	unsigned long hash_grows;	/* number of hash resizes */
117
118	unsigned long res_failed;	/* number of failed resolutions */
119
120	unsigned long lookups;		/* number of lookups */
121	unsigned long hits;		/* number of hits (among lookups) */
122
123	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
124	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */
125
126	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
127	unsigned long forced_gc_runs;	/* number of forced GC runs */
128
129	unsigned long unres_discards;	/* number of unresolved drops */
130	unsigned long table_fulls;      /* times even gc couldn't help */
131};
132
133#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
134
135struct neighbour {
136	struct neighbour __rcu	*next;
137	struct neigh_table	*tbl;
138	struct neigh_parms	*parms;
139	unsigned long		confirmed;
140	unsigned long		updated;
141	rwlock_t		lock;
142	refcount_t		refcnt;
143	struct sk_buff_head	arp_queue;
144	unsigned int		arp_queue_len_bytes;
145	struct timer_list	timer;
146	unsigned long		used;
147	atomic_t		probes;
148	__u8			flags;
149	__u8			nud_state;
150	__u8			type;
151	__u8			dead;
152	seqlock_t		ha_lock;
153	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
154	struct hh_cache		hh;
155	int			(*output)(struct neighbour *, struct sk_buff *);
156	const struct neigh_ops	*ops;
157	struct rcu_head		rcu;
158	struct net_device	*dev;
159	u8			primary_key[0];
160} __randomize_layout;
161
162struct neigh_ops {
163	int			family;
164	void			(*solicit)(struct neighbour *, struct sk_buff *);
165	void			(*error_report)(struct neighbour *, struct sk_buff *);
166	int			(*output)(struct neighbour *, struct sk_buff *);
167	int			(*connected_output)(struct neighbour *, struct sk_buff *);
168};
169
170struct pneigh_entry {
171	struct pneigh_entry	*next;
172	possible_net_t		net;
173	struct net_device	*dev;
174	u8			flags;
175	u8			key[0];
176};
177
178/*
179 *	neighbour table manipulation
180 */
181
182#define NEIGH_NUM_HASH_RND	4
183
184struct neigh_hash_table {
185	struct neighbour __rcu	**hash_buckets;
186	unsigned int		hash_shift;
187	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
188	struct rcu_head		rcu;
189};
190
191
192struct neigh_table {
193	int			family;
194	unsigned int		entry_size;
195	unsigned int		key_len;
196	__be16			protocol;
197	__u32			(*hash)(const void *pkey,
198					const struct net_device *dev,
199					__u32 *hash_rnd);
200	bool			(*key_eq)(const struct neighbour *, const void *pkey);
201	int			(*constructor)(struct neighbour *);
202	int			(*pconstructor)(struct pneigh_entry *);
203	void			(*pdestructor)(struct pneigh_entry *);
204	void			(*proxy_redo)(struct sk_buff *skb);
205	char			*id;
206	struct neigh_parms	parms;
207	struct list_head	parms_list;
208	int			gc_interval;
209	int			gc_thresh1;
210	int			gc_thresh2;
211	int			gc_thresh3;
212	unsigned long		last_flush;
213	struct delayed_work	gc_work;
214	struct timer_list 	proxy_timer;
215	struct sk_buff_head	proxy_queue;
216	atomic_t		entries;
217	rwlock_t		lock;
218	unsigned long		last_rand;
219	struct neigh_statistics	__percpu *stats;
220	struct neigh_hash_table __rcu *nht;
221	struct pneigh_entry	**phash_buckets;
222};
223
224enum {
225	NEIGH_ARP_TABLE = 0,
226	NEIGH_ND_TABLE = 1,
227	NEIGH_DN_TABLE = 2,
228	NEIGH_NR_TABLES,
229	NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
230};
231
232static inline int neigh_parms_family(struct neigh_parms *p)
233{
234	return p->tbl->family;
235}
236
237#define NEIGH_PRIV_ALIGN	sizeof(long long)
238#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)
239
240static inline void *neighbour_priv(const struct neighbour *n)
241{
242	return (char *)n + n->tbl->entry_size;
243}
244
245/* flags for neigh_update() */
246#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
247#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
248#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
249#define NEIGH_UPDATE_F_ISROUTER			0x40000000
250#define NEIGH_UPDATE_F_ADMIN			0x80000000
251
252
253static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
254{
255	return *(const u16 *)n->primary_key == *(const u16 *)pkey;
256}
257
258static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
259{
260	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
261}
262
263static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
264{
265	const u32 *n32 = (const u32 *)n->primary_key;
266	const u32 *p32 = pkey;
267
268	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
269		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
270}
271
272static inline struct neighbour *___neigh_lookup_noref(
273	struct neigh_table *tbl,
274	bool (*key_eq)(const struct neighbour *n, const void *pkey),
275	__u32 (*hash)(const void *pkey,
276		      const struct net_device *dev,
277		      __u32 *hash_rnd),
278	const void *pkey,
279	struct net_device *dev)
280{
281	struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
282	struct neighbour *n;
283	u32 hash_val;
284
285	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
286	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
287	     n != NULL;
288	     n = rcu_dereference_bh(n->next)) {
289		if (n->dev == dev && key_eq(n, pkey))
290			return n;
291	}
292
293	return NULL;
294}
295
296static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
297						     const void *pkey,
298						     struct net_device *dev)
299{
300	return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
301}
302
303void neigh_table_init(int index, struct neigh_table *tbl);
304int neigh_table_clear(int index, struct neigh_table *tbl);
305struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
306			       struct net_device *dev);
307struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
308				     const void *pkey);
309struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
310				 struct net_device *dev, bool want_ref);
311static inline struct neighbour *neigh_create(struct neigh_table *tbl,
312					     const void *pkey,
313					     struct net_device *dev)
314{
315	return __neigh_create(tbl, pkey, dev, true);
316}
317void neigh_destroy(struct neighbour *neigh);
318int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
319int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
320		 u32 nlmsg_pid);
321void __neigh_set_probe_once(struct neighbour *neigh);
322bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
323void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
324int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
325int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
326int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
327int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
328struct neighbour *neigh_event_ns(struct neigh_table *tbl,
329						u8 *lladdr, void *saddr,
330						struct net_device *dev);
331
332struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
333				      struct neigh_table *tbl);
334void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
335
336static inline
337struct net *neigh_parms_net(const struct neigh_parms *parms)
338{
339	return read_pnet(&parms->net);
340}
341
342unsigned long neigh_rand_reach_time(unsigned long base);
343
344void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
345		    struct sk_buff *skb);
346struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
347				   const void *key, struct net_device *dev,
348				   int creat);
349struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
350				     const void *key, struct net_device *dev);
351int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
352		  struct net_device *dev);
353
354static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
355{
356	return read_pnet(&pneigh->net);
357}
358
359void neigh_app_ns(struct neighbour *n);
360void neigh_for_each(struct neigh_table *tbl,
361		    void (*cb)(struct neighbour *, void *), void *cookie);
362void __neigh_for_each_release(struct neigh_table *tbl,
363			      int (*cb)(struct neighbour *));
364int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
365void pneigh_for_each(struct neigh_table *tbl,
366		     void (*cb)(struct pneigh_entry *));
367
368struct neigh_seq_state {
369	struct seq_net_private p;
370	struct neigh_table *tbl;
371	struct neigh_hash_table *nht;
372	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
373				struct neighbour *n, loff_t *pos);
374	unsigned int bucket;
375	unsigned int flags;
376#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
377#define NEIGH_SEQ_IS_PNEIGH	0x00000002
378#define NEIGH_SEQ_SKIP_NOARP	0x00000004
379};
380void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
381		      unsigned int);
382void *neigh_seq_next(struct seq_file *, void *, loff_t *);
383void neigh_seq_stop(struct seq_file *, void *);
384
385int neigh_proc_dointvec(struct ctl_table *ctl, int write,
386			void __user *buffer, size_t *lenp, loff_t *ppos);
387int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
388				void __user *buffer,
389				size_t *lenp, loff_t *ppos);
390int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
391				   void __user *buffer,
392				   size_t *lenp, loff_t *ppos);
393
394int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
395			  proc_handler *proc_handler);
396void neigh_sysctl_unregister(struct neigh_parms *p);
397
398static inline void __neigh_parms_put(struct neigh_parms *parms)
399{
400	refcount_dec(&parms->refcnt);
401}
402
403static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
404{
405	refcount_inc(&parms->refcnt);
406	return parms;
407}
408
409/*
410 *	Neighbour references
411 */
412
413static inline void neigh_release(struct neighbour *neigh)
414{
415	if (refcount_dec_and_test(&neigh->refcnt))
416		neigh_destroy(neigh);
417}
418
419static inline struct neighbour * neigh_clone(struct neighbour *neigh)
420{
421	if (neigh)
422		refcount_inc(&neigh->refcnt);
423	return neigh;
424}
425
426#define neigh_hold(n)	refcount_inc(&(n)->refcnt)
427
428static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
429{
430	unsigned long now = jiffies;
431	
432	if (neigh->used != now)
433		neigh->used = now;
434	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
435		return __neigh_event_send(neigh, skb);
436	return 0;
437}
438
439#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
440static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
441{
442	unsigned int seq, hh_alen;
443
444	do {
445		seq = read_seqbegin(&hh->hh_lock);
446		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
447		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
448	} while (read_seqretry(&hh->hh_lock, seq));
449	return 0;
450}
451#endif
452
453static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
454{
455	unsigned int seq;
456	unsigned int hh_len;
457
458	do {
459		seq = read_seqbegin(&hh->hh_lock);
460		hh_len = hh->hh_len;
461		if (likely(hh_len <= HH_DATA_MOD)) {
462			/* this is inlined by gcc */
463			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
464		} else {
465			unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
466
467			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
468		}
469	} while (read_seqretry(&hh->hh_lock, seq));
470
471	skb_push(skb, hh_len);
472	return dev_queue_xmit(skb);
473}
474
475static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
476{
477	const struct hh_cache *hh = &n->hh;
478
479	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
480		return neigh_hh_output(hh, skb);
481	else
482		return n->output(n, skb);
483}
484
485static inline struct neighbour *
486__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
487{
488	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
489
490	if (n || !creat)
491		return n;
492
493	n = neigh_create(tbl, pkey, dev);
494	return IS_ERR(n) ? NULL : n;
495}
496
497static inline struct neighbour *
498__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
499  struct net_device *dev)
500{
501	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
502
503	if (n)
504		return n;
505
506	return neigh_create(tbl, pkey, dev);
507}
508
509struct neighbour_cb {
510	unsigned long sched_next;
511	unsigned int flags;
512};
513
514#define LOCALLY_ENQUEUED 0x1
515
516#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
517
518static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
519				     const struct net_device *dev)
520{
521	unsigned int seq;
522
523	do {
524		seq = read_seqbegin(&n->ha_lock);
525		memcpy(dst, n->ha, dev->addr_len);
526	} while (read_seqretry(&n->ha_lock, seq));
527}
528
529
530#endif