v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _NET_NEIGHBOUR_H
  3#define _NET_NEIGHBOUR_H
  4
  5#include <linux/neighbour.h>
  6
  7/*
  8 *	Generic neighbour manipulation
  9 *
 10 *	Authors:
 11 *	Pedro Roque		<roque@di.fc.ul.pt>
 12 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 13 *
 14 * 	Changes:
 15 *
 16 *	Harald Welte:		<laforge@gnumonks.org>
 17 *		- Add neighbour cache statistics like rtstat
 18 */
 19
 20#include <linux/atomic.h>
 21#include <linux/refcount.h>
 22#include <linux/netdevice.h>
 23#include <linux/skbuff.h>
 24#include <linux/rcupdate.h>
 25#include <linux/seq_file.h>
 26#include <linux/bitmap.h>
 27
 28#include <linux/err.h>
 29#include <linux/sysctl.h>
 30#include <linux/workqueue.h>
 31#include <net/rtnetlink.h>
 32#include <net/neighbour_tables.h>
 33
 34/*
 35 * NUD stands for "neighbor unreachability detection"
 36 */
 37
 38#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
 39#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
 40#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
 41
 42struct neighbour;
 43
 44enum {
 45	NEIGH_VAR_MCAST_PROBES,
 46	NEIGH_VAR_UCAST_PROBES,
 47	NEIGH_VAR_APP_PROBES,
 48	NEIGH_VAR_MCAST_REPROBES,
 49	NEIGH_VAR_RETRANS_TIME,
 50	NEIGH_VAR_BASE_REACHABLE_TIME,
 51	NEIGH_VAR_DELAY_PROBE_TIME,
 52	NEIGH_VAR_INTERVAL_PROBE_TIME_MS,
 53	NEIGH_VAR_GC_STALETIME,
 54	NEIGH_VAR_QUEUE_LEN_BYTES,
 55	NEIGH_VAR_PROXY_QLEN,
 56	NEIGH_VAR_ANYCAST_DELAY,
 57	NEIGH_VAR_PROXY_DELAY,
 58	NEIGH_VAR_LOCKTIME,
 59#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
 60	/* Following are used as a second way to access one of the above */
 61	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
 62	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
 63	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
 64	/* Following are used by "default" only */
 65	NEIGH_VAR_GC_INTERVAL,
 66	NEIGH_VAR_GC_THRESH1,
 67	NEIGH_VAR_GC_THRESH2,
 68	NEIGH_VAR_GC_THRESH3,
 69	NEIGH_VAR_MAX
 70};
 71
 72struct neigh_parms {
 73	possible_net_t net;
 74	struct net_device *dev;
 75	netdevice_tracker dev_tracker;
 76	struct list_head list;
 77	int	(*neigh_setup)(struct neighbour *);
 78	struct neigh_table *tbl;
 79
 80	void	*sysctl_table;
 81
 82	int dead;
 83	refcount_t refcnt;
 84	struct rcu_head rcu_head;
 85
 86	int	reachable_time;
 87	u32	qlen;
 88	int	data[NEIGH_VAR_DATA_MAX];
 89	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 90};
 91
 92static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
 93{
 94	set_bit(index, p->data_state);
 95	p->data[index] = val;
 96}
 97
 98#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
 99
100/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
101 * In other cases, NEIGH_VAR_SET should be used.
102 */
103#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
104#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
105
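/*
 * Illustrative sketch (editor's addition, not part of this header): inside a
 * driver's ndo_neigh_setup() hook the parms are still being initialised, so
 * NEIGH_VAR_INIT() is used and no bit is recorded in data_state, which lets
 * later changes to the table defaults still propagate; everywhere else
 * NEIGH_VAR_SET() marks the value as explicitly set.  The function name and
 * the tuned value below are hypothetical.
 */
static int example_ndo_neigh_setup(struct net_device *dev,
				   struct neigh_parms *p)
{
	/* hypothetical tuning: fewer unicast probes on this kind of device */
	NEIGH_VAR_INIT(p, UCAST_PROBES, 1);
	return 0;
}
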
106static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
107{
108	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
109}
110
111static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
112{
113	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
114}
115
116struct neigh_statistics {
117	unsigned long allocs;		/* number of allocated neighs */
118	unsigned long destroys;		/* number of destroyed neighs */
119	unsigned long hash_grows;	/* number of hash resizes */
120
121	unsigned long res_failed;	/* number of failed resolutions */
122
123	unsigned long lookups;		/* number of lookups */
124	unsigned long hits;		/* number of hits (among lookups) */
125
126	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
127	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */
128
129	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
130	unsigned long forced_gc_runs;	/* number of forced GC runs */
131
132	unsigned long unres_discards;	/* number of unresolved drops */
133	unsigned long table_fulls;      /* times even gc couldn't help */
134};
135
136#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
137
138struct neighbour {
139	struct hlist_node	hash;
140	struct hlist_node	dev_list;
141	struct neigh_table	*tbl;
142	struct neigh_parms	*parms;
143	unsigned long		confirmed;
144	unsigned long		updated;
145	rwlock_t		lock;
146	refcount_t		refcnt;
147	unsigned int		arp_queue_len_bytes;
148	struct sk_buff_head	arp_queue;
149	struct timer_list	timer;
150	unsigned long		used;
151	atomic_t		probes;
152	u8			nud_state;
153	u8			type;
154	u8			dead;
155	u8			protocol;
156	u32			flags;
157	seqlock_t		ha_lock;
158	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
159	struct hh_cache		hh;
160	int			(*output)(struct neighbour *, struct sk_buff *);
161	const struct neigh_ops	*ops;
162	struct list_head	gc_list;
163	struct list_head	managed_list;
164	struct rcu_head		rcu;
165	struct net_device	*dev;
166	netdevice_tracker	dev_tracker;
167	u8			primary_key[];
168} __randomize_layout;
169
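/*
 * Illustrative sketch (editor's addition): nud_state, used and confirmed are
 * read locklessly on fast paths, hence the READ_ONCE()/WRITE_ONCE() accesses
 * seen later in this header.  The helper below (hypothetical name) is the
 * usual "is this entry usable at all" test against NUD_VALID.
 */
static inline bool example_neigh_is_valid(const struct neighbour *n)
{
	return READ_ONCE(n->nud_state) & NUD_VALID;
}
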
170struct neigh_ops {
171	int			family;
172	void			(*solicit)(struct neighbour *, struct sk_buff *);
173	void			(*error_report)(struct neighbour *, struct sk_buff *);
174	int			(*output)(struct neighbour *, struct sk_buff *);
175	int			(*connected_output)(struct neighbour *, struct sk_buff *);
176};
177
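/*
 * Illustrative sketch (editor's addition): each address family provides a
 * neigh_ops table; ->solicit emits the actual probe for n->primary_key,
 * ->error_report sees packets whose resolution finally failed, and
 * neigh->output is switched between ->output and ->connected_output as the
 * NUD state changes (real families typically plug in neigh_resolve_output()
 * and neigh_connected_output(), declared further down).  All names below are
 * hypothetical.
 */
static void example_solicit(struct neighbour *n, struct sk_buff *skb)
{
	/* a real family would build and send an ARP request / NS here */
}

static void example_error_report(struct neighbour *n, struct sk_buff *skb)
{
	kfree_skb(skb);		/* resolution failed: drop the queued packet */
}

static int example_slow_output(struct neighbour *n, struct sk_buff *skb)
{
	kfree_skb(skb);		/* placeholder for a resolving output path */
	return 0;
}

static const struct neigh_ops example_neigh_ops = {
	.family			= AF_INET,
	.solicit		= example_solicit,
	.error_report		= example_error_report,
	.output			= example_slow_output,
	.connected_output	= example_slow_output,
};
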
178struct pneigh_entry {
179	struct pneigh_entry	*next;
180	possible_net_t		net;
181	struct net_device	*dev;
182	netdevice_tracker	dev_tracker;
183	u32			flags;
184	u8			protocol;
185	u32			key[];
186};
187
188/*
189 *	neighbour table manipulation
190 */
191
192#define NEIGH_NUM_HASH_RND	4
193
194struct neigh_hash_table {
195	struct hlist_head	*hash_heads;
196	unsigned int		hash_shift;
197	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
198	struct rcu_head		rcu;
199};
200
201
202struct neigh_table {
203	int			family;
204	unsigned int		entry_size;
205	unsigned int		key_len;
206	__be16			protocol;
207	__u32			(*hash)(const void *pkey,
208					const struct net_device *dev,
209					__u32 *hash_rnd);
210	bool			(*key_eq)(const struct neighbour *, const void *pkey);
211	int			(*constructor)(struct neighbour *);
212	int			(*pconstructor)(struct pneigh_entry *);
213	void			(*pdestructor)(struct pneigh_entry *);
214	void			(*proxy_redo)(struct sk_buff *skb);
215	int			(*is_multicast)(const void *pkey);
216	bool			(*allow_add)(const struct net_device *dev,
217					     struct netlink_ext_ack *extack);
218	char			*id;
219	struct neigh_parms	parms;
220	struct list_head	parms_list;
221	int			gc_interval;
222	int			gc_thresh1;
223	int			gc_thresh2;
224	int			gc_thresh3;
225	unsigned long		last_flush;
226	struct delayed_work	gc_work;
227	struct delayed_work	managed_work;
228	struct timer_list 	proxy_timer;
229	struct sk_buff_head	proxy_queue;
230	atomic_t		entries;
231	atomic_t		gc_entries;
232	struct list_head	gc_list;
233	struct list_head	managed_list;
234	rwlock_t		lock;
235	unsigned long		last_rand;
236	struct neigh_statistics	__percpu *stats;
237	struct neigh_hash_table __rcu *nht;
238	struct pneigh_entry	**phash_buckets;
239};
240
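/*
 * Illustrative sketch (editor's addition): a protocol declares one
 * neigh_table per family and registers it with neigh_table_init(), declared
 * below; arp_tbl and nd_tbl are the real instances.  The hash callback must
 * fold the per-table hash_rnd[] into the key.  Everything named "example_"
 * is hypothetical, and the mandatory callbacks (constructor and friends) are
 * omitted here.
 */
static __u32 example_hash(const void *pkey, const struct net_device *dev,
			  __u32 *hash_rnd)
{
	/* toy mix of a 32-bit key with the per-table random seed */
	return *(const u32 *)pkey ^ hash_rnd[0];
}

static bool example_key_eq(const struct neighbour *n, const void *pkey)
{
	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
}

static struct neigh_table example_tbl = {
	.family		= AF_INET,
	.key_len	= sizeof(u32),
	.protocol	= cpu_to_be16(ETH_P_IP),
	.hash		= example_hash,
	.key_eq		= example_key_eq,
	.id		= "example_cache",
	.parms		= {
		.tbl	= &example_tbl,
	},
};
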
241static inline int neigh_parms_family(struct neigh_parms *p)
242{
243	return p->tbl->family;
244}
245
246#define NEIGH_PRIV_ALIGN	sizeof(long long)
247#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)
248
249static inline void *neighbour_priv(const struct neighbour *n)
250{
251	return (char *)n + n->tbl->entry_size;
252}
253
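/*
 * Illustrative sketch (editor's addition): tbl->entry_size covers the
 * neighbour structure plus its key, so neighbour_priv() points just past
 * that; users that reserve extra room per entry keep their private state
 * there.  The struct and helper below are hypothetical.
 */
struct example_neigh_priv {
	unsigned long last_probe;	/* hypothetical per-neighbour bookkeeping */
};

static inline struct example_neigh_priv *example_priv(const struct neighbour *n)
{
	return neighbour_priv(n);
}
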
254/* flags for neigh_update() */
255#define NEIGH_UPDATE_F_OVERRIDE			BIT(0)
256#define NEIGH_UPDATE_F_WEAK_OVERRIDE		BIT(1)
257#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	BIT(2)
258#define NEIGH_UPDATE_F_USE			BIT(3)
259#define NEIGH_UPDATE_F_MANAGED			BIT(4)
260#define NEIGH_UPDATE_F_EXT_LEARNED		BIT(5)
261#define NEIGH_UPDATE_F_ISROUTER			BIT(6)
262#define NEIGH_UPDATE_F_ADMIN			BIT(7)
263
264/* In-kernel representation for NDA_FLAGS_EXT flags: */
265#define NTF_OLD_MASK		0xff
266#define NTF_EXT_SHIFT		8
267#define NTF_EXT_MASK		(NTF_EXT_MANAGED)
268
269#define NTF_MANAGED		(NTF_EXT_MANAGED << NTF_EXT_SHIFT)
270
271extern const struct nla_policy nda_policy[];
272
273#define neigh_for_each_in_bucket(pos, head) hlist_for_each_entry(pos, head, hash)
274#define neigh_for_each_in_bucket_rcu(pos, head) \
275	hlist_for_each_entry_rcu(pos, head, hash)
276#define neigh_for_each_in_bucket_safe(pos, tmp, head) \
277	hlist_for_each_entry_safe(pos, tmp, head, hash)
278
279static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
280{
281	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
282}
283
284static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
285{
286	const u32 *n32 = (const u32 *)n->primary_key;
287	const u32 *p32 = pkey;
288
289	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
290		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
291}
292
293static inline struct neighbour *___neigh_lookup_noref(
294	struct neigh_table *tbl,
295	bool (*key_eq)(const struct neighbour *n, const void *pkey),
296	__u32 (*hash)(const void *pkey,
297		      const struct net_device *dev,
298		      __u32 *hash_rnd),
299	const void *pkey,
300	struct net_device *dev)
301{
302	struct neigh_hash_table *nht = rcu_dereference(tbl->nht);
303	struct neighbour *n;
304	u32 hash_val;
305
306	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
307	neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[hash_val])
308		if (n->dev == dev && key_eq(n, pkey))
309			return n;
310
311	return NULL;
312}
313
314static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
315						     const void *pkey,
316						     struct net_device *dev)
317{
318	return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
319}
320
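/*
 * Illustrative sketch (editor's addition): the _noref lookups must run under
 * rcu_read_lock(); a caller that wants to keep the entry past the read
 * section can only take a reference while the entry is provably still live.
 * The helper name is hypothetical; the reference is dropped later with
 * neigh_release(), defined further down.
 */
static inline struct neighbour *example_lookup_hold(struct neigh_table *tbl,
						    const void *pkey,
						    struct net_device *dev)
{
	struct neighbour *n;

	rcu_read_lock();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;	/* entry is already being torn down */
	rcu_read_unlock();

	return n;
}
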
321static inline void neigh_confirm(struct neighbour *n)
322{
323	if (n) {
324		unsigned long now = jiffies;
325
326		/* avoid dirtying neighbour */
327		if (READ_ONCE(n->confirmed) != now)
328			WRITE_ONCE(n->confirmed, now);
329	}
330}
331
332void neigh_table_init(int index, struct neigh_table *tbl);
333int neigh_table_clear(int index, struct neigh_table *tbl);
334struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
335			       struct net_device *dev);
336struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
337				 struct net_device *dev, bool want_ref);
338static inline struct neighbour *neigh_create(struct neigh_table *tbl,
339					     const void *pkey,
340					     struct net_device *dev)
341{
342	return __neigh_create(tbl, pkey, dev, true);
343}
344void neigh_destroy(struct neighbour *neigh);
345int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
346		       const bool immediate_ok);
347int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
348		 u32 nlmsg_pid);
349void __neigh_set_probe_once(struct neighbour *neigh);
350bool neigh_remove_one(struct neighbour *ndel);
351void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
352int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
353int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
354int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
355int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
356int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
357struct neighbour *neigh_event_ns(struct neigh_table *tbl,
358						u8 *lladdr, void *saddr,
359						struct net_device *dev);
360
361struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
362				      struct neigh_table *tbl);
363void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
364
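/*
 * Illustrative sketch (editor's addition): an address learned from the
 * datapath (say, a received protocol reply) is fed back through
 * neigh_update(), declared above.  Passing WEAK_OVERRIDE alongside OVERRIDE
 * means a conflicting link-layer address makes a connected entry suspect
 * (STALE) instead of silently replacing the cached one.  The helper name and
 * flag choice are a plausible pattern, not lifted from a specific caller.
 */
static inline void example_learned_from_reply(struct neighbour *n,
					      const u8 *lladdr)
{
	neigh_update(n, lladdr, NUD_REACHABLE,
		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE, 0);
}
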
365static inline
366struct net *neigh_parms_net(const struct neigh_parms *parms)
367{
368	return read_pnet(&parms->net);
369}
370
371unsigned long neigh_rand_reach_time(unsigned long base);
372
373void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
374		    struct sk_buff *skb);
375struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
376				   const void *key, struct net_device *dev,
377				   int creat);
378struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
379				     const void *key, struct net_device *dev);
380int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
381		  struct net_device *dev);
382
383static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
384{
385	return read_pnet(&pneigh->net);
386}
387
388void neigh_app_ns(struct neighbour *n);
389void neigh_for_each(struct neigh_table *tbl,
390		    void (*cb)(struct neighbour *, void *), void *cookie);
391void __neigh_for_each_release(struct neigh_table *tbl,
392			      int (*cb)(struct neighbour *));
393int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
394
395struct neigh_seq_state {
396	struct seq_net_private p;
397	struct neigh_table *tbl;
398	struct neigh_hash_table *nht;
399	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
400				struct neighbour *n, loff_t *pos);
401	unsigned int bucket;
402	unsigned int flags;
403#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
404#define NEIGH_SEQ_IS_PNEIGH	0x00000002
405#define NEIGH_SEQ_SKIP_NOARP	0x00000004
406};
407void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
408		      unsigned int);
409void *neigh_seq_next(struct seq_file *, void *, loff_t *);
410void neigh_seq_stop(struct seq_file *, void *);
411
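/*
 * Illustrative sketch (editor's addition): a per-table /proc dump wires its
 * seq_operations straight to these iterators, roughly the way the IPv4 ARP
 * cache does; NEIGH_SEQ_SKIP_NOARP keeps NOARP entries out of the listing.
 * example_tbl is the hypothetical table from the sketch above and
 * example_seq_show is a hypothetical ->show callback.
 */
extern int example_seq_show(struct seq_file *seq, void *v);	/* hypothetical */

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &example_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static const struct seq_operations example_seq_ops = {
	.start	= example_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= example_seq_show,
};
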
412int neigh_proc_dointvec(const struct ctl_table *ctl, int write,
413			void *buffer, size_t *lenp, loff_t *ppos);
414int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write,
415				void *buffer,
416				size_t *lenp, loff_t *ppos);
417int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
418				   void *buffer, size_t *lenp, loff_t *ppos);
419
420int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
421			  proc_handler *proc_handler);
422void neigh_sysctl_unregister(struct neigh_parms *p);
423
424static inline void __neigh_parms_put(struct neigh_parms *parms)
425{
426	refcount_dec(&parms->refcnt);
427}
428
429static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
430{
431	refcount_inc(&parms->refcnt);
432	return parms;
433}
434
435/*
436 *	Neighbour references
437 */
438
439static inline void neigh_release(struct neighbour *neigh)
440{
441	if (refcount_dec_and_test(&neigh->refcnt))
442		neigh_destroy(neigh);
443}
444
445static inline struct neighbour * neigh_clone(struct neighbour *neigh)
446{
447	if (neigh)
448		refcount_inc(&neigh->refcnt);
449	return neigh;
450}
451
452#define neigh_hold(n)	refcount_inc(&(n)->refcnt)
453
454static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
455						  struct sk_buff *skb,
456						  const bool immediate_ok)
457{
458	unsigned long now = jiffies;
459
460	if (READ_ONCE(neigh->used) != now)
461		WRITE_ONCE(neigh->used, now);
462	if (!(READ_ONCE(neigh->nud_state) & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
463		return __neigh_event_send(neigh, skb, immediate_ok);
464	return 0;
465}
466
467static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
468{
469	return neigh_event_send_probe(neigh, skb, true);
470}
471
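/*
 * Illustrative sketch (editor's addition): callers that only want to nudge
 * resolution along, without queueing a packet, pass a NULL skb; entries
 * already in CONNECTED, DELAY or PROBE state are left alone by the fast test
 * above.  The helper name is hypothetical.
 */
static inline void example_kick_resolution(struct neighbour *n)
{
	if (!(READ_ONCE(n->nud_state) & NUD_VALID))
		neigh_event_send(n, NULL);
}
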
472#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
473static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
474{
475	unsigned int seq, hh_alen;
476
477	do {
478		seq = read_seqbegin(&hh->hh_lock);
479		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
480		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
481	} while (read_seqretry(&hh->hh_lock, seq));
482	return 0;
483}
484#endif
485
486static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
487{
488	unsigned int hh_alen = 0;
489	unsigned int seq;
490	unsigned int hh_len;
491
492	do {
493		seq = read_seqbegin(&hh->hh_lock);
494		hh_len = READ_ONCE(hh->hh_len);
495		if (likely(hh_len <= HH_DATA_MOD)) {
496			hh_alen = HH_DATA_MOD;
497
498			/* skb_push() would proceed silently if we have room for
499			 * the unaligned size but not for the aligned size:
500			 * check headroom explicitly.
501			 */
502			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
503				/* this is inlined by gcc */
504				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
505				       HH_DATA_MOD);
506			}
507		} else {
508			hh_alen = HH_DATA_ALIGN(hh_len);
509
510			if (likely(skb_headroom(skb) >= hh_alen)) {
511				memcpy(skb->data - hh_alen, hh->hh_data,
512				       hh_alen);
513			}
514		}
515	} while (read_seqretry(&hh->hh_lock, seq));
516
517	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
518		kfree_skb(skb);
519		return NET_XMIT_DROP;
520	}
521
522	__skb_push(skb, hh_len);
523	return dev_queue_xmit(skb);
524}
525
526static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
527			       bool skip_cache)
528{
529	const struct hh_cache *hh = &n->hh;
530
531	/* n->nud_state and hh->hh_len could be changed under us.
532	 * neigh_hh_output() is taking care of the race later.
533	 */
534	if (!skip_cache &&
535	    (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
536	    READ_ONCE(hh->hh_len))
537		return neigh_hh_output(hh, skb);
538
539	return READ_ONCE(n->output)(n, skb);
540}
541
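/*
 * Illustrative sketch (editor's addition): the classic L3 transmit step, in
 * the spirit of the IPv4/IPv6 output paths: look the next hop up locklessly,
 * create it on demand without taking a reference, then let neigh_output()
 * either use the cached header or fall back to n->output() for resolution.
 * The function name, table and key are placeholders.
 */
static inline int example_finish_output(struct neigh_table *tbl,
					const void *pkey,
					struct net_device *dev,
					struct sk_buff *skb)
{
	struct neighbour *n;
	int ret;

	rcu_read_lock();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (unlikely(!n))
		n = __neigh_create(tbl, pkey, dev, false);
	if (IS_ERR(n)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}
	ret = neigh_output(n, skb, false);
	rcu_read_unlock();

	return ret;
}
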
542static inline struct neighbour *
543__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
544{
545	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
546
547	if (n || !creat)
548		return n;
549
550	n = neigh_create(tbl, pkey, dev);
551	return IS_ERR(n) ? NULL : n;
552}
553
554static inline struct neighbour *
555__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
556  struct net_device *dev)
557{
558	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
559
560	if (n)
561		return n;
562
563	return neigh_create(tbl, pkey, dev);
564}
565
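/*
 * Illustrative sketch (editor's addition): the two wrappers above differ only
 * in how a failed create is reported; __neigh_lookup() folds errors into
 * NULL, while __neigh_lookup_errno() returns the ERR_PTR so an ioctl-style
 * caller can propagate the errno.  The helper below, loosely modelled on how
 * static entries get installed, is hypothetical.
 */
static inline int example_install_static(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 const u8 *lladdr)
{
	struct neighbour *n = __neigh_lookup_errno(tbl, pkey, dev);
	int err;

	if (IS_ERR(n))
		return PTR_ERR(n);

	err = neigh_update(n, lladdr, NUD_PERMANENT,
			   NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
	neigh_release(n);

	return err;
}
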
566struct neighbour_cb {
567	unsigned long sched_next;
568	unsigned int flags;
569};
570
571#define LOCALLY_ENQUEUED 0x1
572
573#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
574
575static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
576				     const struct net_device *dev)
577{
578	unsigned int seq;
579
580	do {
581		seq = read_seqbegin(&n->ha_lock);
582		memcpy(dst, n->ha, dev->addr_len);
583	} while (read_seqretry(&n->ha_lock, seq));
584}
585
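/*
 * Illustrative sketch (editor's addition): n->ha can be rewritten while an
 * entry is being updated, so anything exporting the address (ioctl replies,
 * generated ARP/ND packets) copies it through this seqlock'd snapshot rather
 * than a plain memcpy.  The helper name is hypothetical; buf must hold at
 * least n->dev->addr_len bytes.
 */
static inline void example_copy_lladdr(const struct neighbour *n, char *buf)
{
	neigh_ha_snapshot(buf, n, n->dev);
}
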
586static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
587					  int *notify)
588{
589	u8 ndm_flags = 0;
590
591	ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0;
592	if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) {
593		if (ndm_flags & NTF_ROUTER)
594			neigh->flags |= NTF_ROUTER;
595		else
596			neigh->flags &= ~NTF_ROUTER;
597		*notify = 1;
598	}
599}
600#endif
v5.9
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _NET_NEIGHBOUR_H
  3#define _NET_NEIGHBOUR_H
  4
  5#include <linux/neighbour.h>
  6
  7/*
  8 *	Generic neighbour manipulation
  9 *
 10 *	Authors:
 11 *	Pedro Roque		<roque@di.fc.ul.pt>
 12 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 13 *
 14 * 	Changes:
 15 *
 16 *	Harald Welte:		<laforge@gnumonks.org>
 17 *		- Add neighbour cache statistics like rtstat
 18 */
 19
 20#include <linux/atomic.h>
 21#include <linux/refcount.h>
 22#include <linux/netdevice.h>
 23#include <linux/skbuff.h>
 24#include <linux/rcupdate.h>
 25#include <linux/seq_file.h>
 26#include <linux/bitmap.h>
 27
 28#include <linux/err.h>
 29#include <linux/sysctl.h>
 30#include <linux/workqueue.h>
 31#include <net/rtnetlink.h>
 32
 33/*
 34 * NUD stands for "neighbor unreachability detection"
 35 */
 36
 37#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
 38#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
 39#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
 40
 41struct neighbour;
 42
 43enum {
 44	NEIGH_VAR_MCAST_PROBES,
 45	NEIGH_VAR_UCAST_PROBES,
 46	NEIGH_VAR_APP_PROBES,
 47	NEIGH_VAR_MCAST_REPROBES,
 48	NEIGH_VAR_RETRANS_TIME,
 49	NEIGH_VAR_BASE_REACHABLE_TIME,
 50	NEIGH_VAR_DELAY_PROBE_TIME,
 51	NEIGH_VAR_GC_STALETIME,
 52	NEIGH_VAR_QUEUE_LEN_BYTES,
 53	NEIGH_VAR_PROXY_QLEN,
 54	NEIGH_VAR_ANYCAST_DELAY,
 55	NEIGH_VAR_PROXY_DELAY,
 56	NEIGH_VAR_LOCKTIME,
 57#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
 58	/* Following are used as a second way to access one of the above */
 59	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
 60	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
 61	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
 62	/* Following are used by "default" only */
 63	NEIGH_VAR_GC_INTERVAL,
 64	NEIGH_VAR_GC_THRESH1,
 65	NEIGH_VAR_GC_THRESH2,
 66	NEIGH_VAR_GC_THRESH3,
 67	NEIGH_VAR_MAX
 68};
 69
 70struct neigh_parms {
 71	possible_net_t net;
 72	struct net_device *dev;
 73	struct list_head list;
 74	int	(*neigh_setup)(struct neighbour *);
 75	struct neigh_table *tbl;
 76
 77	void	*sysctl_table;
 78
 79	int dead;
 80	refcount_t refcnt;
 81	struct rcu_head rcu_head;
 82
 83	int	reachable_time;
 84	int	data[NEIGH_VAR_DATA_MAX];
 85	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 86};
 87
 88static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
 89{
 90	set_bit(index, p->data_state);
 91	p->data[index] = val;
 92}
 93
 94#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
 95
 96/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
 97 * In other cases, NEIGH_VAR_SET should be used.
 98 */
 99#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
100#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
101
102static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
103{
104	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
105}
106
107static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
108{
109	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
110}
111
112struct neigh_statistics {
113	unsigned long allocs;		/* number of allocated neighs */
114	unsigned long destroys;		/* number of destroyed neighs */
115	unsigned long hash_grows;	/* number of hash resizes */
116
117	unsigned long res_failed;	/* number of failed resolutions */
118
119	unsigned long lookups;		/* number of lookups */
120	unsigned long hits;		/* number of hits (among lookups) */
121
122	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
123	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */
124
125	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
126	unsigned long forced_gc_runs;	/* number of forced GC runs */
127
128	unsigned long unres_discards;	/* number of unresolved drops */
129	unsigned long table_fulls;      /* times even gc couldn't help */
130};
131
132#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
133
134struct neighbour {
135	struct neighbour __rcu	*next;
136	struct neigh_table	*tbl;
137	struct neigh_parms	*parms;
138	unsigned long		confirmed;
139	unsigned long		updated;
140	rwlock_t		lock;
141	refcount_t		refcnt;
142	unsigned int		arp_queue_len_bytes;
143	struct sk_buff_head	arp_queue;
144	struct timer_list	timer;
145	unsigned long		used;
146	atomic_t		probes;
147	__u8			flags;
148	__u8			nud_state;
149	__u8			type;
150	__u8			dead;
151	u8			protocol;
152	seqlock_t		ha_lock;
153	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
154	struct hh_cache		hh;
155	int			(*output)(struct neighbour *, struct sk_buff *);
156	const struct neigh_ops	*ops;
157	struct list_head	gc_list;
158	struct rcu_head		rcu;
159	struct net_device	*dev;
160	u8			primary_key[0];
161} __randomize_layout;
162
163struct neigh_ops {
164	int			family;
165	void			(*solicit)(struct neighbour *, struct sk_buff *);
166	void			(*error_report)(struct neighbour *, struct sk_buff *);
167	int			(*output)(struct neighbour *, struct sk_buff *);
168	int			(*connected_output)(struct neighbour *, struct sk_buff *);
169};
170
171struct pneigh_entry {
172	struct pneigh_entry	*next;
173	possible_net_t		net;
174	struct net_device	*dev;
175	u8			flags;
176	u8			protocol;
177	u8			key[];
178};
179
180/*
181 *	neighbour table manipulation
182 */
183
184#define NEIGH_NUM_HASH_RND	4
185
186struct neigh_hash_table {
187	struct neighbour __rcu	**hash_buckets;
188	unsigned int		hash_shift;
189	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
190	struct rcu_head		rcu;
191};
192
193
194struct neigh_table {
195	int			family;
196	unsigned int		entry_size;
197	unsigned int		key_len;
198	__be16			protocol;
199	__u32			(*hash)(const void *pkey,
200					const struct net_device *dev,
201					__u32 *hash_rnd);
202	bool			(*key_eq)(const struct neighbour *, const void *pkey);
203	int			(*constructor)(struct neighbour *);
204	int			(*pconstructor)(struct pneigh_entry *);
205	void			(*pdestructor)(struct pneigh_entry *);
206	void			(*proxy_redo)(struct sk_buff *skb);
207	bool			(*allow_add)(const struct net_device *dev,
208					     struct netlink_ext_ack *extack);
209	char			*id;
210	struct neigh_parms	parms;
211	struct list_head	parms_list;
212	int			gc_interval;
213	int			gc_thresh1;
214	int			gc_thresh2;
215	int			gc_thresh3;
216	unsigned long		last_flush;
217	struct delayed_work	gc_work;
218	struct timer_list 	proxy_timer;
219	struct sk_buff_head	proxy_queue;
220	atomic_t		entries;
221	atomic_t		gc_entries;
222	struct list_head	gc_list;
223	rwlock_t		lock;
224	unsigned long		last_rand;
225	struct neigh_statistics	__percpu *stats;
226	struct neigh_hash_table __rcu *nht;
227	struct pneigh_entry	**phash_buckets;
228};
229
230enum {
231	NEIGH_ARP_TABLE = 0,
232	NEIGH_ND_TABLE = 1,
233	NEIGH_DN_TABLE = 2,
234	NEIGH_NR_TABLES,
235	NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
236};
237
238static inline int neigh_parms_family(struct neigh_parms *p)
239{
240	return p->tbl->family;
241}
242
243#define NEIGH_PRIV_ALIGN	sizeof(long long)
244#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)
245
246static inline void *neighbour_priv(const struct neighbour *n)
247{
248	return (char *)n + n->tbl->entry_size;
249}
250
251/* flags for neigh_update() */
252#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
253#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
254#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
255#define NEIGH_UPDATE_F_EXT_LEARNED		0x20000000
256#define NEIGH_UPDATE_F_ISROUTER			0x40000000
257#define NEIGH_UPDATE_F_ADMIN			0x80000000
258
259extern const struct nla_policy nda_policy[];
260
261static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
262{
263	return *(const u16 *)n->primary_key == *(const u16 *)pkey;
264}
265
266static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
267{
268	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
269}
270
271static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
272{
273	const u32 *n32 = (const u32 *)n->primary_key;
274	const u32 *p32 = pkey;
275
276	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
277		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
278}
279
280static inline struct neighbour *___neigh_lookup_noref(
281	struct neigh_table *tbl,
282	bool (*key_eq)(const struct neighbour *n, const void *pkey),
283	__u32 (*hash)(const void *pkey,
284		      const struct net_device *dev,
285		      __u32 *hash_rnd),
286	const void *pkey,
287	struct net_device *dev)
288{
289	struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
290	struct neighbour *n;
291	u32 hash_val;
292
293	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
294	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
295	     n != NULL;
296	     n = rcu_dereference_bh(n->next)) {
297		if (n->dev == dev && key_eq(n, pkey))
298			return n;
299	}
300
301	return NULL;
302}
303
304static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
305						     const void *pkey,
306						     struct net_device *dev)
307{
308	return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
309}
310
310
311void neigh_table_init(int index, struct neigh_table *tbl);
312int neigh_table_clear(int index, struct neigh_table *tbl);
313struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
314			       struct net_device *dev);
315struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
316				     const void *pkey);
317struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
318				 struct net_device *dev, bool want_ref);
319static inline struct neighbour *neigh_create(struct neigh_table *tbl,
320					     const void *pkey,
321					     struct net_device *dev)
322{
323	return __neigh_create(tbl, pkey, dev, true);
324}
325void neigh_destroy(struct neighbour *neigh);
326int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
327int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
328		 u32 nlmsg_pid);
329void __neigh_set_probe_once(struct neighbour *neigh);
330bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
331void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
332int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
333int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
334int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
335int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
336int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
337struct neighbour *neigh_event_ns(struct neigh_table *tbl,
338						u8 *lladdr, void *saddr,
339						struct net_device *dev);
340
341struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
342				      struct neigh_table *tbl);
343void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
344
345static inline
346struct net *neigh_parms_net(const struct neigh_parms *parms)
347{
348	return read_pnet(&parms->net);
349}
350
351unsigned long neigh_rand_reach_time(unsigned long base);
352
353void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
354		    struct sk_buff *skb);
355struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
356				   const void *key, struct net_device *dev,
357				   int creat);
358struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
359				     const void *key, struct net_device *dev);
360int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
361		  struct net_device *dev);
362
363static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
364{
365	return read_pnet(&pneigh->net);
366}
367
368void neigh_app_ns(struct neighbour *n);
369void neigh_for_each(struct neigh_table *tbl,
370		    void (*cb)(struct neighbour *, void *), void *cookie);
371void __neigh_for_each_release(struct neigh_table *tbl,
372			      int (*cb)(struct neighbour *));
373int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
374void pneigh_for_each(struct neigh_table *tbl,
375		     void (*cb)(struct pneigh_entry *));
376
377struct neigh_seq_state {
378	struct seq_net_private p;
379	struct neigh_table *tbl;
380	struct neigh_hash_table *nht;
381	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
382				struct neighbour *n, loff_t *pos);
383	unsigned int bucket;
384	unsigned int flags;
385#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
386#define NEIGH_SEQ_IS_PNEIGH	0x00000002
387#define NEIGH_SEQ_SKIP_NOARP	0x00000004
388};
389void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
390		      unsigned int);
391void *neigh_seq_next(struct seq_file *, void *, loff_t *);
392void neigh_seq_stop(struct seq_file *, void *);
393
394int neigh_proc_dointvec(struct ctl_table *ctl, int write,
395			void *buffer, size_t *lenp, loff_t *ppos);
396int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
397				void *buffer,
398				size_t *lenp, loff_t *ppos);
399int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
400				   void *buffer, size_t *lenp, loff_t *ppos);
401
402int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
403			  proc_handler *proc_handler);
404void neigh_sysctl_unregister(struct neigh_parms *p);
405
406static inline void __neigh_parms_put(struct neigh_parms *parms)
407{
408	refcount_dec(&parms->refcnt);
409}
410
411static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
412{
413	refcount_inc(&parms->refcnt);
414	return parms;
415}
416
417/*
418 *	Neighbour references
419 */
420
421static inline void neigh_release(struct neighbour *neigh)
422{
423	if (refcount_dec_and_test(&neigh->refcnt))
424		neigh_destroy(neigh);
425}
426
427static inline struct neighbour * neigh_clone(struct neighbour *neigh)
428{
429	if (neigh)
430		refcount_inc(&neigh->refcnt);
431	return neigh;
432}
433
434#define neigh_hold(n)	refcount_inc(&(n)->refcnt)
435
436static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
437{
438	unsigned long now = jiffies;
439	
440	if (READ_ONCE(neigh->used) != now)
441		WRITE_ONCE(neigh->used, now);
442	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
443		return __neigh_event_send(neigh, skb);
444	return 0;
445}
446
447#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
448static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
449{
450	unsigned int seq, hh_alen;
451
452	do {
453		seq = read_seqbegin(&hh->hh_lock);
454		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
455		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
456	} while (read_seqretry(&hh->hh_lock, seq));
457	return 0;
458}
459#endif
460
461static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
462{
463	unsigned int hh_alen = 0;
464	unsigned int seq;
465	unsigned int hh_len;
466
467	do {
468		seq = read_seqbegin(&hh->hh_lock);
469		hh_len = READ_ONCE(hh->hh_len);
470		if (likely(hh_len <= HH_DATA_MOD)) {
471			hh_alen = HH_DATA_MOD;
472
473			/* skb_push() would proceed silently if we have room for
474			 * the unaligned size but not for the aligned size:
475			 * check headroom explicitly.
476			 */
477			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
478				/* this is inlined by gcc */
479				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
480				       HH_DATA_MOD);
481			}
482		} else {
483			hh_alen = HH_DATA_ALIGN(hh_len);
484
485			if (likely(skb_headroom(skb) >= hh_alen)) {
486				memcpy(skb->data - hh_alen, hh->hh_data,
487				       hh_alen);
488			}
489		}
490	} while (read_seqretry(&hh->hh_lock, seq));
491
492	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
493		kfree_skb(skb);
494		return NET_XMIT_DROP;
495	}
496
497	__skb_push(skb, hh_len);
498	return dev_queue_xmit(skb);
499}
500
501static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
502			       bool skip_cache)
503{
504	const struct hh_cache *hh = &n->hh;
505
506	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache)
507		return neigh_hh_output(hh, skb);
508	else
509		return n->output(n, skb);
510}
511
512static inline struct neighbour *
513__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
514{
515	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
516
517	if (n || !creat)
518		return n;
519
520	n = neigh_create(tbl, pkey, dev);
521	return IS_ERR(n) ? NULL : n;
522}
523
524static inline struct neighbour *
525__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
526  struct net_device *dev)
527{
528	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
529
530	if (n)
531		return n;
532
533	return neigh_create(tbl, pkey, dev);
534}
535
536struct neighbour_cb {
537	unsigned long sched_next;
538	unsigned int flags;
539};
540
541#define LOCALLY_ENQUEUED 0x1
542
543#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
544
545static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
546				     const struct net_device *dev)
547{
548	unsigned int seq;
549
550	do {
551		seq = read_seqbegin(&n->ha_lock);
552		memcpy(dst, n->ha, dev->addr_len);
553	} while (read_seqretry(&n->ha_lock, seq));
554}
555
556static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
557					  int *notify)
558{
559	u8 ndm_flags = 0;
560
561	ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0;
562	if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) {
563		if (ndm_flags & NTF_ROUTER)
564			neigh->flags |= NTF_ROUTER;
565		else
566			neigh->flags &= ~NTF_ROUTER;
567		*notify = 1;
568	}
569}
570#endif