net/core/dst.c (Linux v4.6)

/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) To keep the spinlock held only briefly, long-lived
 *    entries are moved to a second list; these are handled
 *    by the garbage-collect task fired by a workqueue.
 * 3) This second list is guarded by a mutex, so that
 *    dst_gc_task() and dst_dev_event() can be synchronized.
 */
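
/* Annotation (not part of the original file): a sketch of how an entry flows
 * through the two lists described above. __dst_free() links a dying dst onto
 * dst_garbage.list under the spinlock and kicks the delayed work;
 * dst_gc_task() then splices that list, destroys unreferenced entries, and
 * parks still-referenced ones on dst_busy_list for a later pass:
 *
 *   __dst_free(dst)  ->  dst_garbage.list  ->  dst_gc_task()
 *                                               |- refcnt == 0: dst_destroy()
 *                                               `- refcnt  > 0: dst_busy_list
 */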

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long-lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry         *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int    delayed = 0;
	int    work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on the gc list, invalidate it and add it to the
			 * gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when the parent is obsoleted.
			 * But we do not have a state "obsoleted, but
			 * referenced by parent", so this is right.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of the delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const u32 dst_default_metrics[RTAX_MAX + 1] = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in
	 * the bss section. We really want to avoid false sharing on this
	 * variable, and to catch any writes to it.
	 */
	[RTAX_MAX] = 0xdeadbeef,
};

void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);
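
/* Annotation (not part of the original file): a hedged sketch of how a
 * protocol might use dst_alloc(). "example_dst_ops" and the function are
 * hypothetical; real users (e.g. ipv4's rtable) embed the dst_entry at the
 * start of their own structure and must also set up ops->kmem_cachep, which
 * dst_alloc() allocates from. */
#if 0	/* illustrative only, not part of the original file */
static struct dst_ops example_dst_ops = {
	.family		= AF_INET,
	.gc_thresh	= 1024,
	/* .kmem_cachep must be initialized before the first dst_alloc() */
};

static struct dst_entry *example_route_create(struct net_device *dev)
{
	/* one reference for the caller, entry starts valid */
	struct dst_entry *dst = dst_alloc(&example_dst_ops, dev, 1,
					  DST_OBSOLETE_NONE, 0);

	if (!dst)
		return NULL;	/* allocation failed or gc refused */
	/* ... protocol-specific initialization ... */
	return dst;		/* caller later drops it with dst_release() */
}
#endif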

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when a
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill the child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;
		unsigned short nocache = dst->flags & DST_NOCACHE;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (unlikely(newrefcnt < 0))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt && unlikely(nocache))
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);
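
/* Annotation (not part of the original file): the typical reference pattern.
 * Cached entries are taken and dropped with dst_hold()/dst_release(); only
 * DST_NOCACHE entries are torn down from dst_release() itself (via RCU),
 * everything else goes through the garbage-collect path above. A hedged
 * sketch: */
#if 0	/* illustrative only */
	struct dst_entry *dst = skb_dst(skb);	/* borrowed from the skb */

	dst_hold(dst);				/* take our own reference */
	/* ... use the route: dst->dev, dst_mtu(dst), dst->output(), ... */
	dst_release(dst);			/* drop it; frees only if NOCACHE */
#endif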

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
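
/* Annotation (not part of the original file): the copy-on-write above is what
 * lets every dst start out pointing at the shared, read-only
 * dst_default_metrics table. The first write through a helper such as
 * dst_metric_set() allocates a private copy; the cmpxchg() resolves the race
 * when two CPUs try to break the COW at once. A hedged sketch: */
#if 0	/* illustrative only */
	/* dst->_metrics still points at the shared read-only table here */
	dst_metric_set(dst, RTAX_MTU, 1400);	/* triggers ->cow_metrics() */
	/* now dst->_metrics points at a private, writable RTAX_MAX array */
#endif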

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

static struct dst_ops md_dst_ops = {
	.family =		AF_UNSPEC,
};

static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);

	dst->input = dst_md_discard;
	dst->output = dst_md_discard_out;

	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);
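
/* Annotation (not part of the original file): metadata dsts carry tunnel
 * metadata rather than a real route; their input/output handlers only warn
 * and drop. A hedged sketch of how a collect-metadata tunnel might attach
 * one to an incoming skb (the tun_info contents are left schematic): */
#if 0	/* illustrative only */
	struct metadata_dst *md = metadata_dst_alloc(0, GFP_ATOMIC);

	if (!md)
		return -ENOMEM;
	/* ... fill md->u.tun_info with the tunnel key, addresses, etc. ... */
	skb_dst_set(skb, &md->dst);	/* skb now owns the reference */
#endif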

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}

struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_subsys_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}

net/core/dst.c (Linux v6.13.7)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/xfrm.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in
	 * the bss section. We really want to avoid false sharing on this
	 * variable, and to catch any writes to it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);

void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_obsolete,
	      unsigned short flags)
{
	dst->dev = dev;
	netdev_hold(dev, &dst->dev_tracker, GFP_ATOMIC);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	rcuref_init(&dst->__rcuref, 1);
	INIT_LIST_HEAD(&dst->rt_uncached);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc &&
	    !(flags & DST_NOCOUNT) &&
	    dst_entries_get_fast(ops) > ops->gc_thresh)
		ops->gc(ops);

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);
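
/* Annotation (not part of the original file): note the API drift from v4.6.
 * The initial_ref parameter is gone (every dst now starts with one rcuref
 * reference), dst_alloc() no longer fails when ops->gc() cannot reclaim, and
 * device references are tracked via dst->dev_tracker. A hedged sketch of the
 * same hypothetical caller as in the v4.6 annotation, ported over: */
#if 0	/* illustrative only */
	struct dst_entry *dst = dst_alloc(&example_dst_ops, dev,
					  DST_OBSOLETE_NONE, 0);
	if (!dst)
		return NULL;	/* only allocation failure is reported now */
#endif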

static void dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child = NULL;

	smp_rmb();

#ifdef CONFIG_XFRM
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

		child = xdst->child;
	}
#endif
	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	netdev_put(dst->dev, &dst->dev_tracker);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst)
		dst_release_immediate(dst);
}

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst_destroy(dst);
}

/* Operations to mark a dst as DEAD and clean up the net device referenced
 * by the dst:
 * 1. put the dst under the blackhole interface and discard all tx/rx
 *    packets on this route.
 * 2. release the net_device
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev);
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->dev = blackhole_netdev;
	netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
			   GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);
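
/* Annotation (not part of the original file): the expected call order when a
 * route is unlinked. A hedged sketch, loosely modelled on what fib code does
 * for each dst whose device is going away: */
#if 0	/* illustrative only */
	dst_dev_put(dst);	/* mark DEAD, reroute to blackhole_netdev */
	dst_release(dst);	/* drop the table's ref; RCU-deferred free */
#endif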

static void dst_count_dec(struct dst_entry *dst)
{
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);
}

void dst_release(struct dst_entry *dst)
{
	if (dst && rcuref_put(&dst->__rcuref)) {
		dst_count_dec(dst);
		call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

void dst_release_immediate(struct dst_entry *dst)
{
	if (dst && rcuref_put(&dst->__rcuref)) {
		dst_count_dec(dst);
		dst_destroy(dst);
	}
}
EXPORT_SYMBOL(dst_release_immediate);
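
/* Annotation (not part of the original file): dst_release() defers the actual
 * destruction through an RCU grace period so lock-free readers still walking
 * the entry stay safe; dst_release_immediate() skips the grace period and is
 * only safe when the caller knows no concurrent reader can hold the dst
 * (e.g. the entry was never published). */
#if 0	/* illustrative only */
	dst_release(dst);		/* normal path: free after RCU grace period */
	dst_release_immediate(dst);	/* never-published dst: free right away */
#endif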

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}

struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}

void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);

void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);

unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);

static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};
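
/* Annotation (not part of the original file): these no-op handlers let a dst
 * be made inert without NULL-checking every ops call. dst_blackhole_ops is
 * used directly for metadata dsts below, and the exported helpers are reused
 * by per-family blackhole route ops (ipv4's blackhole dst_ops, for instance,
 * points its callbacks at them). A hedged sketch of such family-specific
 * reuse: */
#if 0	/* illustrative only */
static struct dst_ops example_blackhole_ops = {
	.family		= AF_INET,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.mtu		= dst_blackhole_mtu,
};
#endif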

static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	if (md_dst->type == METADATA_XFRM)
		dst_release(md_dst->u.xfrm_info.dst_orig);
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);

struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
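
/* Annotation (not part of the original file): the per-cpu variant serves hot
 * paths that would otherwise allocate a metadata dst per packet; BPF's tunnel
 * key helpers are one in-tree user. A hedged sketch (error handling and
 * reference counting omitted): */
#if 0	/* illustrative only */
	struct metadata_dst __percpu *md_dsts;
	struct metadata_dst *md;

	md_dsts = metadata_dst_alloc_percpu(0, METADATA_IP_TUNNEL, GFP_KERNEL);

	/* later, on the packet fast path: */
	md = this_cpu_ptr(md_dsts);
	/* ... fill md->u.tun_info, then attach it to the skb ... */
#endif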

void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

#ifdef CONFIG_DST_CACHE
		if (one_md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
#endif
		if (one_md_dst->type == METADATA_XFRM)
			dst_release(one_md_dst->u.xfrm_info.dst_orig);
	}
	free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);