v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/core/dst.c	Protocol independent destination cache.
  4 *
  5 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 *
  7 */
  8
  9#include <linux/bitops.h>
 10#include <linux/errno.h>
 11#include <linux/init.h>
 12#include <linux/kernel.h>
 13#include <linux/workqueue.h>
 14#include <linux/mm.h>
 15#include <linux/module.h>
 16#include <linux/slab.h>
 17#include <linux/netdevice.h>
 18#include <linux/skbuff.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <net/net_namespace.h>
 22#include <linux/sched.h>
 23#include <linux/prefetch.h>
 24#include <net/lwtunnel.h>
 25#include <net/xfrm.h>
 26
 27#include <net/dst.h>
 28#include <net/dst_metadata.h>
 29
 30int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 31{
 32	kfree_skb(skb);
 33	return 0;
 34}
 35EXPORT_SYMBOL(dst_discard_out);
 36
 37const struct dst_metrics dst_default_metrics = {
 38	/* This initializer is needed to force linker to place this variable
 39	 * into const section. Otherwise it might end into bss section.
 40	 * We really want to avoid false sharing on this variable, and catch
 41	 * any writes on it.
 42	 */
 43	.refcnt = REFCOUNT_INIT(1),
 44};
 45EXPORT_SYMBOL(dst_default_metrics);
 46
 47void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 48	      struct net_device *dev, int initial_ref, int initial_obsolete,
 49	      unsigned short flags)
 50{
 51	dst->dev = dev;
 52	if (dev)
 53		dev_hold(dev);
 54	dst->ops = ops;
 55	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 56	dst->expires = 0UL;
 57#ifdef CONFIG_XFRM
 58	dst->xfrm = NULL;
 59#endif
 60	dst->input = dst_discard;
 61	dst->output = dst_discard_out;
 62	dst->error = 0;
 63	dst->obsolete = initial_obsolete;
 64	dst->header_len = 0;
 65	dst->trailer_len = 0;
 66#ifdef CONFIG_IP_ROUTE_CLASSID
 67	dst->tclassid = 0;
 68#endif
 69	dst->lwtstate = NULL;
 70	atomic_set(&dst->__refcnt, initial_ref);
 71	dst->__use = 0;
 72	dst->lastuse = jiffies;
 73	dst->flags = flags;
 74	if (!(flags & DST_NOCOUNT))
 75		dst_entries_add(ops, 1);
 76}
 77EXPORT_SYMBOL(dst_init);
 78
 79void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 80		int initial_ref, int initial_obsolete, unsigned short flags)
 81{
 82	struct dst_entry *dst;
 83
 84	if (ops->gc &&
 85	    !(flags & DST_NOCOUNT) &&
 86	    dst_entries_get_fast(ops) > ops->gc_thresh) {
 87		if (ops->gc(ops)) {
 88			pr_notice_ratelimited("Route cache is full: consider increasing sysctl net.ipv6.route.max_size.\n");
 89			return NULL;
 90		}
 91	}
 92
 93	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 94	if (!dst)
 95		return NULL;
 96
 97	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
 98
 99	return dst;
100}
101EXPORT_SYMBOL(dst_alloc);
102
103struct dst_entry *dst_destroy(struct dst_entry * dst)
104{
105	struct dst_entry *child = NULL;
106
107	smp_rmb();
108
109#ifdef CONFIG_XFRM
110	if (dst->xfrm) {
111		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
112
113		child = xdst->child;
114	}
115#endif
116	if (!(dst->flags & DST_NOCOUNT))
117		dst_entries_add(dst->ops, -1);
118
119	if (dst->ops->destroy)
120		dst->ops->destroy(dst);
121	if (dst->dev)
122		dev_put(dst->dev);
123
124	lwtstate_put(dst->lwtstate);
125
126	if (dst->flags & DST_METADATA)
127		metadata_dst_free((struct metadata_dst *)dst);
128	else
129		kmem_cache_free(dst->ops->kmem_cachep, dst);
130
131	dst = child;
132	if (dst)
133		dst_release_immediate(dst);
134	return NULL;
135}
136EXPORT_SYMBOL(dst_destroy);
137
138static void dst_destroy_rcu(struct rcu_head *head)
139{
140	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
141
142	dst = dst_destroy(dst);
143}
144
145/* Operations to mark dst as DEAD and clean up the net device referenced
146 * by dst:
147 * 1. put the dst under blackhole interface and discard all tx/rx packets
148 *    on this route.
149 * 2. release the net_device
150 * This function should be called when removing routes from the fib tree
151 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
152 * make the next dst_ops->check() fail.
153 */
154void dst_dev_put(struct dst_entry *dst)
155{
156	struct net_device *dev = dst->dev;
157
158	dst->obsolete = DST_OBSOLETE_DEAD;
159	if (dst->ops->ifdown)
160		dst->ops->ifdown(dst, dev, true);
161	dst->input = dst_discard;
162	dst->output = dst_discard_out;
163	dst->dev = blackhole_netdev;
164	dev_hold(dst->dev);
165	dev_put(dev);
166}
167EXPORT_SYMBOL(dst_dev_put);
168
169void dst_release(struct dst_entry *dst)
170{
171	if (dst) {
172		int newrefcnt;
173
174		newrefcnt = atomic_dec_return(&dst->__refcnt);
175		if (WARN_ONCE(newrefcnt < 0, "dst_release underflow"))
176			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
177					     __func__, dst, newrefcnt);
178		if (!newrefcnt)
179			call_rcu(&dst->rcu_head, dst_destroy_rcu);
180	}
181}
182EXPORT_SYMBOL(dst_release);
183
184void dst_release_immediate(struct dst_entry *dst)
185{
186	if (dst) {
187		int newrefcnt;
188
189		newrefcnt = atomic_dec_return(&dst->__refcnt);
190		if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow"))
191			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
192					     __func__, dst, newrefcnt);
193		if (!newrefcnt)
194			dst_destroy(dst);
195	}
196}
197EXPORT_SYMBOL(dst_release_immediate);
198
199u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
200{
201	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
202
203	if (p) {
204		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
205		unsigned long prev, new;
206
207		refcount_set(&p->refcnt, 1);
208		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
209
210		new = (unsigned long) p;
211		prev = cmpxchg(&dst->_metrics, old, new);
212
213		if (prev != old) {
214			kfree(p);
215			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
216			if (prev & DST_METRICS_READ_ONLY)
217				p = NULL;
218		} else if (prev & DST_METRICS_REFCOUNTED) {
219			if (refcount_dec_and_test(&old_p->refcnt))
220				kfree(old_p);
221		}
222	}
223	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
224	return (u32 *)p;
225}
226EXPORT_SYMBOL(dst_cow_metrics_generic);
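
A brief aside, not part of the upstream file: dst->_metrics normally starts out pointing at the shared, read-only dst_default_metrics, so the ->cow_metrics hook above only runs on the first write to a metric. A minimal sketch of the trigger path, using the dst_metric_set()/dst_metrics_write_ptr() helpers from net/dst.h (the helper function name below is hypothetical):

	/* Hypothetical helper, for illustration only. */
	static void example_set_mtu_metric(struct dst_entry *dst)
	{
		/* dst_metric_set() -> dst_metrics_write_ptr(); while the metrics are
		 * still DST_METRICS_READ_ONLY this calls dst->ops->cow_metrics(),
		 * i.e. dst_cow_metrics_generic() above for most users, which
		 * kmalloc()s a private copy, publishes it with cmpxchg(), and
		 * frees its copy if it loses the race to another writer.
		 */
		dst_metric_set(dst, RTAX_MTU, 1400);
	}
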
227
228/* Caller asserts that dst_metrics_read_only(dst) is false.  */
229void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
230{
231	unsigned long prev, new;
232
233	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
234	prev = cmpxchg(&dst->_metrics, old, new);
235	if (prev == old)
236		kfree(__DST_METRICS_PTR(old));
237}
238EXPORT_SYMBOL(__dst_destroy_metrics_generic);
239
240struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
241{
242	return NULL;
243}
244
245u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
246{
247	return NULL;
248}
249
250struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
251					     struct sk_buff *skb,
252					     const void *daddr)
253{
254	return NULL;
255}
256
257void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
258			       struct sk_buff *skb, u32 mtu,
259			       bool confirm_neigh)
260{
261}
262EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
263
264void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
265			    struct sk_buff *skb)
266{
267}
268EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
269
270unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
271{
272	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
273
274	return mtu ? : dst->dev->mtu;
275}
276EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
277
278static struct dst_ops dst_blackhole_ops = {
279	.family		= AF_UNSPEC,
280	.neigh_lookup	= dst_blackhole_neigh_lookup,
281	.check		= dst_blackhole_check,
282	.cow_metrics	= dst_blackhole_cow_metrics,
283	.update_pmtu	= dst_blackhole_update_pmtu,
284	.redirect	= dst_blackhole_redirect,
285	.mtu		= dst_blackhole_mtu,
286};
287
288static void __metadata_dst_init(struct metadata_dst *md_dst,
289				enum metadata_type type, u8 optslen)
290{
291	struct dst_entry *dst;
292
293	dst = &md_dst->dst;
294	dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
295		 DST_METADATA | DST_NOCOUNT);
296	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
297	md_dst->type = type;
298}
299
300struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
301					gfp_t flags)
302{
303	struct metadata_dst *md_dst;
304
305	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
306	if (!md_dst)
307		return NULL;
308
309	__metadata_dst_init(md_dst, type, optslen);
310
311	return md_dst;
312}
313EXPORT_SYMBOL_GPL(metadata_dst_alloc);
314
315void metadata_dst_free(struct metadata_dst *md_dst)
316{
317#ifdef CONFIG_DST_CACHE
318	if (md_dst->type == METADATA_IP_TUNNEL)
319		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
320#endif
321	kfree(md_dst);
322}
323EXPORT_SYMBOL_GPL(metadata_dst_free);
324
325struct metadata_dst __percpu *
326metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
327{
328	int cpu;
329	struct metadata_dst __percpu *md_dst;
330
331	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
332				    __alignof__(struct metadata_dst), flags);
333	if (!md_dst)
334		return NULL;
335
336	for_each_possible_cpu(cpu)
337		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);
338
339	return md_dst;
340}
341EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
342
343void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
344{
345#ifdef CONFIG_DST_CACHE
346	int cpu;
347
348	for_each_possible_cpu(cpu) {
349		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);
350
351		if (one_md_dst->type == METADATA_IP_TUNNEL)
352			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
353	}
354#endif
355	free_percpu(md_dst);
356}
357EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);
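
To make the v5.14 reference counting concrete, here is a minimal, hypothetical sketch of a caller (the function name and the ops/dev arguments are placeholders, not from the tree): dst_alloc() seeds __refcnt with initial_ref, further users take dst_hold(), and the final dst_release() frees the entry through call_rcu(dst_destroy_rcu).

	#include <net/dst.h>

	/* Hypothetical caller, for illustration only. */
	static void example_dst_lifecycle(struct dst_ops *ops, struct net_device *dev)
	{
		struct dst_entry *dst;

		/* v5.14: the caller picks the initial reference count */
		dst = dst_alloc(ops, dev, 1, DST_OBSOLETE_NONE, 0);
		if (!dst)
			return;

		dst_hold(dst);		/* a second user, e.g. an skb via skb_dst_set() */
		dst_release(dst);	/* second user done */
		dst_release(dst);	/* last put: dst_destroy() runs after an RCU grace period */
	}
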
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/core/dst.c	Protocol independent destination cache.
  4 *
  5 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 *
  7 */
  8
  9#include <linux/bitops.h>
 10#include <linux/errno.h>
 11#include <linux/init.h>
 12#include <linux/kernel.h>
 13#include <linux/workqueue.h>
 14#include <linux/mm.h>
 15#include <linux/module.h>
 16#include <linux/slab.h>
 17#include <linux/netdevice.h>
 18#include <linux/skbuff.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <net/net_namespace.h>
 22#include <linux/sched.h>
 23#include <linux/prefetch.h>
 24#include <net/lwtunnel.h>
 25#include <net/xfrm.h>
 26
 27#include <net/dst.h>
 28#include <net/dst_metadata.h>
 29
 30int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 31{
 32	kfree_skb(skb);
 33	return 0;
 34}
 35EXPORT_SYMBOL(dst_discard_out);
 36
 37const struct dst_metrics dst_default_metrics = {
 38	/* This initializer is needed to force linker to place this variable
 39	 * into const section. Otherwise it might end into bss section.
 40	 * We really want to avoid false sharing on this variable, and catch
 41	 * any writes on it.
 42	 */
 43	.refcnt = REFCOUNT_INIT(1),
 44};
 45EXPORT_SYMBOL(dst_default_metrics);
 46
 47void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 48	      struct net_device *dev, int initial_obsolete,
 49	      unsigned short flags)
 50{
 51	dst->dev = dev;
 52	netdev_hold(dev, &dst->dev_tracker, GFP_ATOMIC);
 53	dst->ops = ops;
 54	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 55	dst->expires = 0UL;
 56#ifdef CONFIG_XFRM
 57	dst->xfrm = NULL;
 58#endif
 59	dst->input = dst_discard;
 60	dst->output = dst_discard_out;
 61	dst->error = 0;
 62	dst->obsolete = initial_obsolete;
 63	dst->header_len = 0;
 64	dst->trailer_len = 0;
 65#ifdef CONFIG_IP_ROUTE_CLASSID
 66	dst->tclassid = 0;
 67#endif
 68	dst->lwtstate = NULL;
 69	rcuref_init(&dst->__rcuref, 1);
 70	INIT_LIST_HEAD(&dst->rt_uncached);
 71	dst->__use = 0;
 72	dst->lastuse = jiffies;
 73	dst->flags = flags;
 74	if (!(flags & DST_NOCOUNT))
 75		dst_entries_add(ops, 1);
 76}
 77EXPORT_SYMBOL(dst_init);
 78
 79void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 80		int initial_obsolete, unsigned short flags)
 81{
 82	struct dst_entry *dst;
 83
 84	if (ops->gc &&
 85	    !(flags & DST_NOCOUNT) &&
 86	    dst_entries_get_fast(ops) > ops->gc_thresh)
 87		ops->gc(ops);
 88
 89	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 90	if (!dst)
 91		return NULL;
 92
 93	dst_init(dst, ops, dev, initial_obsolete, flags);
 94
 95	return dst;
 96}
 97EXPORT_SYMBOL(dst_alloc);
 98
 99static void dst_destroy(struct dst_entry *dst)
100{
101	struct dst_entry *child = NULL;
102
103	smp_rmb();
104
105#ifdef CONFIG_XFRM
106	if (dst->xfrm) {
107		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
108
109		child = xdst->child;
110	}
111#endif
112	if (dst->ops->destroy)
113		dst->ops->destroy(dst);
114	netdev_put(dst->dev, &dst->dev_tracker);
115
116	lwtstate_put(dst->lwtstate);
117
118	if (dst->flags & DST_METADATA)
119		metadata_dst_free((struct metadata_dst *)dst);
120	else
121		kmem_cache_free(dst->ops->kmem_cachep, dst);
122
123	dst = child;
124	if (dst)
125		dst_release_immediate(dst);
126}
127
128static void dst_destroy_rcu(struct rcu_head *head)
129{
130	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
131
132	dst_destroy(dst);
133}
134
135/* Operations to mark dst as DEAD and clean up the net device referenced
136 * by dst:
137 * 1. put the dst under blackhole interface and discard all tx/rx packets
138 *    on this route.
139 * 2. release the net_device
140 * This function should be called when removing routes from the fib tree
141 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
142 * make the next dst_ops->check() fail.
143 */
144void dst_dev_put(struct dst_entry *dst)
145{
146	struct net_device *dev = dst->dev;
147
148	dst->obsolete = DST_OBSOLETE_DEAD;
149	if (dst->ops->ifdown)
150		dst->ops->ifdown(dst, dev);
151	dst->input = dst_discard;
152	dst->output = dst_discard_out;
153	dst->dev = blackhole_netdev;
154	netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
155			   GFP_ATOMIC);
156}
157EXPORT_SYMBOL(dst_dev_put);
158
159static void dst_count_dec(struct dst_entry *dst)
160{
161	if (!(dst->flags & DST_NOCOUNT))
162		dst_entries_add(dst->ops, -1);
163}
164
165void dst_release(struct dst_entry *dst)
166{
167	if (dst && rcuref_put(&dst->__rcuref)) {
168		dst_count_dec(dst);
169		call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
170	}
171}
172EXPORT_SYMBOL(dst_release);
173
174void dst_release_immediate(struct dst_entry *dst)
175{
176	if (dst && rcuref_put(&dst->__rcuref)) {
177		dst_count_dec(dst);
178		dst_destroy(dst);
179	}
180}
181EXPORT_SYMBOL(dst_release_immediate);
182
183u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
184{
185	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
186
187	if (p) {
188		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
189		unsigned long prev, new;
190
191		refcount_set(&p->refcnt, 1);
192		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
193
194		new = (unsigned long) p;
195		prev = cmpxchg(&dst->_metrics, old, new);
196
197		if (prev != old) {
198			kfree(p);
199			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
200			if (prev & DST_METRICS_READ_ONLY)
201				p = NULL;
202		} else if (prev & DST_METRICS_REFCOUNTED) {
203			if (refcount_dec_and_test(&old_p->refcnt))
204				kfree(old_p);
205		}
206	}
207	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
208	return (u32 *)p;
209}
210EXPORT_SYMBOL(dst_cow_metrics_generic);
211
212/* Caller asserts that dst_metrics_read_only(dst) is false.  */
213void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
214{
215	unsigned long prev, new;
216
217	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
218	prev = cmpxchg(&dst->_metrics, old, new);
219	if (prev == old)
220		kfree(__DST_METRICS_PTR(old));
221}
222EXPORT_SYMBOL(__dst_destroy_metrics_generic);
223
224struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
225{
226	return NULL;
227}
228
229u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
230{
231	return NULL;
232}
233
234struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
235					     struct sk_buff *skb,
236					     const void *daddr)
237{
238	return NULL;
239}
240
241void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
242			       struct sk_buff *skb, u32 mtu,
243			       bool confirm_neigh)
244{
245}
246EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
247
248void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
249			    struct sk_buff *skb)
250{
251}
252EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
253
254unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
255{
256	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
257
258	return mtu ? : dst->dev->mtu;
259}
260EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
261
262static struct dst_ops dst_blackhole_ops = {
263	.family		= AF_UNSPEC,
264	.neigh_lookup	= dst_blackhole_neigh_lookup,
265	.check		= dst_blackhole_check,
266	.cow_metrics	= dst_blackhole_cow_metrics,
267	.update_pmtu	= dst_blackhole_update_pmtu,
268	.redirect	= dst_blackhole_redirect,
269	.mtu		= dst_blackhole_mtu,
270};
271
272static void __metadata_dst_init(struct metadata_dst *md_dst,
273				enum metadata_type type, u8 optslen)
274{
275	struct dst_entry *dst;
276
277	dst = &md_dst->dst;
278	dst_init(dst, &dst_blackhole_ops, NULL, DST_OBSOLETE_NONE,
279		 DST_METADATA | DST_NOCOUNT);
280	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
281	md_dst->type = type;
282}
283
284struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
285					gfp_t flags)
286{
287	struct metadata_dst *md_dst;
288
289	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
290	if (!md_dst)
291		return NULL;
292
293	__metadata_dst_init(md_dst, type, optslen);
294
295	return md_dst;
296}
297EXPORT_SYMBOL_GPL(metadata_dst_alloc);
298
299void metadata_dst_free(struct metadata_dst *md_dst)
300{
301#ifdef CONFIG_DST_CACHE
302	if (md_dst->type == METADATA_IP_TUNNEL)
303		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
304#endif
305	if (md_dst->type == METADATA_XFRM)
306		dst_release(md_dst->u.xfrm_info.dst_orig);
307	kfree(md_dst);
308}
309EXPORT_SYMBOL_GPL(metadata_dst_free);
310
311struct metadata_dst __percpu *
312metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
313{
314	int cpu;
315	struct metadata_dst __percpu *md_dst;
316
317	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
318				    __alignof__(struct metadata_dst), flags);
319	if (!md_dst)
320		return NULL;
321
322	for_each_possible_cpu(cpu)
323		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);
324
325	return md_dst;
326}
327EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
328
329void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
330{
331	int cpu;
332
333	for_each_possible_cpu(cpu) {
334		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);
335
336#ifdef CONFIG_DST_CACHE
337		if (one_md_dst->type == METADATA_IP_TUNNEL)
338			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
339#endif
340		if (one_md_dst->type == METADATA_XFRM)
341			dst_release(one_md_dst->u.xfrm_info.dst_orig);
342	}
343	free_percpu(md_dst);
344}
345EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);
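
The same hypothetical sketch against the v6.13 API highlights the externally visible changes: dst_alloc()/dst_init() no longer take an initial_ref argument (the reference count is an rcuref initialized to 1), and the device reference is tracked with netdev_hold()/netdev_put() through dst->dev_tracker instead of plain dev_hold()/dev_put(). As above, the helper name and its arguments are placeholders.

	#include <net/dst.h>

	/* Hypothetical caller, for illustration only (v6.13 signatures). */
	static void example_dst_lifecycle(struct dst_ops *ops, struct net_device *dev)
	{
		struct dst_entry *dst;

		/* v6.13: no initial_ref argument; dst_init() sets __rcuref to 1 */
		dst = dst_alloc(ops, dev, DST_OBSOLETE_NONE, 0);
		if (!dst)
			return;

		dst_hold(dst);		/* rcuref_get(&dst->__rcuref) */
		dst_release(dst);	/* rcuref_put(); nothing freed yet */
		dst_release(dst);	/* last put: dst_destroy_rcu queued via call_rcu_hurry() */
	}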