/* SPDX-License-Identifier: GPL-2.0 */
/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/refcount.h>
#include <net/neighbour.h>
#include <asm/processor.h>
#include <linux/indirect_call_wrapper.h>

struct sk_buff;

struct dst_entry {
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOCOUNT		0x0008
#define DST_FAKE_RTABLE		0x0010
#define DST_XFRM_TUNNEL		0x0020
#define DST_XFRM_QUEUE		0x0040
#define DST_METADATA		0x0080

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
#ifdef CONFIG_64BIT
	atomic_t		__refcnt;	/* 64-bit offset 64 */
#endif
	int			__use;
	unsigned long		lastuse;
	struct lwtunnel_state	*lwtstate;
	struct rcu_head		rcu_head;
	short			error;
	short			__pad;
	__u32			tclassid;
#ifndef CONFIG_64BIT
	atomic_t		__refcnt;	/* 32-bit offset 64 */
#endif
};

struct dst_metrics {
	u32		metrics[RTAX_MAX];
	refcount_t	refcnt;
} __aligned(4);		/* Low pointer bits contain DST_METRICS_FLAGS */
extern const struct dst_metrics dst_default_metrics;

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);

#define DST_METRICS_READ_ONLY	0x1UL
#define DST_METRICS_REFCOUNTED	0x2UL
#define DST_METRICS_FLAGS	0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}
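
/*
 * Illustrative sketch (not kernel code): because struct dst_metrics is at
 * least 4-byte aligned, the two low bits of the metrics pointer stored in
 * dst->_metrics are always zero and are reused to carry
 * DST_METRICS_READ_ONLY / DST_METRICS_REFCOUNTED:
 *
 *	unsigned long v = dst->_metrics;
 *	u32 *p = (u32 *)(v & ~DST_METRICS_FLAGS);	// the real pointer
 *	bool ro = v & DST_METRICS_READ_ONLY;		// flag in bit 0
 */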

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

/* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA	(1U << 31)

#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

INDIRECT_CALLABLE_DECLARE(unsigned int ip6_mtu(const struct dst_entry *));
INDIRECT_CALLABLE_DECLARE(unsigned int ipv4_mtu(const struct dst_entry *));
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst);
}
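
/*
 * Note: INDIRECT_CALL_INET() (from <linux/indirect_call_wrapper.h>) lets the
 * compiler turn the indirect dst->ops->mtu call into direct calls to the two
 * most likely targets, avoiding retpoline overhead on the fast path. A rough
 * sketch of what it expands to when both IPv4 and IPv6 are built in:
 *
 *	if (dst->ops->mtu == ip6_mtu)
 *		return ip6_mtu(dst);
 *	if (dst->ops->mtu == ipv4_mtu)
 *		return ipv4_mtu(dst);
 *	return dst->ops->mtu(dst);
 */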

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}
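
/*
 * Illustrative sketch (not kernel code): storing an RTT measured in jiffies
 * back into the metrics array requires the inverse conversion, e.g.:
 *
 *	unsigned long rtt = dst_metric_rtt(dst, RTAX_RTT);	// jiffies
 *	dst_metric_set(dst, RTAX_RTT, jiffies_to_msecs(rtt));	// ms, user ABI
 */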

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1 << metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the placement of __refcnt in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	if (unlikely(time != dst->lastuse)) {
		dst->__use++;
		dst->lastuse = time;
	}
}

static inline void dst_hold_and_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst_use_noref(dst, time);
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		dst_hold(dst);
	return dst;
}
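
/*
 * Illustrative sketch (not kernel code): every dst_hold()/dst_clone() must
 * be balanced by a dst_release() once the caller is done with the entry:
 *
 *	struct dst_entry *dst = dst_clone(skb_dst(skb));	// +1 ref
 *	...use dst after the skb is gone...
 *	dst_release(dst);					// -1 ref
 */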

void dst_release(struct dst_entry *dst);

void dst_release_immediate(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}
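
/*
 * Note: skb->_skb_refdst packs the dst pointer together with the
 * SKB_DST_NOREF bit (defined in <linux/skbuff.h>) in its low bit, so a
 * "noref" dst (valid only inside the current RCU read-side section) can be
 * told apart from a refcounted one without extra storage; SKB_DST_PTRMASK
 * recovers the real pointer.
 */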

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	return atomic_inc_not_zero(&dst->__refcnt);
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted and not destroyed, grab a ref on it.
 * Returns true if dst is refcounted.
 */
static inline bool skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		WARN_ON(!rcu_read_lock_held());
		if (!dst_hold_safe(dst))
			dst = NULL;

		skb->_skb_refdst = (unsigned long)dst;
	}

	return skb->_skb_refdst != 0UL;
}
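
/*
 * Illustrative sketch (not kernel code): a caller that queues an skb past
 * the current RCU read-side section must convert a noref dst into a real
 * reference first, and drop the skb if the dst is already dead:
 *
 *	rcu_read_lock();
 *	...routing attaches a noref dst to skb...
 *	if (!skb_dst_force(skb)) {
 *		kfree_skb(skb);		// dst refcount had already hit zero
 *		goto out;
 *	}
 *	...skb may now outlive the RCU section...
 */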

/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, the packet is going to re-enter (netif_rx()) our
 * stack, so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, the packet is going to re-enter (netif_rx()) our
 * stack, so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}
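
/*
 * Illustrative sketch (not kernel code): a tunnel driver's receive path
 * typically looks like this after stripping the outer headers:
 *
 *	skb_pull(skb, tunnel_hdr_len);			// drop encapsulation
 *	skb->protocol = htons(ETH_P_IP);		// inner protocol
 *	skb_tunnel_rx(skb, tunnel_dev, dev_net(tunnel_dev));
 *	netif_rx(skb);					// re-enter the stack
 */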

static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);

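/* dst_confirm() is a no-op in this version of the header; neighbour
 * reachability confirmation is handled by dst_confirm_neigh() below.
 */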
static inline void dst_confirm(struct dst_entry *dst)
{
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n;

	if (WARN_ON_ONCE(!dst->ops->neigh_lookup))
		return NULL;

	n = dst->ops->neigh_lookup(dst, skb, NULL);

	return IS_ERR(n) ? NULL : n;
}

static inline void dst_confirm_neigh(const struct dst_entry *dst,
				     const void *daddr)
{
	if (dst->ops->confirm_neigh)
		dst->ops->confirm_neigh(dst, daddr);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
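
/*
 * Illustrative sketch (not kernel code): @timeout is in jiffies, and an
 * expires value of 0 means "never expires", which is why the helper bumps
 * a computed 0 to 1. E.g. to let a route age out after 30 seconds:
 *
 *	dst_set_expires(dst, 30 * HZ);
 */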

INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
					 struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
					struct sk_buff *));
/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return INDIRECT_CALL_INET(skb_dst(skb)->output,
				  ip6_output, ip_output,
				  net, sk, skb);
}

INDIRECT_CALLABLE_DECLARE(int ip6_input(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return INDIRECT_CALL_INET(skb_dst(skb)->input,
				  ip6_input, ip_local_deliver, skb);
}

INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
							   u32));
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							   u32));
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
					 ipv4_dst_check, dst, cookie);
	return dst;
}
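
/*
 * Illustrative sketch (not kernel code): callers that cache a dst (e.g. in
 * a socket) revalidate it with the cookie they saved at lookup time, and
 * fall back to a fresh lookup when dst_check() returns NULL:
 *
 *	dst = dst_check(dst, cookie);
 *	if (!dst)
 *		dst = do_route_lookup(...);	// hypothetical re-lookup
 */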

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *
xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
		      const struct flowi *fl, const struct sock *sk,
		      int flags, u32 if_id)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk, int flags,
					u32 if_id);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, const struct sock *sk,
				    int flags);

/* An skb attached to this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif

static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops->update_pmtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
}

/* update dst pmtu but do not confirm the neighbour */
static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops->update_pmtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
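
/*
 * Illustrative sketch (not kernel code): tunnel transmit paths use the
 * no-confirm variant when an inner packet exceeds the tunnel MTU, since a
 * PMTU reduction says nothing about neighbour reachability:
 *
 *	if (skb->len > mtu)
 *		skb_dst_update_pmtu_no_confirm(skb, mtu);
 */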
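
/*
 * The dst_blackhole_*() helpers below are shared, mostly inert dst_ops
 * implementations used to build "blackhole" routes that must keep accepting
 * packets without doing anything useful (e.g. while XFRM resolution is
 * pending): update_pmtu/redirect are no-ops, and cow_metrics/neigh_lookup
 * report failure.
 */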
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu, bool confirm_neigh);
void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr);
unsigned int dst_blackhole_mtu(const struct dst_entry *dst);

#endif /* _NET_DST_H */