/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from its parent list, it is "freed" (dst_free).
 * After this it enters a dead state (dst->obsolete > 0); if its refcnt
 * is zero it can be destroyed immediately, otherwise it is added to the
 * gc list and the garbage collector periodically checks the refcnt.
 * A sketch of this lifecycle follows below.
 */
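/* Illustrative sketch (not part of the original header): a typical
 * lifecycle as described above, assuming a hypothetical cache that owns
 * the entry.  Users take and drop plain references; the owner calls
 * dst_free() once the entry leaves its list.
 *
 *	struct dst_entry *dst = ...;	// entry sitting in some parent list
 *
 *	dst_hold(dst);			// user takes a reference
 *	...				// use dst->input()/dst->output() etc.
 *	dst_release(dst);		// user drops the reference again
 *
 *	// owner unlinks the entry from its list and "frees" it; if the
 *	// refcnt is still non-zero, the garbage collector reaps it later
 *	dst_free(dst);
 */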

struct sk_buff;

struct dst_entry {
	struct rcu_head rcu_head;
	struct dst_entry *child;
	struct net_device *dev;
	struct dst_ops *ops;
	unsigned long _metrics;
	union {
		unsigned long expires;
		/* points to the dst_entry this entry was copied from */
		struct dst_entry *from;
	};
	struct dst_entry *path;
	struct neighbour __rcu *_neighbour;
#ifdef CONFIG_XFRM
	struct xfrm_state *xfrm;
#else
	void *__pad1;
#endif
	int (*input)(struct sk_buff*);
	int (*output)(struct sk_buff*);

	int flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_NOPEER		0x0040
#define DST_FAKE_RTABLE		0x0080
#define DST_XFRM_TUNNEL		0x0100

	short error;
	short obsolete;
	unsigned short header_len;	/* more space at head required */
	unsigned short trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32 tclassid;
#else
	__u32 __pad2;
#endif

	/*
	 * Align __refcnt to a 64 byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long __pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t __refcnt;	/* client references */
	int __use;
	unsigned long lastuse;
	union {
		struct dst_entry *next;
		struct rtable __rcu *rt_next;
		struct rt6_info *rt6_next;
		struct dn_route __rcu *dn_next;
	};
};

static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
{
	return rcu_dereference(dst->_neighbour);
}

static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
{
	return rcu_dereference_raw(dst->_neighbour);
}

static inline void dst_set_neighbour(struct dst_entry *dst, struct neighbour *neigh)
{
	rcu_assign_pointer(dst->_neighbour, neigh);
}
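/* Illustrative sketch (not part of the original header): the _neighbour
 * pointer is RCU-protected, so lockless readers are expected to wrap the
 * noref accessor in an RCU read-side critical section, e.g.:
 *
 *	struct neighbour *n;
 *
 *	rcu_read_lock();
 *	n = dst_get_neighbour_noref(dst);
 *	if (n)
 *		neigh_confirm(n);	// use n only inside the RCU section
 *	rcu_read_unlock();
 *
 * dst_confirm() further down in this header does exactly this.
 */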

extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[RTAX_MAX];

#define DST_METRICS_READ_ONLY	0x1UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
}
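/* Illustrative sketch (not part of the original header): because RTT
 * metrics are kept in milliseconds, the two helpers above convert on
 * every access, so a stored value round-trips through jiffies, e.g.:
 *
 *	unsigned long srtt = dst_metric_rtt(dst, RTAX_RTT);	// jiffies
 *
 *	srtt = (3 * srtt) / 4;			// hypothetical smoothing step
 *	set_dst_metric_rtt(dst, RTAX_RTT, srtt);	// stored back as msecs
 */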

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline
struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, take a reference.
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}


/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;

	/*
	 * Clear rxhash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	if (!skb->l4_rxhash)
		skb->rxhash = 0;
	skb_set_queue_mapping(skb, 0);
	skb_dst_drop(skb);
	nf_reset(skb);
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev);
}

/* Children define the path of the packet through the
 * Linux networking stack. Thus, destinations are stackable.
 */

static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(skb_dst(skb)->child);

	skb_dst_drop(skb);
	return child;
}

extern int dst_discard(struct sk_buff *skb);
extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		       int initial_ref, int initial_obsolete, int flags);
extern void __dst_free(struct dst_entry *dst);
extern struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst) {
		struct neighbour *n;

		rcu_read_lock();
		n = dst_get_neighbour_noref(dst);
		neigh_confirm(n);
		rcu_read_unlock();
	}
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	return dst->ops->neigh_lookup(dst, daddr);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

extern void dst_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl, struct sock *sk,
					    int flags)
{
	return dst_orig;
}
#else
extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
				     const struct flowi *fl, struct sock *sk,
				     int flags);
#endif

#endif /* _NET_DST_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/refcount.h>
#include <net/neighbour.h>
#include <asm/processor.h>
#include <linux/indirect_call_wrapper.h>

struct sk_buff;

struct dst_entry {
	struct net_device *dev;
	struct dst_ops *ops;
	unsigned long _metrics;
	unsigned long expires;
#ifdef CONFIG_XFRM
	struct xfrm_state *xfrm;
#else
	void *__pad1;
#endif
	int (*input)(struct sk_buff *);
	int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short flags;
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOCOUNT		0x0008
#define DST_FAKE_RTABLE		0x0010
#define DST_XFRM_TUNNEL		0x0020
#define DST_XFRM_QUEUE		0x0040
#define DST_METADATA		0x0080

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short header_len;	/* more space at head required */
	unsigned short trailer_len;	/* space to reserve at tail */

	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
#ifdef CONFIG_64BIT
	atomic_t __refcnt;	/* 64-bit offset 64 */
#endif
	int __use;
	unsigned long lastuse;
	struct lwtunnel_state *lwtstate;
	struct rcu_head rcu_head;
	short error;
	short __pad;
	__u32 tclassid;
#ifndef CONFIG_64BIT
	atomic_t __refcnt;	/* 32-bit offset 64 */
#endif
	netdevice_tracker dev_tracker;
};

struct dst_metrics {
	u32 metrics[RTAX_MAX];
	refcount_t refcnt;
} __aligned(4);		/* Low pointer bits contain DST_METRICS_FLAGS */
extern const struct dst_metrics dst_default_metrics;

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);

#define DST_METRICS_READ_ONLY	0x1UL
#define DST_METRICS_REFCOUNTED	0x2UL
#define DST_METRICS_FLAGS	0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)
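/* Illustrative sketch (not part of the original header): _metrics is a
 * pointer with its two low bits borrowed for DST_METRICS_FLAGS, which is
 * why struct dst_metrics only needs 4-byte alignment.  dst_init_metrics()
 * below packs the pointer and the read-only bit in one store, e.g.:
 *
 *	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 *	// dst->_metrics == (unsigned long)dst_default_metrics.metrics
 *	//                  | DST_METRICS_READ_ONLY
 *
 *	u32 *p = DST_METRICS_PTR(dst);	// flag bits masked off again
 */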

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}
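/* Illustrative sketch (not part of the original header): writers go
 * through dst_metrics_write_ptr(), so updating a metric on an entry that
 * still points at shared read-only metrics transparently triggers
 * ops->cow_metrics() (typically dst_cow_metrics_generic()), e.g.:
 *
 *	dst_metric_set(dst, RTAX_HOPLIMIT, 64);	// may COW the shared array
 */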

/* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA	(1U << 31)

#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

INDIRECT_CALLABLE_DECLARE(unsigned int ip6_mtu(const struct dst_entry *));
INDIRECT_CALLABLE_DECLARE(unsigned int ipv4_mtu(const struct dst_entry *));
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1 << metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the placement of __refcnt in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	if (unlikely(time != dst->lastuse)) {
		dst->__use++;
		dst->lastuse = time;
	}
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		dst_hold(dst);
	return dst;
}

void dst_release(struct dst_entry *dst);

void dst_release_immediate(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->slow_gro |= !!refdst;
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	return atomic_inc_not_zero(&dst->__refcnt);
}
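/* Illustrative sketch (not part of the original header): dst_hold_safe()
 * is the pattern for taking a reference on an entry found via an RCU
 * lookup, where the refcount may already have dropped to zero, e.g.:
 *
 *	rcu_read_lock();
 *	dst = ...;			// some RCU-protected lookup
 *	if (dst && !dst_hold_safe(dst))
 *		dst = NULL;		// already being destroyed, skip it
 *	rcu_read_unlock();
 */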

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted and not destroyed, grab a ref on it.
 * Returns true if dst is refcounted.
 */
static inline bool skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		WARN_ON(!rcu_read_lock_held());
		if (!dst_hold_safe(dst))
			dst = NULL;

		skb->_skb_refdst = (unsigned long)dst;
		skb->slow_gro |= !!dst;
	}

	return skb->_skb_refdst != 0UL;
}
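/* Illustrative sketch (not part of the original header): a noref dst is
 * only valid under rcu_read_lock(), so code that queues an skb past the
 * current RCU section converts it to a real reference first, e.g.:
 *
 *	if (!skb_dst_force(skb)) {
 *		kfree_skb(skb);		// dst already destroyed, drop packet
 *		return -EINVAL;		// hypothetical error handling
 *	}
 *	// skb can now be queued and handled outside the RCU section
 */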


/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	DEV_STATS_INC(dev, rx_packets);
	DEV_STATS_ADD(dev, rx_bytes, skb->len);
	__skb_tunnel_rx(skb, dev, net);
}
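/* Illustrative sketch (not part of the original header): a tunnel
 * driver's receive path typically decapsulates, then uses the helper
 * above before handing the inner packet back to the stack, e.g.:
 *
 *	__skb_pull(skb, tunnel_hdr_len);	// strip the outer headers
 *	skb->protocol = htons(ETH_P_IP);	// assuming an IPv4 payload
 *	skb_tunnel_rx(skb, tunnel_dev, tunnel_net);
 *	netif_rx(skb);
 *
 * tunnel_hdr_len, tunnel_dev and tunnel_net are placeholders for the
 * driver's own state.
 */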

static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);

static inline void dst_confirm(struct dst_entry *dst)
{
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n;

	if (WARN_ON_ONCE(!dst->ops->neigh_lookup))
		return NULL;

	n = dst->ops->neigh_lookup(dst, skb, NULL);

	return IS_ERR(n) ? NULL : n;
}

static inline void dst_confirm_neigh(const struct dst_entry *dst,
				     const void *daddr)
{
	if (dst->ops->confirm_neigh)
		dst->ops->confirm_neigh(dst, daddr);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
					 struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
					struct sk_buff *));
/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return INDIRECT_CALL_INET(skb_dst(skb)->output,
				  ip6_output, ip_output,
				  net, sk, skb);
}
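/* Illustrative sketch (not part of the original header): after a route
 * lookup has attached a dst to the skb, the transmit path hands the
 * packet to that route's output method via dst_output(), e.g.:
 *
 *	skb_dst_set(skb, &rt->dst);	// rt from a hypothetical route lookup
 *	return dst_output(net, sk, skb);
 *
 * The INDIRECT_CALL_INET() wrapper lets the common IPv4/IPv6 handlers be
 * called directly when retpolines make indirect calls expensive.
 */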

INDIRECT_CALLABLE_DECLARE(int ip6_input(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return INDIRECT_CALL_INET(skb_dst(skb)->input,
				  ip6_input, ip_local_deliver, skb);
}

INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
							   u32));
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							    u32));
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
					 ipv4_dst_check, dst, cookie);
	return dst;
}
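/* Illustrative sketch (not part of the original header): cached routes
 * set dst->obsolete to DST_OBSOLETE_FORCE_CHK, so a socket revalidates
 * its cached dst with the cookie it saved at lookup time, e.g.:
 *
 *	dst = dst_check(dst, cookie);	// cookie saved when dst was cached
 *	if (!dst)
 *		dst = ...;		// cache is stale, redo the route lookup
 */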

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *
xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
		      const struct flowi *fl, const struct sock *sk,
		      int flags, u32 if_id)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk, int flags,
					u32 if_id);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, const struct sock *sk,
				    int flags);

/* An skb attached to this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif

static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops->update_pmtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
}

/* update dst pmtu but do not confirm the neighbour */
static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops->update_pmtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
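/* Illustrative sketch (not part of the original header): tunnel and
 * encapsulation code uses these wrappers when the encapsulated packet no
 * longer fits, e.g. (rt and encap_overhead are placeholders for the
 * tunnel's own route and header cost):
 *
 *	u32 mtu = dst_mtu(&rt->dst) - encap_overhead;
 *
 *	if (skb->len > mtu)
 *		skb_dst_update_pmtu_no_confirm(skb, mtu);
 */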

struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu, bool confirm_neigh);
void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr);
unsigned int dst_blackhole_mtu(const struct dst_entry *dst);

#endif /* _NET_DST_H */