/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from a parent list, it is "freed" (dst_free).
 * After this it enters a dead state (dst->obsolete > 0) and, if its refcnt
 * is zero, it can be destroyed immediately; otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */
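
/* Illustrative sketch (not part of the original header): the lifecycle
 * described above, as a typical call sequence.  dst_alloc(), dst_hold(),
 * dst_release() and dst_free() are the real helpers declared later in
 * this file; everything else follows the description above.
 *
 *	dst = dst_alloc(ops, dev, 1, DST_OBSOLETE_NONE, 0);
 *					// refcnt = 1, alive
 *	dst_hold(dst);			// a client takes a reference
 *	...
 *	dst_release(dst);		// client drops its reference
 *	dst_free(dst);			// unlink: destroyed now if refcnt
 *					// is zero, else parked on gc list
 */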

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_FAKE_RTABLE		0x0040
#define DST_XFRM_TUNNEL		0x0080
#define DST_XFRM_QUEUE		0x0100
#define DST_METADATA		0x0200

	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

#ifdef CONFIG_64BIT
	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
	struct lwtunnel_state	*lwtstate;
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];

#define DST_METRICS_READ_ONLY		0x1UL
#define DST_METRICS_FLAGS		0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)
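
/* Illustrative sketch (not part of the original header): _metrics is a
 * tagged pointer.  The metrics array is at least 4-byte aligned, so its
 * two low bits are free to carry the DST_METRICS_* flags, and
 * __DST_METRICS_PTR() masks them off to recover the array:
 *
 *	unsigned long m = dst->_metrics;
 *	u32 *metrics   = __DST_METRICS_PTR(m);		// flag bits cleared
 *	bool read_only = m & DST_METRICS_READ_ONLY;	// low-bit flag
 */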

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}
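
/* Illustrative usage sketch (not part of the original header): metrics are
 * indexed by the RTAX_* attributes from <linux/rtnetlink.h> and stored at
 * [metric - 1].  RTAX_RTO_MIN here is just an arbitrary example attribute:
 *
 *	u32 rto_min = dst_metric(dst, RTAX_RTO_MIN);	// plain read
 *	dst_metric_set(dst, RTAX_RTO_MIN, rto_min * 2);	// write; COWs a
 *							// read-only array
 */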

/* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA	(1 << 31)

#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}
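
/* Illustrative sketch (not part of the original header): with HZ=250, a
 * stored RTAX_RTT value of 200 (milliseconds) reads back as 50 jiffies:
 *
 *	unsigned long rtt = dst_metric_rtt(dst, RTAX_RTT);	// in jiffies
 *	u32 rtt_ms = jiffies_to_msecs(rtt);			// back to ms
 */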

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}
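
/* Illustrative sketch (not part of the original header): skb->_skb_refdst
 * is also a tagged pointer.  SKB_DST_NOREF marks a dst borrowed under RCU
 * rather than refcounted, and SKB_DST_PTRMASK strips the tag bits (both
 * are defined in <linux/skbuff.h>):
 *
 *	unsigned long refdst = skb->_skb_refdst;
 *	struct dst_entry *dst = (struct dst_entry *)(refdst & SKB_DST_PTRMASK);
 *	bool noref = refdst & SKB_DST_NOREF;	// true: no reference held
 */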

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}

/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	if (dst->flags & DST_NOCACHE)
		return atomic_inc_not_zero(&dst->__refcnt);
	dst_hold(dst);
	return true;
}

/**
 * skb_dst_force_safe - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted and not destroyed, grab a ref on it.
 */
static inline void skb_dst_force_safe(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		if (!dst_hold_safe(dst))
			dst = NULL;

		skb->_skb_refdst = (unsigned long)dst;
	}
}
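
/* Illustrative sketch (not part of the original header): the _safe variant
 * matters when an skb's dst may outlive the current RCU section, e.g. when
 * caching it on a socket, because a DST_NOCACHE entry with refcnt zero may
 * already be on its way to destruction.  cache_it() is hypothetical:
 *
 *	skb_dst_force_safe(skb);	// ref taken, or dst cleared to NULL
 *	if (skb_dst(skb))
 *		cache_it(skb_dst(skb));	// now safe past rcu_read_unlock()
 */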

/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}

static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}
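
/* Illustrative sketch (not part of the original header): dst_rcu_free() is
 * shaped as an RCU callback, so a caller that must wait out concurrent RCU
 * readers before freeing would defer through call_rcu():
 *
 *	call_rcu(&dst->rcu_head, dst_rcu_free);
 */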

static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}

static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
	return IS_ERR(n) ? NULL : n;
}
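
/* Illustrative sketch (not part of the original header): how an output path
 * might combine the helpers above, loosely modelled on an IPv4-style
 * finish-output step; error handling is elided:
 *
 *	struct neighbour *n = dst_neigh_lookup_skb(dst, skb);
 *
 *	if (n) {
 *		int res = dst_neigh_output(dst, n, skb);
 *
 *		neigh_release(n);	// drop the lookup's reference
 *		return res;
 *	}
 */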

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(net, sk, skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
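
/* Illustrative sketch (not part of the original header): the cookie is an
 * opaque value remembered when the route was cached; ->check() compares it
 * and returns NULL once the entry has gone stale.  relookup_route() is a
 * hypothetical stand-in for a fresh route lookup:
 *
 *	dst = dst_check(dst, cookie);	// NULL once the route is stale
 *	if (!dst)
 *		dst = relookup_route();
 */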

void dst_subsys_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, const struct sock *sk,
				    int flags);

/* An skb attached to this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif

#endif /* _NET_DST_H */