net/core/flow_dissector.c (v3.15)
 
  1#include <linux/skbuff.h>
  2#include <linux/export.h>
  3#include <linux/ip.h>
  4#include <linux/ipv6.h>
  5#include <linux/if_vlan.h>
  6#include <net/ip.h>
  7#include <net/ipv6.h>
  8#include <linux/igmp.h>
  9#include <linux/icmp.h>
 10#include <linux/sctp.h>
 11#include <linux/dccp.h>
 12#include <linux/if_tunnel.h>
 13#include <linux/if_pppox.h>
 14#include <linux/ppp_defs.h>
 15#include <net/flow_keys.h>
 16
 17/* copy saddr & daddr, possibly using 64bit load/store
 18 * Equivalent to :	flow->src = iph->saddr;
 19 *			flow->dst = iph->daddr;
 20 */
 21static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
 22{
 23	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
 24		     offsetof(typeof(*flow), src) + sizeof(flow->src));
 25	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
 26}
 27
 28/**
 29 * skb_flow_get_ports - extract the upper layer ports and return them
 30 * @skb: buffer to extract the ports from
 31 * @thoff: transport header offset
 32 * @ip_proto: protocol for which to get port offset
 33 *
 34 * The function will try to retrieve the ports at offset thoff + poff where poff
 35 * is the protocol port offset returned from proto_ports_offset
 36 */
 37__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
 38{
 39	int poff = proto_ports_offset(ip_proto);
 40
 41	if (poff >= 0) {
 42		__be32 *ports, _ports;
 43
 44		ports = skb_header_pointer(skb, thoff + poff,
 45					   sizeof(_ports), &_ports);
 46		if (ports)
 47			return *ports;
 48	}
 49
 50	return 0;
 51}
 52EXPORT_SYMBOL(skb_flow_get_ports);
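
A minimal caller sketch (editor's illustration, not part of the kernel tree): the helper above returns both ports packed into one __be32 in wire order, so a caller can view it as two __be16 halves, exactly as struct flow_keys does with its port16[] union. The name example_log_ports() and the assumption that thoff already points at the transport header are ours.

static void example_log_ports(const struct sk_buff *skb, int thoff)
{
	__be32 both = skb_flow_get_ports(skb, thoff, IPPROTO_TCP);
	__be16 *p = (__be16 *)&both;

	/* 0 means the ports could not be read; on the wire the source
	 * port comes first, then the destination.
	 */
	pr_info("sport=%u dport=%u\n", ntohs(p[0]), ntohs(p[1]));
}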
 53
 54bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 55{
 56	int nhoff = skb_network_offset(skb);
 57	u8 ip_proto;
 58	__be16 proto = skb->protocol;
 59
 60	memset(flow, 0, sizeof(*flow));
 61
 62again:
 63	switch (proto) {
 64	case htons(ETH_P_IP): {
 65		const struct iphdr *iph;
 66		struct iphdr _iph;
 67ip:
 68		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
 69		if (!iph || iph->ihl < 5)
 70			return false;
 71		nhoff += iph->ihl * 4;
 72
 73		ip_proto = iph->protocol;
 74		if (ip_is_fragment(iph))
 75			ip_proto = 0;
 76
 77		iph_to_flow_copy_addrs(flow, iph);
 78		break;
 79	}
 80	case htons(ETH_P_IPV6): {
 81		const struct ipv6hdr *iph;
 82		struct ipv6hdr _iph;
 83ipv6:
 84		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
 85		if (!iph)
 86			return false;
 87
 88		ip_proto = iph->nexthdr;
 89		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
 90		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
 91		nhoff += sizeof(struct ipv6hdr);
 92		break;
 93	}
 94	case htons(ETH_P_8021AD):
 95	case htons(ETH_P_8021Q): {
 96		const struct vlan_hdr *vlan;
 97		struct vlan_hdr _vlan;
 98
 99		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
100		if (!vlan)
101			return false;
102
103		proto = vlan->h_vlan_encapsulated_proto;
104		nhoff += sizeof(*vlan);
105		goto again;
106	}
107	case htons(ETH_P_PPP_SES): {
108		struct {
109			struct pppoe_hdr hdr;
110			__be16 proto;
111		} *hdr, _hdr;
112		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
113		if (!hdr)
114			return false;
115		proto = hdr->proto;
116		nhoff += PPPOE_SES_HLEN;
117		switch (proto) {
118		case htons(PPP_IP):
119			goto ip;
120		case htons(PPP_IPV6):
121			goto ipv6;
122		default:
123			return false;
124		}
125	}
126	default:
127		return false;
128	}
129
130	switch (ip_proto) {
131	case IPPROTO_GRE: {
132		struct gre_hdr {
133			__be16 flags;
134			__be16 proto;
135		} *hdr, _hdr;
136
137		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
138		if (!hdr)
139			return false;
140		/*
141		 * Only look inside GRE if version zero and no
142		 * routing
143		 */
144		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
145			proto = hdr->proto;
146			nhoff += 4;
147			if (hdr->flags & GRE_CSUM)
148				nhoff += 4;
149			if (hdr->flags & GRE_KEY)
150				nhoff += 4;
151			if (hdr->flags & GRE_SEQ)
152				nhoff += 4;
153			if (proto == htons(ETH_P_TEB)) {
154				const struct ethhdr *eth;
155				struct ethhdr _eth;
156
157				eth = skb_header_pointer(skb, nhoff,
158							 sizeof(_eth), &_eth);
159				if (!eth)
160					return false;
161				proto = eth->h_proto;
162				nhoff += sizeof(*eth);
163			}
164			goto again;
165		}
166		break;
167	}
168	case IPPROTO_IPIP:
169		proto = htons(ETH_P_IP);
170		goto ip;
171	case IPPROTO_IPV6:
172		proto = htons(ETH_P_IPV6);
173		goto ipv6;
174	default:
175		break;
176	}
177
178	flow->ip_proto = ip_proto;
179	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
180	flow->thoff = (u16) nhoff;
181
182	return true;
183}
184EXPORT_SYMBOL(skb_flow_dissect);
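
A hedged usage sketch for the v3.15 dissector above; example_dump_flow() is an invented name. Note that for IPv6 packets keys.src/keys.dst hold ipv6_addr_hash() folds rather than real addresses, so the %pI4 formatting below is only literal for IPv4 flows.

static void example_dump_flow(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;		/* unsupported protocol or truncated headers */

	pr_info("ip_proto=%u src=%pI4 dst=%pI4 sport=%u dport=%u thoff=%u\n",
		keys.ip_proto, &keys.src, &keys.dst,
		ntohs(keys.port16[0]), ntohs(keys.port16[1]), keys.thoff);
}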
185
186static u32 hashrnd __read_mostly;
187static __always_inline void __flow_hash_secret_init(void)
188{
189	net_get_random_once(&hashrnd, sizeof(hashrnd));
190}
191
192static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
193{
194	__flow_hash_secret_init();
195	return jhash_3words(a, b, c, hashrnd);
196}
197
198static __always_inline u32 __flow_hash_1word(u32 a)
199{
200	__flow_hash_secret_init();
201	return jhash_1word(a, hashrnd);
202}
203
204/*
205 * __skb_get_hash: calculate a flow hash based on src/dst addresses
206 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
207 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
208 * if hash is a canonical 4-tuple hash over transport ports.
209 */
210void __skb_get_hash(struct sk_buff *skb)
211{
212	struct flow_keys keys;
213	u32 hash;
214
215	if (!skb_flow_dissect(skb, &keys))
216		return;
217
218	if (keys.ports)
219		skb->l4_hash = 1;
220
221	/* get a consistent hash (same value on both flow directions) */
222	if (((__force u32)keys.dst < (__force u32)keys.src) ||
223	    (((__force u32)keys.dst == (__force u32)keys.src) &&
224	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
225		swap(keys.dst, keys.src);
226		swap(keys.port16[0], keys.port16[1]);
227	}
228
229	hash = __flow_hash_3words((__force u32)keys.dst,
230				  (__force u32)keys.src,
231				  (__force u32)keys.ports);
232	if (!hash)
233		hash = 1;
234
235	skb->hash = hash;
236}
237EXPORT_SYMBOL(__skb_get_hash);
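
A worked illustration of the consistency swap above, with hypothetical addresses:

/*
 * For 10.0.0.2:40000 -> 10.0.0.1:80, dst sorts below src here, so
 * src/dst and port16[0]/port16[1] are swapped before hashing; the
 * reply direction 10.0.0.1:80 -> 10.0.0.2:40000 already has that
 * canonical ordering and is left alone. Both directions therefore
 * feed the same three words to __flow_hash_3words() and get the
 * same hash.
 */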
238
239/*
240 * Returns a Tx hash based on the given packet descriptor and the number of
241 * Tx queues to be used as a distribution range.
242 */
243u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
244		  unsigned int num_tx_queues)
245{
246	u32 hash;
247	u16 qoffset = 0;
248	u16 qcount = num_tx_queues;
249
250	if (skb_rx_queue_recorded(skb)) {
251		hash = skb_get_rx_queue(skb);
252		while (unlikely(hash >= num_tx_queues))
253			hash -= num_tx_queues;
254		return hash;
255	}
256
257	if (dev->num_tc) {
258		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
259		qoffset = dev->tc_to_txq[tc].offset;
260		qcount = dev->tc_to_txq[tc].count;
261	}
262
263	if (skb->sk && skb->sk->sk_hash)
264		hash = skb->sk->sk_hash;
265	else
266		hash = (__force u16) skb->protocol;
267	hash = __flow_hash_1word(hash);
268
269	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
270}
271EXPORT_SYMBOL(__skb_tx_hash);
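
A short worked example of the multiply-shift in the return statement above (the numbers are illustrative):

/*
 * ((u64)hash * qcount) >> 32 scales a 32-bit hash into
 * [qoffset, qoffset + qcount) without a division. E.g. with
 * hash = 0x80000000 and qcount = 8:
 *   (0x80000000ULL * 8) >> 32 = 0x400000000 >> 32 = 4
 * so the mid-range hash picks the middle queue, and a uniform hash
 * spreads flows uniformly across the qcount queues.
 */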
272
273/* __skb_get_poff() returns the offset to the payload as far as it could
274 * be dissected. The main user is currently BPF, so that we can dynamically
275 * truncate packets without needing to push actual payload to the user
276 * space and can analyze headers only, instead.
277 */
278u32 __skb_get_poff(const struct sk_buff *skb)
279{
280	struct flow_keys keys;
281	u32 poff = 0;
282
283	if (!skb_flow_dissect(skb, &keys))
284		return 0;
285
286	poff += keys.thoff;
287	switch (keys.ip_proto) {
288	case IPPROTO_TCP: {
289		const struct tcphdr *tcph;
290		struct tcphdr _tcph;
291
292		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
293		if (!tcph)
294			return poff;
295
296		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
297		break;
298	}
299	case IPPROTO_UDP:
300	case IPPROTO_UDPLITE:
301		poff += sizeof(struct udphdr);
302		break;
303	/* For the rest, we do not really care about header
304	 * extensions at this point for now.
305	 */
306	case IPPROTO_ICMP:
307		poff += sizeof(struct icmphdr);
308		break;
309	case IPPROTO_ICMPV6:
310		poff += sizeof(struct icmp6hdr);
311		break;
312	case IPPROTO_IGMP:
313		poff += sizeof(struct igmphdr);
314		break;
315	case IPPROTO_DCCP:
316		poff += sizeof(struct dccp_hdr);
317		break;
318	case IPPROTO_SCTP:
319		poff += sizeof(struct sctphdr);
320		break;
321	}
322
323	return poff;
324}
325
326static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
327{
328#ifdef CONFIG_XPS
329	struct xps_dev_maps *dev_maps;
330	struct xps_map *map;
331	int queue_index = -1;
332
333	rcu_read_lock();
334	dev_maps = rcu_dereference(dev->xps_maps);
335	if (dev_maps) {
336		map = rcu_dereference(
337		    dev_maps->cpu_map[raw_smp_processor_id()]);
338		if (map) {
339			if (map->len == 1)
340				queue_index = map->queues[0];
341			else {
342				u32 hash;
343				if (skb->sk && skb->sk->sk_hash)
344					hash = skb->sk->sk_hash;
345				else
346					hash = (__force u16) skb->protocol ^
347					    skb->hash;
348				hash = __flow_hash_1word(hash);
349				queue_index = map->queues[
350				    ((u64)hash * map->len) >> 32];
351			}
352			if (unlikely(queue_index >= dev->real_num_tx_queues))
353				queue_index = -1;
354		}
355	}
356	rcu_read_unlock();
357
358	return queue_index;
359#else
360	return -1;
361#endif
362}
363
364static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
365{
366	struct sock *sk = skb->sk;
367	int queue_index = sk_tx_queue_get(sk);
368
369	if (queue_index < 0 || skb->ooo_okay ||
370	    queue_index >= dev->real_num_tx_queues) {
371		int new_index = get_xps_queue(dev, skb);
372		if (new_index < 0)
373			new_index = skb_tx_hash(dev, skb);
374
375		if (queue_index != new_index && sk &&
376		    rcu_access_pointer(sk->sk_dst_cache))
377			sk_tx_queue_set(sk, new_index);
378
379		queue_index = new_index;
380	}
381
382	return queue_index;
383}
384
385struct netdev_queue *netdev_pick_tx(struct net_device *dev,
386				    struct sk_buff *skb,
387				    void *accel_priv)
388{
389	int queue_index = 0;
390
391	if (dev->real_num_tx_queues != 1) {
392		const struct net_device_ops *ops = dev->netdev_ops;
393		if (ops->ndo_select_queue)
394			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
395							    __netdev_pick_tx);
396		else
397			queue_index = __netdev_pick_tx(dev, skb);
398
399		if (!accel_priv)
400			queue_index = netdev_cap_txqueue(dev, queue_index);
401	}
402
403	skb_set_queue_mapping(skb, queue_index);
404	return netdev_get_tx_queue(dev, queue_index);
405}
net/core/flow_dissector.c (v4.17)
   1#include <linux/kernel.h>
   2#include <linux/skbuff.h>
   3#include <linux/export.h>
   4#include <linux/ip.h>
   5#include <linux/ipv6.h>
   6#include <linux/if_vlan.h>
   7#include <net/dsa.h>
   8#include <net/dst_metadata.h>
   9#include <net/ip.h>
  10#include <net/ipv6.h>
  11#include <net/gre.h>
  12#include <net/pptp.h>
  13#include <net/tipc.h>
  14#include <linux/igmp.h>
  15#include <linux/icmp.h>
  16#include <linux/sctp.h>
  17#include <linux/dccp.h>
  18#include <linux/if_tunnel.h>
  19#include <linux/if_pppox.h>
  20#include <linux/ppp_defs.h>
  21#include <linux/stddef.h>
  22#include <linux/if_ether.h>
  23#include <linux/mpls.h>
  24#include <linux/tcp.h>
  25#include <net/flow_dissector.h>
  26#include <scsi/fc/fc_fcoe.h>
  27#include <uapi/linux/batadv_packet.h>
  28
  29static void dissector_set_key(struct flow_dissector *flow_dissector,
  30			      enum flow_dissector_key_id key_id)
  31{
  32	flow_dissector->used_keys |= (1 << key_id);
  33}
  34
  35void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
  36			     const struct flow_dissector_key *key,
  37			     unsigned int key_count)
  38{
  39	unsigned int i;
  40
  41	memset(flow_dissector, 0, sizeof(*flow_dissector));
  42
  43	for (i = 0; i < key_count; i++, key++) {
  44	/* User should make sure that every key target offset is within
  45	 * the boundaries of an unsigned short.
  46		 */
  47		BUG_ON(key->offset > USHRT_MAX);
  48		BUG_ON(dissector_uses_key(flow_dissector,
  49					  key->key_id));
  50
  51		dissector_set_key(flow_dissector, key->key_id);
  52		flow_dissector->offset[key->key_id] = key->offset;
  53	}
  54
  55	/* Ensure that the dissector always includes control and basic key.
  56	 * That way we are able to avoid handling lack of these in fast path.
  57	 */
  58	BUG_ON(!dissector_uses_key(flow_dissector,
  59				   FLOW_DISSECTOR_KEY_CONTROL));
  60	BUG_ON(!dissector_uses_key(flow_dissector,
  61				   FLOW_DISSECTOR_KEY_BASIC));
  62}
  63EXPORT_SYMBOL(skb_flow_dissector_init);
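
A minimal sketch of setting up a private dissector with the initializer above. struct my_flow, my_flow_keys and my_dissector are hypothetical names; the control and basic entries are mandatory, since skb_flow_dissector_init() BUG()s without them.

struct my_flow {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_ports ports;
};

static const struct flow_dissector_key my_flow_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct my_flow, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct my_flow, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct my_flow, ports),
	},
};

static struct flow_dissector my_dissector __read_mostly;

static void my_dissector_setup(void)
{
	skb_flow_dissector_init(&my_dissector, my_flow_keys,
				ARRAY_SIZE(my_flow_keys));
}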
  64
  65/**
  66 * skb_flow_get_be16 - extract be16 entity
  67 * @skb: sk_buff to extract from
  68 * @poff: offset to extract at
  69 * @data: raw buffer pointer to the packet
  70 * @hlen: packet header length
  71 *
  72 * The function will try to retrieve a be16 entity at
  73 * offset poff
  74 */
  75static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
  76				void *data, int hlen)
  77{
  78	__be16 *u, _u;
  79
  80	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
  81	if (u)
  82		return *u;
  83
  84	return 0;
  85}
  86
  87/**
  88 * __skb_flow_get_ports - extract the upper layer ports and return them
  89 * @skb: sk_buff to extract the ports from
  90 * @thoff: transport header offset
  91 * @ip_proto: protocol for which to get port offset
  92 * @data: raw buffer pointer to the packet, if NULL use skb->data
  93 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  94 *
  95 * The function will try to retrieve the ports at offset thoff + poff where poff
  96 * is the protocol port offset returned from proto_ports_offset
  97 */
  98__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
  99			    void *data, int hlen)
 100{
 101	int poff = proto_ports_offset(ip_proto);
 102
 103	if (!data) {
 104		data = skb->data;
 105		hlen = skb_headlen(skb);
 106	}
 107
 108	if (poff >= 0) {
 109		__be32 *ports, _ports;
 110
 111		ports = __skb_header_pointer(skb, thoff + poff,
 112					     sizeof(_ports), data, hlen, &_ports);
 113		if (ports)
 114			return *ports;
 115	}
 116
 117	return 0;
 118}
 119EXPORT_SYMBOL(__skb_flow_get_ports);
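
When data is NULL the helper above substitutes skb->data and skb_headlen(), so the simple skb-only call shape still works; this mirrors the skb_flow_get_ports() inline wrapper in <linux/skbuff.h>. example_ports() is an invented name.

static __be32 example_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
{
	/* NULL data + hlen 0: fall back to the skb's own linear header */
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}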
 120
 121static void
 122skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
 123				   struct flow_dissector *flow_dissector,
 124				   void *target_container)
 125{
 126	struct flow_dissector_key_control *ctrl;
 127
 128	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
 129		return;
 130
 131	ctrl = skb_flow_dissector_target(flow_dissector,
 132					 FLOW_DISSECTOR_KEY_ENC_CONTROL,
 133					 target_container);
 134	ctrl->addr_type = type;
 135}
 136
 137void
 138skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
 139			     struct flow_dissector *flow_dissector,
 140			     void *target_container)
 141{
 142	struct ip_tunnel_info *info;
 143	struct ip_tunnel_key *key;
 144
 145	/* A quick check to see if there might be something to do. */
 146	if (!dissector_uses_key(flow_dissector,
 147				FLOW_DISSECTOR_KEY_ENC_KEYID) &&
 148	    !dissector_uses_key(flow_dissector,
 149				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
 150	    !dissector_uses_key(flow_dissector,
 151				FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
 152	    !dissector_uses_key(flow_dissector,
 153				FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
 154	    !dissector_uses_key(flow_dissector,
 155				FLOW_DISSECTOR_KEY_ENC_PORTS))
 156		return;
 157
 158	info = skb_tunnel_info(skb);
 159	if (!info)
 160		return;
 161
 162	key = &info->key;
 163
 164	switch (ip_tunnel_info_af(info)) {
 165	case AF_INET:
 166		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
 167						   flow_dissector,
 168						   target_container);
 169		if (dissector_uses_key(flow_dissector,
 170				       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
 171			struct flow_dissector_key_ipv4_addrs *ipv4;
 172
 173			ipv4 = skb_flow_dissector_target(flow_dissector,
 174							 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
 175							 target_container);
 176			ipv4->src = key->u.ipv4.src;
 177			ipv4->dst = key->u.ipv4.dst;
 178		}
 179		break;
 180	case AF_INET6:
 181		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
 182						   flow_dissector,
 183						   target_container);
 184		if (dissector_uses_key(flow_dissector,
 185				       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
 186			struct flow_dissector_key_ipv6_addrs *ipv6;
 187
 188			ipv6 = skb_flow_dissector_target(flow_dissector,
 189							 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
 190							 target_container);
 191			ipv6->src = key->u.ipv6.src;
 192			ipv6->dst = key->u.ipv6.dst;
 193		}
 194		break;
 195	}
 196
 197	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
 198		struct flow_dissector_key_keyid *keyid;
 199
 200		keyid = skb_flow_dissector_target(flow_dissector,
 201						  FLOW_DISSECTOR_KEY_ENC_KEYID,
 202						  target_container);
 203		keyid->keyid = tunnel_id_to_key32(key->tun_id);
 204	}
 205
 206	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
 207		struct flow_dissector_key_ports *tp;
 208
 209		tp = skb_flow_dissector_target(flow_dissector,
 210					       FLOW_DISSECTOR_KEY_ENC_PORTS,
 211					       target_container);
 212		tp->src = key->tp_src;
 213		tp->dst = key->tp_dst;
 214	}
 215}
 216EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
 217
 218static enum flow_dissect_ret
 219__skb_flow_dissect_mpls(const struct sk_buff *skb,
 220			struct flow_dissector *flow_dissector,
 221			void *target_container, void *data, int nhoff, int hlen)
 222{
 223	struct flow_dissector_key_keyid *key_keyid;
 224	struct mpls_label *hdr, _hdr[2];
 225	u32 entry, label;
 226
 227	if (!dissector_uses_key(flow_dissector,
 228				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
 229	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
 230		return FLOW_DISSECT_RET_OUT_GOOD;
 231
 232	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
 233				   hlen, &_hdr);
 234	if (!hdr)
 235		return FLOW_DISSECT_RET_OUT_BAD;
 236
 237	entry = ntohl(hdr[0].entry);
 238	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
 239
 240	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
 241		struct flow_dissector_key_mpls *key_mpls;
 242
 243		key_mpls = skb_flow_dissector_target(flow_dissector,
 244						     FLOW_DISSECTOR_KEY_MPLS,
 245						     target_container);
 246		key_mpls->mpls_label = label;
 247		key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
 248					>> MPLS_LS_TTL_SHIFT;
 249		key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
 250					>> MPLS_LS_TC_SHIFT;
 251		key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
 252					>> MPLS_LS_S_SHIFT;
 253	}
 254
 255	if (label == MPLS_LABEL_ENTROPY) {
 256		key_keyid = skb_flow_dissector_target(flow_dissector,
 257						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
 258						      target_container);
 259		key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
 260	}
 261	return FLOW_DISSECT_RET_OUT_GOOD;
 262}
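
A worked decode of one label-stack entry using the masks and shifts referenced above (the field values are illustrative):

/*
 * For label 100, TC 5, bottom-of-stack 1, TTL 64 the entry after
 * ntohl() is:
 *   (100 << MPLS_LS_LABEL_SHIFT) | (5 << MPLS_LS_TC_SHIFT) |
 *   (1 << MPLS_LS_S_SHIFT) | 64 = 0x00064b40
 * and masking with MPLS_LS_{LABEL,TC,S,TTL}_MASK recovers exactly
 * those four fields.
 */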
 263
 264static enum flow_dissect_ret
 265__skb_flow_dissect_arp(const struct sk_buff *skb,
 266		       struct flow_dissector *flow_dissector,
 267		       void *target_container, void *data, int nhoff, int hlen)
 268{
 269	struct flow_dissector_key_arp *key_arp;
 270	struct {
 271		unsigned char ar_sha[ETH_ALEN];
 272		unsigned char ar_sip[4];
 273		unsigned char ar_tha[ETH_ALEN];
 274		unsigned char ar_tip[4];
 275	} *arp_eth, _arp_eth;
 276	const struct arphdr *arp;
 277	struct arphdr _arp;
 278
 279	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
 280		return FLOW_DISSECT_RET_OUT_GOOD;
 281
 282	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
 283				   hlen, &_arp);
 284	if (!arp)
 285		return FLOW_DISSECT_RET_OUT_BAD;
 286
 287	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
 288	    arp->ar_pro != htons(ETH_P_IP) ||
 289	    arp->ar_hln != ETH_ALEN ||
 290	    arp->ar_pln != 4 ||
 291	    (arp->ar_op != htons(ARPOP_REPLY) &&
 292	     arp->ar_op != htons(ARPOP_REQUEST)))
 293		return FLOW_DISSECT_RET_OUT_BAD;
 294
 295	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
 296				       sizeof(_arp_eth), data,
 297				       hlen, &_arp_eth);
 298	if (!arp_eth)
 299		return FLOW_DISSECT_RET_OUT_BAD;
 300
 301	key_arp = skb_flow_dissector_target(flow_dissector,
 302					    FLOW_DISSECTOR_KEY_ARP,
 303					    target_container);
 304
 305	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
 306	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
 307
 308	/* Only store the lower byte of the opcode;
 309	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
 310	 */
 311	key_arp->op = ntohs(arp->ar_op) & 0xff;
 312
 313	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
 314	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
 315
 316	return FLOW_DISSECT_RET_OUT_GOOD;
 317}
 318
 319static enum flow_dissect_ret
 320__skb_flow_dissect_gre(const struct sk_buff *skb,
 321		       struct flow_dissector_key_control *key_control,
 322		       struct flow_dissector *flow_dissector,
 323		       void *target_container, void *data,
 324		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
 325		       unsigned int flags)
 326{
 327	struct flow_dissector_key_keyid *key_keyid;
 328	struct gre_base_hdr *hdr, _hdr;
 329	int offset = 0;
 330	u16 gre_ver;
 331
 332	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
 333				   data, *p_hlen, &_hdr);
 334	if (!hdr)
 335		return FLOW_DISSECT_RET_OUT_BAD;
 336
 337	/* Only look inside GRE without routing */
 338	if (hdr->flags & GRE_ROUTING)
 339		return FLOW_DISSECT_RET_OUT_GOOD;
 340
 341	/* Only look inside GRE for version 0 and 1 */
 342	gre_ver = ntohs(hdr->flags & GRE_VERSION);
 343	if (gre_ver > 1)
 344		return FLOW_DISSECT_RET_OUT_GOOD;
 345
 346	*p_proto = hdr->protocol;
 347	if (gre_ver) {
 348		/* Version 1 must be PPTP, with the key flag set */
 349		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
 350			return FLOW_DISSECT_RET_OUT_GOOD;
 351	}
 352
 353	offset += sizeof(struct gre_base_hdr);
 354
 355	if (hdr->flags & GRE_CSUM)
 356		offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
 357			  sizeof(((struct gre_full_hdr *) 0)->reserved1);
 358
 359	if (hdr->flags & GRE_KEY) {
 360		const __be32 *keyid;
 361		__be32 _keyid;
 362
 363		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
 364					     sizeof(_keyid),
 365					     data, *p_hlen, &_keyid);
 366		if (!keyid)
 367			return FLOW_DISSECT_RET_OUT_BAD;
 368
 369		if (dissector_uses_key(flow_dissector,
 370				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
 371			key_keyid = skb_flow_dissector_target(flow_dissector,
 372							      FLOW_DISSECTOR_KEY_GRE_KEYID,
 373							      target_container);
 374			if (gre_ver == 0)
 375				key_keyid->keyid = *keyid;
 376			else
 377				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
 378		}
 379		offset += sizeof(((struct gre_full_hdr *) 0)->key);
 380	}
 381
 382	if (hdr->flags & GRE_SEQ)
 383		offset += sizeof(((struct pptp_gre_header *) 0)->seq);
 384
 385	if (gre_ver == 0) {
 386		if (*p_proto == htons(ETH_P_TEB)) {
 387			const struct ethhdr *eth;
 388			struct ethhdr _eth;
 389
 390			eth = __skb_header_pointer(skb, *p_nhoff + offset,
 391						   sizeof(_eth),
 392						   data, *p_hlen, &_eth);
 393			if (!eth)
 394				return FLOW_DISSECT_RET_OUT_BAD;
 395			*p_proto = eth->h_proto;
 396			offset += sizeof(*eth);
 397
 398			/* Cap headers that we access via pointers at the
 399			 * end of the Ethernet header as our maximum alignment
 400			 * at that point is only 2 bytes.
 401			 */
 402			if (NET_IP_ALIGN)
 403				*p_hlen = *p_nhoff + offset;
 404		}
 405	} else { /* version 1, must be PPTP */
 406		u8 _ppp_hdr[PPP_HDRLEN];
 407		u8 *ppp_hdr;
 408
 409		if (hdr->flags & GRE_ACK)
 410			offset += sizeof(((struct pptp_gre_header *) 0)->ack);
 411
 412		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
 413					       sizeof(_ppp_hdr),
 414					       data, *p_hlen, _ppp_hdr);
 415		if (!ppp_hdr)
 416			return FLOW_DISSECT_RET_OUT_BAD;
 417
 418		switch (PPP_PROTOCOL(ppp_hdr)) {
 419		case PPP_IP:
 420			*p_proto = htons(ETH_P_IP);
 421			break;
 422		case PPP_IPV6:
 423			*p_proto = htons(ETH_P_IPV6);
 424			break;
 425		default:
 426			/* Could probably catch some more like MPLS */
 427			break;
 428		}
 429
 430		offset += PPP_HDRLEN;
 431	}
 432
 433	*p_nhoff += offset;
 434	key_control->flags |= FLOW_DIS_ENCAPSULATION;
 435	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
 436		return FLOW_DISSECT_RET_OUT_GOOD;
 437
 438	return FLOW_DISSECT_RET_PROTO_AGAIN;
 439}
 440
 441/**
 442 * __skb_flow_dissect_batadv() - dissect batman-adv header
 443 * @skb: sk_buff with the batman-adv header
 444 * @key_control: flow dissectors control key
 445 * @data: raw buffer pointer to the packet, if NULL use skb->data
 446 * @p_proto: pointer used to update the protocol to process next
 447 * @p_nhoff: pointer used to update inner network header offset
 448 * @hlen: packet header length
 449 * @flags: any combination of FLOW_DISSECTOR_F_*
 450 *
 451 * The function tries to dissect ETH_P_BATMAN packets. Only
 452 * &struct batadv_unicast packets are actually processed because they contain an
 453 * inner Ethernet header and are usually followed by an actual network header.
 454 * This allows the flow dissector to continue processing the packet.
 455 *
 456 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
 457 *  FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
 458 *  otherwise FLOW_DISSECT_RET_OUT_BAD
 459 */
 460static enum flow_dissect_ret
 461__skb_flow_dissect_batadv(const struct sk_buff *skb,
 462			  struct flow_dissector_key_control *key_control,
 463			  void *data, __be16 *p_proto, int *p_nhoff, int hlen,
 464			  unsigned int flags)
 465{
 466	struct {
 467		struct batadv_unicast_packet batadv_unicast;
 468		struct ethhdr eth;
 469	} *hdr, _hdr;
 470
 471	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
 472				   &_hdr);
 473	if (!hdr)
 474		return FLOW_DISSECT_RET_OUT_BAD;
 475
 476	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
 477		return FLOW_DISSECT_RET_OUT_BAD;
 478
 479	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
 480		return FLOW_DISSECT_RET_OUT_BAD;
 481
 482	*p_proto = hdr->eth.h_proto;
 483	*p_nhoff += sizeof(*hdr);
 484
 485	key_control->flags |= FLOW_DIS_ENCAPSULATION;
 486	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
 487		return FLOW_DISSECT_RET_OUT_GOOD;
 488
 489	return FLOW_DISSECT_RET_PROTO_AGAIN;
 490}
 491
 492static void
 493__skb_flow_dissect_tcp(const struct sk_buff *skb,
 494		       struct flow_dissector *flow_dissector,
 495		       void *target_container, void *data, int thoff, int hlen)
 496{
 497	struct flow_dissector_key_tcp *key_tcp;
 498	struct tcphdr *th, _th;
 499
 500	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
 501		return;
 502
 503	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
 504	if (!th)
 505		return;
 506
 507	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
 508		return;
 509
 510	key_tcp = skb_flow_dissector_target(flow_dissector,
 511					    FLOW_DISSECTOR_KEY_TCP,
 512					    target_container);
 513	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
 514}
 515
 516static void
 517__skb_flow_dissect_ipv4(const struct sk_buff *skb,
 518			struct flow_dissector *flow_dissector,
 519			void *target_container, void *data, const struct iphdr *iph)
 520{
 521	struct flow_dissector_key_ip *key_ip;
 522
 523	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
 524		return;
 525
 526	key_ip = skb_flow_dissector_target(flow_dissector,
 527					   FLOW_DISSECTOR_KEY_IP,
 528					   target_container);
 529	key_ip->tos = iph->tos;
 530	key_ip->ttl = iph->ttl;
 531}
 532
 533static void
 534__skb_flow_dissect_ipv6(const struct sk_buff *skb,
 535			struct flow_dissector *flow_dissector,
 536			void *target_container, void *data, const struct ipv6hdr *iph)
 537{
 538	struct flow_dissector_key_ip *key_ip;
 539
 540	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
 541		return;
 542
 543	key_ip = skb_flow_dissector_target(flow_dissector,
 544					   FLOW_DISSECTOR_KEY_IP,
 545					   target_container);
 546	key_ip->tos = ipv6_get_dsfield(iph);
 547	key_ip->ttl = iph->hop_limit;
 548}
 549
 550/* Maximum number of protocol headers that can be parsed in
 551 * __skb_flow_dissect
 552 */
 553#define MAX_FLOW_DISSECT_HDRS	15
 554
 555static bool skb_flow_dissect_allowed(int *num_hdrs)
 556{
 557	++*num_hdrs;
 558
 559	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
 560}
 561
 562/**
 563 * __skb_flow_dissect - extract the flow_keys struct and return it
 564 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 565 * @flow_dissector: list of keys to dissect
 566 * @target_container: target structure to put dissected values into
 567 * @data: raw buffer pointer to the packet, if NULL use skb->data
 568 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 569 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 570 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 571 *
 572 * The function will try to retrieve individual keys into the target specified
 573 * by flow_dissector, from either the skbuff or a raw buffer specified by the
 574 * remaining parameters.
 575 *
 576 * Caller must take care of zeroing target container memory.
 577 */
 578bool __skb_flow_dissect(const struct sk_buff *skb,
 579			struct flow_dissector *flow_dissector,
 580			void *target_container,
 581			void *data, __be16 proto, int nhoff, int hlen,
 582			unsigned int flags)
 583{
 584	struct flow_dissector_key_control *key_control;
 585	struct flow_dissector_key_basic *key_basic;
 586	struct flow_dissector_key_addrs *key_addrs;
 587	struct flow_dissector_key_ports *key_ports;
 588	struct flow_dissector_key_icmp *key_icmp;
 589	struct flow_dissector_key_tags *key_tags;
 590	struct flow_dissector_key_vlan *key_vlan;
 591	enum flow_dissect_ret fdret;
 592	bool skip_vlan = false;
 593	int num_hdrs = 0;
 594	u8 ip_proto = 0;
 595	bool ret;
 596
 597	if (!data) {
 598		data = skb->data;
 599		proto = skb_vlan_tag_present(skb) ?
 600			 skb->vlan_proto : skb->protocol;
 601		nhoff = skb_network_offset(skb);
 602		hlen = skb_headlen(skb);
 603#if IS_ENABLED(CONFIG_NET_DSA)
 604		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
 605			const struct dsa_device_ops *ops;
 606			int offset;
 607
 608			ops = skb->dev->dsa_ptr->tag_ops;
 609			if (ops->flow_dissect &&
 610			    !ops->flow_dissect(skb, &proto, &offset)) {
 611				hlen -= offset;
 612				nhoff += offset;
 613			}
 614		}
 615#endif
 616	}
 617
 618	/* It is ensured by skb_flow_dissector_init() that the control key
 619	 * will always be present.
 620	 */
 621	key_control = skb_flow_dissector_target(flow_dissector,
 622						FLOW_DISSECTOR_KEY_CONTROL,
 623						target_container);
 624
 625	/* It is ensured by skb_flow_dissector_init() that the basic key
 626	 * will always be present.
 627	 */
 628	key_basic = skb_flow_dissector_target(flow_dissector,
 629					      FLOW_DISSECTOR_KEY_BASIC,
 630					      target_container);
 631
 632	if (dissector_uses_key(flow_dissector,
 633			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 634		struct ethhdr *eth = eth_hdr(skb);
 635		struct flow_dissector_key_eth_addrs *key_eth_addrs;
 636
 637		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
 638							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
 639							  target_container);
 640		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
 641	}
 642
 643proto_again:
 644	fdret = FLOW_DISSECT_RET_CONTINUE;
 645
 646	switch (proto) {
 647	case htons(ETH_P_IP): {
 648		const struct iphdr *iph;
 649		struct iphdr _iph;
 650
 651		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
 652		if (!iph || iph->ihl < 5) {
 653			fdret = FLOW_DISSECT_RET_OUT_BAD;
 654			break;
 655		}
 656
 657		nhoff += iph->ihl * 4;
 658
 659		ip_proto = iph->protocol;
 660
 661		if (dissector_uses_key(flow_dissector,
 662				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
 663			key_addrs = skb_flow_dissector_target(flow_dissector,
 664							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
 665							      target_container);
 666
 667			memcpy(&key_addrs->v4addrs, &iph->saddr,
 668			       sizeof(key_addrs->v4addrs));
 669			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 670		}
 671
 672		if (ip_is_fragment(iph)) {
 673			key_control->flags |= FLOW_DIS_IS_FRAGMENT;
 674
 675			if (iph->frag_off & htons(IP_OFFSET)) {
 676				fdret = FLOW_DISSECT_RET_OUT_GOOD;
 677				break;
 678			} else {
 679				key_control->flags |= FLOW_DIS_FIRST_FRAG;
 680				if (!(flags &
 681				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
 682					fdret = FLOW_DISSECT_RET_OUT_GOOD;
 683					break;
 684				}
 685			}
 686		}
 687
 688		__skb_flow_dissect_ipv4(skb, flow_dissector,
 689					target_container, data, iph);
 690
 691		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) {
 692			fdret = FLOW_DISSECT_RET_OUT_GOOD;
 693			break;
 694		}
 695
 696		break;
 697	}
 698	case htons(ETH_P_IPV6): {
 699		const struct ipv6hdr *iph;
 700		struct ipv6hdr _iph;
 701
 702		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
 703		if (!iph) {
 704			fdret = FLOW_DISSECT_RET_OUT_BAD;
 705			break;
 706		}
 707
 708		ip_proto = iph->nexthdr;
 709		nhoff += sizeof(struct ipv6hdr);
 710
 711		if (dissector_uses_key(flow_dissector,
 712				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
 713			key_addrs = skb_flow_dissector_target(flow_dissector,
 714							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
 715							      target_container);
 716
 717			memcpy(&key_addrs->v6addrs, &iph->saddr,
 718			       sizeof(key_addrs->v6addrs));
 719			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 720		}
 721
 722		if ((dissector_uses_key(flow_dissector,
 723					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
 724		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
 725		    ip6_flowlabel(iph)) {
 726			__be32 flow_label = ip6_flowlabel(iph);
 727
 728			if (dissector_uses_key(flow_dissector,
 729					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
 730				key_tags = skb_flow_dissector_target(flow_dissector,
 731								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
 732								     target_container);
 733				key_tags->flow_label = ntohl(flow_label);
 734			}
 735			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
 736				fdret = FLOW_DISSECT_RET_OUT_GOOD;
 737				break;
 738			}
 739		}
 740
 741		__skb_flow_dissect_ipv6(skb, flow_dissector,
 742					target_container, data, iph);
 743
 744		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
 745			fdret = FLOW_DISSECT_RET_OUT_GOOD;
 746
 747		break;
 748	}
 749	case htons(ETH_P_8021AD):
 750	case htons(ETH_P_8021Q): {
 751		const struct vlan_hdr *vlan;
 752		struct vlan_hdr _vlan;
 753		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
 754
 755		if (vlan_tag_present)
 756			proto = skb->protocol;
 757
 758		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
 759			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
 760						    data, hlen, &_vlan);
 761			if (!vlan) {
 762				fdret = FLOW_DISSECT_RET_OUT_BAD;
 763				break;
 764			}
 765
 766			proto = vlan->h_vlan_encapsulated_proto;
 767			nhoff += sizeof(*vlan);
 768			if (skip_vlan) {
 769				fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
 770				break;
 771			}
 772		}
 773
 774		skip_vlan = true;
 775		if (dissector_uses_key(flow_dissector,
 776				       FLOW_DISSECTOR_KEY_VLAN)) {
 777			key_vlan = skb_flow_dissector_target(flow_dissector,
 778							     FLOW_DISSECTOR_KEY_VLAN,
 779							     target_container);
 780
 781			if (vlan_tag_present) {
 782				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
 783				key_vlan->vlan_priority =
 784					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
 785			} else {
 786				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
 787					VLAN_VID_MASK;
 788				key_vlan->vlan_priority =
 789					(ntohs(vlan->h_vlan_TCI) &
 790					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 791			}
 792		}
 793
 794		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
 795		break;
 796	}
 797	case htons(ETH_P_PPP_SES): {
 798		struct {
 799			struct pppoe_hdr hdr;
 800			__be16 proto;
 801		} *hdr, _hdr;
 802		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
 803		if (!hdr) {
 804			fdret = FLOW_DISSECT_RET_OUT_BAD;
 805			break;
 806		}
 807
 808		proto = hdr->proto;
 809		nhoff += PPPOE_SES_HLEN;
 810		switch (proto) {
 811		case htons(PPP_IP):
 812			proto = htons(ETH_P_IP);
 813			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
 814			break;
 815		case htons(PPP_IPV6):
 816			proto = htons(ETH_P_IPV6);
 817			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
 818			break;
 819		default:
 820			fdret = FLOW_DISSECT_RET_OUT_BAD;
 821			break;
 822		}
 823		break;
 824	}
 825	case htons(ETH_P_TIPC): {
 826		struct tipc_basic_hdr *hdr, _hdr;
 827
 828		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
 829					   data, hlen, &_hdr);
 830		if (!hdr) {
 831			fdret = FLOW_DISSECT_RET_OUT_BAD;
 832			break;
 833		}
 834
 835		if (dissector_uses_key(flow_dissector,
 836				       FLOW_DISSECTOR_KEY_TIPC)) {
 837			key_addrs = skb_flow_dissector_target(flow_dissector,
 838							      FLOW_DISSECTOR_KEY_TIPC,
 839							      target_container);
 840			key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
 841			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
 842		}
 843		fdret = FLOW_DISSECT_RET_OUT_GOOD;
 844		break;
 845	}
 846
 847	case htons(ETH_P_MPLS_UC):
 848	case htons(ETH_P_MPLS_MC):
 849		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
 850						target_container, data,
 851						nhoff, hlen);
 852		break;
 853	case htons(ETH_P_FCOE):
 854		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
 855			fdret = FLOW_DISSECT_RET_OUT_BAD;
 856			break;
 857		}
 858
 859		nhoff += FCOE_HEADER_LEN;
 860		fdret = FLOW_DISSECT_RET_OUT_GOOD;
 861		break;
 862
 863	case htons(ETH_P_ARP):
 864	case htons(ETH_P_RARP):
 865		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
 866					       target_container, data,
 867					       nhoff, hlen);
 868		break;
 869
 870	case htons(ETH_P_BATMAN):
 871		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
 872						  &proto, &nhoff, hlen, flags);
 873		break;
 874
 875	default:
 876		fdret = FLOW_DISSECT_RET_OUT_BAD;
 877		break;
 878	}
 879
 880	/* Process result of proto processing */
 881	switch (fdret) {
 882	case FLOW_DISSECT_RET_OUT_GOOD:
 883		goto out_good;
 884	case FLOW_DISSECT_RET_PROTO_AGAIN:
 885		if (skb_flow_dissect_allowed(&num_hdrs))
 886			goto proto_again;
 887		goto out_good;
 888	case FLOW_DISSECT_RET_CONTINUE:
 889	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
 890		break;
 891	case FLOW_DISSECT_RET_OUT_BAD:
 892	default:
 893		goto out_bad;
 894	}
 895
 896ip_proto_again:
 897	fdret = FLOW_DISSECT_RET_CONTINUE;
 898
 899	switch (ip_proto) {
 900	case IPPROTO_GRE:
 901		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
 902					       target_container, data,
 903					       &proto, &nhoff, &hlen, flags);
 904		break;
 905
 906	case NEXTHDR_HOP:
 907	case NEXTHDR_ROUTING:
 908	case NEXTHDR_DEST: {
 909		u8 _opthdr[2], *opthdr;
 910
 911		if (proto != htons(ETH_P_IPV6))
 912			break;
 913
 914		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
 915					      data, hlen, &_opthdr);
 916		if (!opthdr) {
 917			fdret = FLOW_DISSECT_RET_OUT_BAD;
 918			break;
 919		}
 920
 921		ip_proto = opthdr[0];
 922		nhoff += (opthdr[1] + 1) << 3;
 923
 924		fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
 925		break;
 926	}
 927	case NEXTHDR_FRAGMENT: {
 928		struct frag_hdr _fh, *fh;
 929
 930		if (proto != htons(ETH_P_IPV6))
 931			break;
 932
 933		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
 934					  data, hlen, &_fh);
 935
 936		if (!fh) {
 937			fdret = FLOW_DISSECT_RET_OUT_BAD;
 938			break;
 939		}
 940
 941		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
 942
 943		nhoff += sizeof(_fh);
 944		ip_proto = fh->nexthdr;
 945
 946		if (!(fh->frag_off & htons(IP6_OFFSET))) {
 947			key_control->flags |= FLOW_DIS_FIRST_FRAG;
 948			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
 949				fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
 950				break;
 951			}
 952		}
 953
 954		fdret = FLOW_DISSECT_RET_OUT_GOOD;
 955		break;
 956	}
 957	case IPPROTO_IPIP:
 958		proto = htons(ETH_P_IP);
 959
 960		key_control->flags |= FLOW_DIS_ENCAPSULATION;
 961		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
 962			fdret = FLOW_DISSECT_RET_OUT_GOOD;
 963			break;
 964		}
 965
 966		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
 967		break;
 968
 969	case IPPROTO_IPV6:
 970		proto = htons(ETH_P_IPV6);
 971
 972		key_control->flags |= FLOW_DIS_ENCAPSULATION;
 973		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
 974			fdret = FLOW_DISSECT_RET_OUT_GOOD;
 975			break;
 976		}
 977
 978		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
 979		break;
 980
 981
 982	case IPPROTO_MPLS:
 983		proto = htons(ETH_P_MPLS_UC);
 984		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
 985		break;
 986
 987	case IPPROTO_TCP:
 988		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
 989				       data, nhoff, hlen);
 990		break;
 991
 992	default:
 993		break;
 994	}
 995
 996	if (dissector_uses_key(flow_dissector,
 997			       FLOW_DISSECTOR_KEY_PORTS)) {
 998		key_ports = skb_flow_dissector_target(flow_dissector,
 999						      FLOW_DISSECTOR_KEY_PORTS,
1000						      target_container);
1001		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
1002							data, hlen);
1003	}
1004
1005	if (dissector_uses_key(flow_dissector,
1006			       FLOW_DISSECTOR_KEY_ICMP)) {
1007		key_icmp = skb_flow_dissector_target(flow_dissector,
1008						     FLOW_DISSECTOR_KEY_ICMP,
1009						     target_container);
1010		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
1011	}
1012
1013	/* Process result of IP proto processing */
1014	switch (fdret) {
1015	case FLOW_DISSECT_RET_PROTO_AGAIN:
1016		if (skb_flow_dissect_allowed(&num_hdrs))
1017			goto proto_again;
1018		break;
1019	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
1020		if (skb_flow_dissect_allowed(&num_hdrs))
1021			goto ip_proto_again;
1022		break;
1023	case FLOW_DISSECT_RET_OUT_GOOD:
1024	case FLOW_DISSECT_RET_CONTINUE:
1025		break;
1026	case FLOW_DISSECT_RET_OUT_BAD:
1027	default:
1028		goto out_bad;
1029	}
1030
1031out_good:
1032	ret = true;
1033
1034out:
1035	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
1036	key_basic->n_proto = proto;
1037	key_basic->ip_proto = ip_proto;
1038
1039	return ret;
1040
1041out_bad:
1042	ret = false;
1043	goto out;
1044}
1045EXPORT_SYMBOL(__skb_flow_dissect);
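
A hedged sketch of the common calling convention for the dissector above: most in-kernel users go through the skb_flow_dissect_flow_keys() inline from <linux/skbuff.h>, which zeroes a struct flow_keys and passes the exported flow_keys_dissector. example_dissect() below just spells that out.

static bool example_dissect(const struct sk_buff *skb, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));	/* callers must zero the container */
	return __skb_flow_dissect(skb, &flow_keys_dissector, keys,
				  NULL, 0, 0, 0, 0);
}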
1046
1047static u32 hashrnd __read_mostly;
1048static __always_inline void __flow_hash_secret_init(void)
1049{
1050	net_get_random_once(&hashrnd, sizeof(hashrnd));
1051}
1052
1053static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
1054					     u32 keyval)
1055{
1056	return jhash2(words, length, keyval);
1057}
1058
1059static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
1060{
1061	const void *p = flow;
1062
1063	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
1064	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
1065}
1066
1067static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
1068{
1069	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
1070	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
1071	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
1072		     sizeof(*flow) - sizeof(flow->addrs));
1073
1074	switch (flow->control.addr_type) {
1075	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1076		diff -= sizeof(flow->addrs.v4addrs);
1077		break;
1078	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1079		diff -= sizeof(flow->addrs.v6addrs);
1080		break;
1081	case FLOW_DISSECTOR_KEY_TIPC:
1082		diff -= sizeof(flow->addrs.tipckey);
1083		break;
1084	}
1085	return (sizeof(*flow) - diff) / sizeof(u32);
1086}
1087
1088__be32 flow_get_u32_src(const struct flow_keys *flow)
1089{
1090	switch (flow->control.addr_type) {
1091	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1092		return flow->addrs.v4addrs.src;
1093	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1094		return (__force __be32)ipv6_addr_hash(
1095			&flow->addrs.v6addrs.src);
1096	case FLOW_DISSECTOR_KEY_TIPC:
1097		return flow->addrs.tipckey.key;
1098	default:
1099		return 0;
1100	}
1101}
1102EXPORT_SYMBOL(flow_get_u32_src);
1103
1104__be32 flow_get_u32_dst(const struct flow_keys *flow)
1105{
1106	switch (flow->control.addr_type) {
1107	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1108		return flow->addrs.v4addrs.dst;
1109	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1110		return (__force __be32)ipv6_addr_hash(
1111			&flow->addrs.v6addrs.dst);
1112	default:
1113		return 0;
1114	}
1115}
1116EXPORT_SYMBOL(flow_get_u32_dst);
1117
1118static inline void __flow_hash_consistentify(struct flow_keys *keys)
1119{
1120	int addr_diff, i;
1121
1122	switch (keys->control.addr_type) {
1123	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1124		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
1125			    (__force u32)keys->addrs.v4addrs.src;
1126		if ((addr_diff < 0) ||
1127		    (addr_diff == 0 &&
1128		     ((__force u16)keys->ports.dst <
1129		      (__force u16)keys->ports.src))) {
1130			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
1131			swap(keys->ports.src, keys->ports.dst);
1132		}
1133		break;
1134	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1135		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
1136				   &keys->addrs.v6addrs.src,
1137				   sizeof(keys->addrs.v6addrs.dst));
1138		if ((addr_diff < 0) ||
1139		    (addr_diff == 0 &&
1140		     ((__force u16)keys->ports.dst <
1141		      (__force u16)keys->ports.src))) {
1142			for (i = 0; i < 4; i++)
1143				swap(keys->addrs.v6addrs.src.s6_addr32[i],
1144				     keys->addrs.v6addrs.dst.s6_addr32[i]);
1145			swap(keys->ports.src, keys->ports.dst);
1146		}
1147		break;
1148	}
1149}
1150
1151static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
1152{
1153	u32 hash;
1154
1155	__flow_hash_consistentify(keys);
1156
1157	hash = __flow_hash_words(flow_keys_hash_start(keys),
1158				 flow_keys_hash_length(keys), keyval);
1159	if (!hash)
1160		hash = 1;
1161
1162	return hash;
1163}
1164
1165u32 flow_hash_from_keys(struct flow_keys *keys)
1166{
1167	__flow_hash_secret_init();
1168	return __flow_hash_from_keys(keys, hashrnd);
1169}
1170EXPORT_SYMBOL(flow_hash_from_keys);
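
An illustrative sketch of hashing a hand-built IPv4 tuple the way the stack would; the addresses and ports below are hypothetical, and the field names come from struct flow_keys.

static u32 example_hash_tuple(void)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));
	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys.addrs.v4addrs.src = htonl(0x0a000001);	/* 10.0.0.1 */
	keys.addrs.v4addrs.dst = htonl(0x0a000002);	/* 10.0.0.2 */
	keys.basic.ip_proto = IPPROTO_TCP;
	keys.ports.src = htons(12345);
	keys.ports.dst = htons(80);

	/* Direction-independent (consistentified) and never zero */
	return flow_hash_from_keys(&keys);
}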
1171
1172static inline u32 ___skb_get_hash(const struct sk_buff *skb,
1173				  struct flow_keys *keys, u32 keyval)
1174{
1175	skb_flow_dissect_flow_keys(skb, keys,
1176				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1177
1178	return __flow_hash_from_keys(keys, keyval);
1179}
1180
1181struct _flow_keys_digest_data {
1182	__be16	n_proto;
1183	u8	ip_proto;
1184	u8	padding;
1185	__be32	ports;
1186	__be32	src;
1187	__be32	dst;
1188};
1189
1190void make_flow_keys_digest(struct flow_keys_digest *digest,
1191			   const struct flow_keys *flow)
1192{
1193	struct _flow_keys_digest_data *data =
1194	    (struct _flow_keys_digest_data *)digest;
1195
1196	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
1197
1198	memset(digest, 0, sizeof(*digest));
1199
1200	data->n_proto = flow->basic.n_proto;
1201	data->ip_proto = flow->basic.ip_proto;
1202	data->ports = flow->ports.ports;
1203	data->src = flow->addrs.v4addrs.src;
1204	data->dst = flow->addrs.v4addrs.dst;
1205}
1206EXPORT_SYMBOL(make_flow_keys_digest);
1207
1208static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
1209
1210u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
1211{
1212	struct flow_keys keys;
1213
1214	__flow_hash_secret_init();
1215
1216	memset(&keys, 0, sizeof(keys));
1217	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
1218			   NULL, 0, 0, 0,
1219			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1220
1221	return __flow_hash_from_keys(&keys, hashrnd);
1222}
1223EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
1224
1225/**
1226 * __skb_get_hash: calculate a flow hash
1227 * @skb: sk_buff to calculate flow hash from
1228 *
1229 * This function calculates a flow hash based on src/dst addresses
1230 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
1231 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
1232 * if hash is a canonical 4-tuple hash over transport ports.
1233 */
1234void __skb_get_hash(struct sk_buff *skb)
1235{
1236	struct flow_keys keys;
1237	u32 hash;
1238
1239	__flow_hash_secret_init();
1240
1241	hash = ___skb_get_hash(skb, &keys, hashrnd);
1242
1243	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1244}
1245EXPORT_SYMBOL(__skb_get_hash);
1246
1247__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
1248{
1249	struct flow_keys keys;
1250
1251	return ___skb_get_hash(skb, &keys, perturb);
1252}
1253EXPORT_SYMBOL(skb_get_hash_perturb);
1254
1255u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1256		   const struct flow_keys *keys, int hlen)
1257{
1258	u32 poff = keys->control.thoff;
1259
1260	/* skip L4 headers for fragments after the first */
1261	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
1262	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
1263		return poff;
1264
1265	switch (keys->basic.ip_proto) {
1266	case IPPROTO_TCP: {
1267		/* access doff as u8 to avoid unaligned access */
1268		const u8 *doff;
1269		u8 _doff;
1270
1271		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
1272					    data, hlen, &_doff);
1273		if (!doff)
1274			return poff;
1275
1276		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
1277		break;
1278	}
1279	case IPPROTO_UDP:
1280	case IPPROTO_UDPLITE:
1281		poff += sizeof(struct udphdr);
1282		break;
1283	/* For the rest, we do not really care about header
1284	 * extensions at this point for now.
1285	 */
1286	case IPPROTO_ICMP:
1287		poff += sizeof(struct icmphdr);
1288		break;
1289	case IPPROTO_ICMPV6:
1290		poff += sizeof(struct icmp6hdr);
1291		break;
1292	case IPPROTO_IGMP:
1293		poff += sizeof(struct igmphdr);
1294		break;
1295	case IPPROTO_DCCP:
1296		poff += sizeof(struct dccp_hdr);
1297		break;
1298	case IPPROTO_SCTP:
1299		poff += sizeof(struct sctphdr);
1300		break;
1301	}
1302
1303	return poff;
1304}
1305
1306/**
1307 * skb_get_poff - get the offset to the payload
1308 * @skb: sk_buff to get the payload offset from
1309 *
1310 * The function will get the offset to the payload as far as it could
1311 * be dissected.  The main user is currently BPF, so that we can dynamically
1312 * truncate packets without needing to push actual payload to the user
1313 * space and can analyze headers only, instead.
1314 */
1315u32 skb_get_poff(const struct sk_buff *skb)
1316{
1317	struct flow_keys keys;
1318
1319	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
1320		return 0;
1321
1322	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
1323}
1324
1325__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
1326{
1327	memset(keys, 0, sizeof(*keys));
1328
1329	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
1330	    sizeof(keys->addrs.v6addrs.src));
1331	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
1332	    sizeof(keys->addrs.v6addrs.dst));
1333	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1334	keys->ports.src = fl6->fl6_sport;
1335	keys->ports.dst = fl6->fl6_dport;
1336	keys->keyid.keyid = fl6->fl6_gre_key;
1337	keys->tags.flow_label = (__force u32)fl6->flowlabel;
1338	keys->basic.ip_proto = fl6->flowi6_proto;
1339
1340	return flow_hash_from_keys(keys);
1341}
1342EXPORT_SYMBOL(__get_hash_from_flowi6);
1343
1344static const struct flow_dissector_key flow_keys_dissector_keys[] = {
1345	{
1346		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
1347		.offset = offsetof(struct flow_keys, control),
1348	},
1349	{
1350		.key_id = FLOW_DISSECTOR_KEY_BASIC,
1351		.offset = offsetof(struct flow_keys, basic),
1352	},
1353	{
1354		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1355		.offset = offsetof(struct flow_keys, addrs.v4addrs),
1356	},
1357	{
1358		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1359		.offset = offsetof(struct flow_keys, addrs.v6addrs),
1360	},
1361	{
1362		.key_id = FLOW_DISSECTOR_KEY_TIPC,
1363		.offset = offsetof(struct flow_keys, addrs.tipckey),
1364	},
1365	{
1366		.key_id = FLOW_DISSECTOR_KEY_PORTS,
1367		.offset = offsetof(struct flow_keys, ports),
1368	},
1369	{
1370		.key_id = FLOW_DISSECTOR_KEY_VLAN,
1371		.offset = offsetof(struct flow_keys, vlan),
1372	},
1373	{
1374		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
1375		.offset = offsetof(struct flow_keys, tags),
1376	},
1377	{
1378		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
1379		.offset = offsetof(struct flow_keys, keyid),
1380	},
1381};
1382
1383static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
1384	{
1385		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
1386		.offset = offsetof(struct flow_keys, control),
1387	},
1388	{
1389		.key_id = FLOW_DISSECTOR_KEY_BASIC,
1390		.offset = offsetof(struct flow_keys, basic),
1391	},
1392	{
1393		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1394		.offset = offsetof(struct flow_keys, addrs.v4addrs),
1395	},
1396	{
1397		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1398		.offset = offsetof(struct flow_keys, addrs.v6addrs),
1399	},
1400	{
1401		.key_id = FLOW_DISSECTOR_KEY_PORTS,
1402		.offset = offsetof(struct flow_keys, ports),
1403	},
1404};
1405
1406static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
1407	{
1408		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
1409		.offset = offsetof(struct flow_keys, control),
1410	},
1411	{
1412		.key_id = FLOW_DISSECTOR_KEY_BASIC,
1413		.offset = offsetof(struct flow_keys, basic),
1414	},
1415};
1416
1417struct flow_dissector flow_keys_dissector __read_mostly;
1418EXPORT_SYMBOL(flow_keys_dissector);
1419
1420struct flow_dissector flow_keys_buf_dissector __read_mostly;
1421
1422static int __init init_default_flow_dissectors(void)
1423{
1424	skb_flow_dissector_init(&flow_keys_dissector,
1425				flow_keys_dissector_keys,
1426				ARRAY_SIZE(flow_keys_dissector_keys));
1427	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
1428				flow_keys_dissector_symmetric_keys,
1429				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
1430	skb_flow_dissector_init(&flow_keys_buf_dissector,
1431				flow_keys_buf_dissector_keys,
1432				ARRAY_SIZE(flow_keys_buf_dissector_keys));
1433	return 0;
1434}
1435
1436core_initcall(init_default_flow_dissectors);