v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2007-2017 Nicira, Inc.
   4 */
   5
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7
   8#include <linux/skbuff.h>
   9#include <linux/in.h>
  10#include <linux/ip.h>
  11#include <linux/openvswitch.h>
  12#include <linux/sctp.h>
  13#include <linux/tcp.h>
  14#include <linux/udp.h>
  15#include <linux/in6.h>
  16#include <linux/if_arp.h>
  17#include <linux/if_vlan.h>
  18
  19#include <net/dst.h>
  20#include <net/ip.h>
  21#include <net/ipv6.h>
  22#include <net/ip6_fib.h>
  23#include <net/checksum.h>
  24#include <net/dsfield.h>
  25#include <net/mpls.h>
  26#include <net/sctp/checksum.h>
  27
  28#include "datapath.h"
  29#include "flow.h"
  30#include "conntrack.h"
  31#include "vport.h"
  32#include "flow_netlink.h"
  33#include "openvswitch_trace.h"
  34
  35struct deferred_action {
  36	struct sk_buff *skb;
  37	const struct nlattr *actions;
  38	int actions_len;
  39
  40	/* Store pkt_key clone when creating deferred action. */
  41	struct sw_flow_key pkt_key;
  42};
  43
  44#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
  45struct ovs_frag_data {
  46	unsigned long dst;
  47	struct vport *vport;
  48	struct ovs_skb_cb cb;
  49	__be16 inner_protocol;
  50	u16 network_offset;	/* valid only for MPLS */
  51	u16 vlan_tci;
  52	__be16 vlan_proto;
  53	unsigned int l2_len;
  54	u8 mac_proto;
  55	u8 l2_data[MAX_L2_LEN];
  56};
  57
  58static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
  59
  60#define DEFERRED_ACTION_FIFO_SIZE 10
  61#define OVS_RECURSION_LIMIT 5
  62#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
  63struct action_fifo {
  64	int head;
  65	int tail;
  66	/* Deferred action fifo queue storage. */
  67	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
  68};
  69
  70struct action_flow_keys {
  71	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
  72};
  73
  74static struct action_fifo __percpu *action_fifos;
  75static struct action_flow_keys __percpu *flow_keys;
  76static DEFINE_PER_CPU(int, exec_actions_level);
  77
  78/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
  79 * space. Return NULL if out of key spaces.
  80 */
  81static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
  82{
  83	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
  84	int level = this_cpu_read(exec_actions_level);
  85	struct sw_flow_key *key = NULL;
  86
  87	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
  88		key = &keys->key[level - 1];
  89		*key = *key_;
  90	}
  91
  92	return key;
  93}
  94
  95static void action_fifo_init(struct action_fifo *fifo)
  96{
  97	fifo->head = 0;
  98	fifo->tail = 0;
  99}
 100
 101static bool action_fifo_is_empty(const struct action_fifo *fifo)
 102{
 103	return (fifo->head == fifo->tail);
 104}
 105
 106static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
 107{
 108	if (action_fifo_is_empty(fifo))
 109		return NULL;
 110
 111	return &fifo->fifo[fifo->tail++];
 112}
 113
 114static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
 115{
 116	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
 117		return NULL;
 118
 119	return &fifo->fifo[fifo->head++];
 120}
 121
 122/* Return the deferred action entry on success, or NULL if the fifo is full. */
 123static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
 124				    const struct sw_flow_key *key,
 125				    const struct nlattr *actions,
 126				    const int actions_len)
 127{
 128	struct action_fifo *fifo;
 129	struct deferred_action *da;
 130
 131	fifo = this_cpu_ptr(action_fifos);
 132	da = action_fifo_put(fifo);
 133	if (da) {
 134		da->skb = skb;
 135		da->actions = actions;
 136		da->actions_len = actions_len;
 137		da->pkt_key = *key;
 138	}
 139
 140	return da;
 141}
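
Because action_fifo_put() refuses the final slot, at most DEFERRED_ACTION_FIFO_SIZE - 1 deferred actions fit before add_deferred_actions() starts returning NULL. A minimal user-space sketch of the same discipline, with plain ints standing in for deferred actions (toy names, not kernel code):

#include <stdio.h>

#define FIFO_SIZE 10	/* stands in for DEFERRED_ACTION_FIFO_SIZE */

struct toy_fifo {
	int head;
	int tail;
	int slot[FIFO_SIZE];
};

/* Mirrors action_fifo_put(): NULL once head reaches FIFO_SIZE - 1. */
static int *toy_put(struct toy_fifo *f)
{
	if (f->head >= FIFO_SIZE - 1)
		return NULL;
	return &f->slot[f->head++];
}

/* Mirrors action_fifo_get(): NULL once the queue is drained. */
static int *toy_get(struct toy_fifo *f)
{
	if (f->head == f->tail)
		return NULL;
	return &f->slot[f->tail++];
}

int main(void)
{
	struct toy_fifo f = { 0 };
	int n = 0, *p;

	while ((p = toy_put(&f)))
		*p = n++;
	printf("stored %d entries\n", n);	/* 9, not 10 */
	while ((p = toy_get(&f)))
		printf("drained %d\n", *p);
	return 0;
}
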
 142
 143static void invalidate_flow_key(struct sw_flow_key *key)
 144{
 145	key->mac_proto |= SW_FLOW_KEY_INVALID;
 146}
 147
 148static bool is_flow_key_valid(const struct sw_flow_key *key)
 149{
 150	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
 151}
 152
 153static int clone_execute(struct datapath *dp, struct sk_buff *skb,
 154			 struct sw_flow_key *key,
 155			 u32 recirc_id,
 156			 const struct nlattr *actions, int len,
 157			 bool last, bool clone_flow_key);
 158
 159static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 160			      struct sw_flow_key *key,
 161			      const struct nlattr *attr, int len);
 162
 163static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 164		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
 165{
 166	int err;
 167
 168	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
 169	if (err)
 170		return err;
 171
 172	if (!mac_len)
 173		key->mac_proto = MAC_PROTO_NONE;
 174
 175	invalidate_flow_key(key);
 176	return 0;
 177}
 178
 179static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 180		    const __be16 ethertype)
 181{
 182	int err;
 183
 184	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
 185			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
 186	if (err)
 187		return err;
 188
 189	if (ethertype == htons(ETH_P_TEB))
 190		key->mac_proto = MAC_PROTO_ETHERNET;
 191
 192	invalidate_flow_key(key);
 193	return 0;
 194}
 195
 196static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 197		    const __be32 *mpls_lse, const __be32 *mask)
 198{
 199	struct mpls_shim_hdr *stack;
 200	__be32 lse;
 201	int err;
 202
 203	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
 204		return -ENOMEM;
 205
 206	stack = mpls_hdr(skb);
 207	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
 208	err = skb_mpls_update_lse(skb, lse);
 209	if (err)
 210		return err;
 211
 212	flow_key->mpls.lse[0] = lse;
 213	return 0;
 214}
 215
 216static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
 217{
 218	int err;
 219
 220	err = skb_vlan_pop(skb);
 221	if (skb_vlan_tag_present(skb)) {
 222		invalidate_flow_key(key);
 223	} else {
 224		key->eth.vlan.tci = 0;
 225		key->eth.vlan.tpid = 0;
 226	}
 227	return err;
 228}
 229
 230static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
 231		     const struct ovs_action_push_vlan *vlan)
 232{
 233	if (skb_vlan_tag_present(skb)) {
 234		invalidate_flow_key(key);
 235	} else {
 236		key->eth.vlan.tci = vlan->vlan_tci;
 237		key->eth.vlan.tpid = vlan->vlan_tpid;
 238	}
 239	return skb_vlan_push(skb, vlan->vlan_tpid,
 240			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
 241}
 242
 243/* 'src' is already properly masked. */
 244static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
 245{
 246	u16 *dst = (u16 *)dst_;
 247	const u16 *src = (const u16 *)src_;
 248	const u16 *mask = (const u16 *)mask_;
 249
 250	OVS_SET_MASKED(dst[0], src[0], mask[0]);
 251	OVS_SET_MASKED(dst[1], src[1], mask[1]);
 252	OVS_SET_MASKED(dst[2], src[2], mask[2]);
 253}
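
ether_addr_copy_masked() leans on OVS_SET_MASKED, whose usual definition in the OVS headers is OVS_MASKED(old, key, mask) = (key) | ((old) & ~(mask)), with 'key' pre-masked by userspace. A quick stand-alone check of that semantics (the macro is restated here under that assumption):

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the kernel's definition; KEY must already be masked. */
#define OVS_MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))

int main(void)
{
	uint16_t old = 0xaabb, key = 0x1100, mask = 0xff00;

	/* Only the bits selected by the mask change: prints 0x11bb. */
	printf("0x%04x\n", (uint16_t)OVS_MASKED(old, key, mask));
	return 0;
}
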
 254
 255static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
 256			const struct ovs_key_ethernet *key,
 257			const struct ovs_key_ethernet *mask)
 258{
 259	int err;
 260
 261	err = skb_ensure_writable(skb, ETH_HLEN);
 262	if (unlikely(err))
 263		return err;
 264
 265	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 266
 267	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
 268			       mask->eth_src);
 269	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
 270			       mask->eth_dst);
 271
 272	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 273
 274	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
 275	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
 276	return 0;
 277}
 278
 279/* pop_eth does not support VLAN packets as this action is never called
 280 * for them.
 281 */
 282static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
 283{
 284	int err;
 285
 286	err = skb_eth_pop(skb);
 287	if (err)
 288		return err;
 289
 290	/* safe right before invalidate_flow_key */
 291	key->mac_proto = MAC_PROTO_NONE;
 292	invalidate_flow_key(key);
 293	return 0;
 294}
 295
 296static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
 297		    const struct ovs_action_push_eth *ethh)
 298{
 299	int err;
 300
 301	err = skb_eth_push(skb, ethh->addresses.eth_dst,
 302			   ethh->addresses.eth_src);
 303	if (err)
 304		return err;
 305
 306	/* safe right before invalidate_flow_key */
 307	key->mac_proto = MAC_PROTO_ETHERNET;
 308	invalidate_flow_key(key);
 309	return 0;
 310}
 311
 312static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
 313		    const struct nshhdr *nh)
 314{
 315	int err;
 316
 317	err = nsh_push(skb, nh);
 318	if (err)
 319		return err;
 320
 321	/* safe right before invalidate_flow_key */
 322	key->mac_proto = MAC_PROTO_NONE;
 323	invalidate_flow_key(key);
 324	return 0;
 325}
 326
 327static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
 328{
 329	int err;
 330
 331	err = nsh_pop(skb);
 332	if (err)
 333		return err;
 334
 335	/* safe right before invalidate_flow_key */
 336	if (skb->protocol == htons(ETH_P_TEB))
 337		key->mac_proto = MAC_PROTO_ETHERNET;
 338	else
 339		key->mac_proto = MAC_PROTO_NONE;
 340	invalidate_flow_key(key);
 341	return 0;
 342}
 343
 344static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
 345				  __be32 addr, __be32 new_addr)
 346{
 347	int transport_len = skb->len - skb_transport_offset(skb);
 348
 349	if (nh->frag_off & htons(IP_OFFSET))
 350		return;
 351
 352	if (nh->protocol == IPPROTO_TCP) {
 353		if (likely(transport_len >= sizeof(struct tcphdr)))
 354			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
 355						 addr, new_addr, true);
 356	} else if (nh->protocol == IPPROTO_UDP) {
 357		if (likely(transport_len >= sizeof(struct udphdr))) {
 358			struct udphdr *uh = udp_hdr(skb);
 359
 360			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 361				inet_proto_csum_replace4(&uh->check, skb,
 362							 addr, new_addr, true);
 363				if (!uh->check)
 364					uh->check = CSUM_MANGLED_0;
 365			}
 366		}
 367	}
 368}
 369
 370static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 371			__be32 *addr, __be32 new_addr)
 372{
 373	update_ip_l4_checksum(skb, nh, *addr, new_addr);
 374	csum_replace4(&nh->check, *addr, new_addr);
 375	skb_clear_hash(skb);
 376	*addr = new_addr;
 377}
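
set_ip_addr() never recomputes a checksum from scratch; csum_replace4() and the helpers above apply the incremental update from RFC 1624, HC' = ~(~HC + ~m + m'). A hypothetical user-space model, treating a 32-bit address change as two 16-bit steps (the starting checksum is made up):

#include <stdint.h>
#include <stdio.h>

static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One RFC 1624 step: HC' = ~(~HC + ~m + m'). */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += new_val;
	return (uint16_t)~fold32(sum);
}

int main(void)
{
	uint16_t check = 0xb1e6;	/* illustrative header checksum */

	/* 192.168.0.1 -> 10.0.0.1: only the high 16 bits differ. */
	check = csum_update16(check, 0xc0a8, 0x0a00);
	printf("0x%04x\n", check);
	return 0;
}
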
 378
 379static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 380				 __be32 addr[4], const __be32 new_addr[4])
 381{
 382	int transport_len = skb->len - skb_transport_offset(skb);
 383
 384	if (l4_proto == NEXTHDR_TCP) {
 385		if (likely(transport_len >= sizeof(struct tcphdr)))
 386			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
 387						  addr, new_addr, true);
 388	} else if (l4_proto == NEXTHDR_UDP) {
 389		if (likely(transport_len >= sizeof(struct udphdr))) {
 390			struct udphdr *uh = udp_hdr(skb);
 391
 392			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 393				inet_proto_csum_replace16(&uh->check, skb,
 394							  addr, new_addr, true);
 395				if (!uh->check)
 396					uh->check = CSUM_MANGLED_0;
 397			}
 398		}
 399	} else if (l4_proto == NEXTHDR_ICMP) {
 400		if (likely(transport_len >= sizeof(struct icmp6hdr)))
 401			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
 402						  skb, addr, new_addr, true);
 403	}
 404}
 405
 406static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
 407			   const __be32 mask[4], __be32 masked[4])
 408{
 409	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
 410	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
 411	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
 412	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
 413}
 414
 415static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 416			  __be32 addr[4], const __be32 new_addr[4],
 417			  bool recalculate_csum)
 418{
 419	if (recalculate_csum)
 420		update_ipv6_checksum(skb, l4_proto, addr, new_addr);
 421
 422	skb_clear_hash(skb);
 423	memcpy(addr, new_addr, sizeof(__be32[4]));
 424}
 425
 426static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
 427{
 428	/* Bits 21-24 are always unmasked, so this retains their values. */
 429	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
 430	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
 431	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
 432}
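
The 20-bit flow label spans three bytes, and the top nibble of flow_lbl[0] actually belongs to the traffic class, which is why bits 21-24 of the mask must stay zero. An illustrative byte split (the label value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fl = 0xabcde;	/* 20-bit flow label */
	uint8_t flow_lbl[3];

	/* Same split as set_ipv6_fl(); a well-formed mask never touches
	 * the traffic-class nibble sharing flow_lbl[0]. */
	flow_lbl[0] = (uint8_t)(fl >> 16) & 0x0f;
	flow_lbl[1] = (uint8_t)(fl >> 8);
	flow_lbl[2] = (uint8_t)fl;

	printf("%x %02x %02x\n", flow_lbl[0], flow_lbl[1], flow_lbl[2]);
	return 0;
}
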
 433
 434static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
 435		       u8 mask)
 436{
 437	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
 438
 439	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
 440	nh->ttl = new_ttl;
 441}
 442
 443static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
 444		    const struct ovs_key_ipv4 *key,
 445		    const struct ovs_key_ipv4 *mask)
 446{
 447	struct iphdr *nh;
 448	__be32 new_addr;
 449	int err;
 450
 451	err = skb_ensure_writable(skb, skb_network_offset(skb) +
 452				  sizeof(struct iphdr));
 453	if (unlikely(err))
 454		return err;
 455
 456	nh = ip_hdr(skb);
 457
 458	/* Setting an IP address is typically only a side effect of
 459	 * matching on them in the current userspace implementation, so it
 460	 * makes sense to check if the value actually changed.
 461	 */
 462	if (mask->ipv4_src) {
 463		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 464
 465		if (unlikely(new_addr != nh->saddr)) {
 466			set_ip_addr(skb, nh, &nh->saddr, new_addr);
 467			flow_key->ipv4.addr.src = new_addr;
 468		}
 469	}
 470	if (mask->ipv4_dst) {
 471		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 472
 473		if (unlikely(new_addr != nh->daddr)) {
 474			set_ip_addr(skb, nh, &nh->daddr, new_addr);
 475			flow_key->ipv4.addr.dst = new_addr;
 476		}
 477	}
 478	if (mask->ipv4_tos) {
 479		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
 480		flow_key->ip.tos = nh->tos;
 481	}
 482	if (mask->ipv4_ttl) {
 483		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
 484		flow_key->ip.ttl = nh->ttl;
 485	}
 486
 487	return 0;
 488}
 489
 490static bool is_ipv6_mask_nonzero(const __be32 addr[4])
 491{
 492	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
 493}
 494
 495static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 496		    const struct ovs_key_ipv6 *key,
 497		    const struct ovs_key_ipv6 *mask)
 498{
 499	struct ipv6hdr *nh;
 500	int err;
 501
 502	err = skb_ensure_writable(skb, skb_network_offset(skb) +
 503				  sizeof(struct ipv6hdr));
 504	if (unlikely(err))
 505		return err;
 506
 507	nh = ipv6_hdr(skb);
 508
 509	/* Setting an IP address is typically only a side effect of
 510	 * matching on them in the current userspace implementation, so it
 511	 * makes sense to check if the value actually changed.
 512	 */
 513	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
 514		__be32 *saddr = (__be32 *)&nh->saddr;
 515		__be32 masked[4];
 516
 517		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 518
 519		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
 520			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
 521				      true);
 522			memcpy(&flow_key->ipv6.addr.src, masked,
 523			       sizeof(flow_key->ipv6.addr.src));
 524		}
 525	}
 526	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
 527		unsigned int offset = 0;
 528		int flags = IP6_FH_F_SKIP_RH;
 529		bool recalc_csum = true;
 530		__be32 *daddr = (__be32 *)&nh->daddr;
 531		__be32 masked[4];
 532
 533		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
 534
 535		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
 536			if (ipv6_ext_hdr(nh->nexthdr))
 537				recalc_csum = (ipv6_find_hdr(skb, &offset,
 538							     NEXTHDR_ROUTING,
 539							     NULL, &flags)
 540					       != NEXTHDR_ROUTING);
 541
 542			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
 543				      recalc_csum);
 544			memcpy(&flow_key->ipv6.addr.dst, masked,
 545			       sizeof(flow_key->ipv6.addr.dst));
 546		}
 547	}
 548	if (mask->ipv6_tclass) {
 549		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
 550		flow_key->ip.tos = ipv6_get_dsfield(nh);
 551	}
 552	if (mask->ipv6_label) {
 553		set_ipv6_fl(nh, ntohl(key->ipv6_label),
 554			    ntohl(mask->ipv6_label));
 555		flow_key->ipv6.label =
 556		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
 557	}
 558	if (mask->ipv6_hlimit) {
 559		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
 560			       mask->ipv6_hlimit);
 561		flow_key->ip.ttl = nh->hop_limit;
 562	}
 563	return 0;
 564}
 565
 566static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
 567		   const struct nlattr *a)
 568{
 569	struct nshhdr *nh;
 570	size_t length;
 571	int err;
 572	u8 flags;
 573	u8 ttl;
 574	int i;
 575
 576	struct ovs_key_nsh key;
 577	struct ovs_key_nsh mask;
 578
 579	err = nsh_key_from_nlattr(a, &key, &mask);
 580	if (err)
 581		return err;
 582
 583	/* Make sure the NSH base header is there */
 584	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
 585		return -ENOMEM;
 586
 587	nh = nsh_hdr(skb);
 588	length = nsh_hdr_len(nh);
 589
 590	/* Make sure the whole NSH header is there */
 591	err = skb_ensure_writable(skb, skb_network_offset(skb) +
 592				       length);
 593	if (unlikely(err))
 594		return err;
 595
 596	nh = nsh_hdr(skb);
 597	skb_postpull_rcsum(skb, nh, length);
 598	flags = nsh_get_flags(nh);
 599	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
 600	flow_key->nsh.base.flags = flags;
 601	ttl = nsh_get_ttl(nh);
 602	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
 603	flow_key->nsh.base.ttl = ttl;
 604	nsh_set_flags_and_ttl(nh, flags, ttl);
 605	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
 606				  mask.base.path_hdr);
 607	flow_key->nsh.base.path_hdr = nh->path_hdr;
 608	switch (nh->mdtype) {
 609	case NSH_M_TYPE1:
 610		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
 611			nh->md1.context[i] =
 612			    OVS_MASKED(nh->md1.context[i], key.context[i],
 613				       mask.context[i]);
 614		}
 615		memcpy(flow_key->nsh.context, nh->md1.context,
 616		       sizeof(nh->md1.context));
 617		break;
 618	case NSH_M_TYPE2:
 619		memset(flow_key->nsh.context, 0,
 620		       sizeof(flow_key->nsh.context));
 621		break;
 622	default:
 623		return -EINVAL;
 624	}
 625	skb_postpush_rcsum(skb, nh, length);
 626	return 0;
 627}
 628
 629/* Must follow skb_ensure_writable() since that can move the skb data. */
 630static void set_tp_port(struct sk_buff *skb, __be16 *port,
 631			__be16 new_port, __sum16 *check)
 632{
 633	inet_proto_csum_replace2(check, skb, *port, new_port, false);
 634	*port = new_port;
 635}
 636
 637static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 638		   const struct ovs_key_udp *key,
 639		   const struct ovs_key_udp *mask)
 640{
 641	struct udphdr *uh;
 642	__be16 src, dst;
 643	int err;
 644
 645	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
 646				  sizeof(struct udphdr));
 647	if (unlikely(err))
 648		return err;
 649
 650	uh = udp_hdr(skb);
 651	/* At least one of the masks is non-zero, so do not bother checking them. */
 652	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
 653	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 654
 655	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
 656		if (likely(src != uh->source)) {
 657			set_tp_port(skb, &uh->source, src, &uh->check);
 658			flow_key->tp.src = src;
 659		}
 660		if (likely(dst != uh->dest)) {
 661			set_tp_port(skb, &uh->dest, dst, &uh->check);
 662			flow_key->tp.dst = dst;
 663		}
 664
 665		if (unlikely(!uh->check))
 666			uh->check = CSUM_MANGLED_0;
 667	} else {
 668		uh->source = src;
 669		uh->dest = dst;
 670		flow_key->tp.src = src;
 671		flow_key->tp.dst = dst;
 672	}
 673
 674	skb_clear_hash(skb);
 675
 676	return 0;
 677}
 678
 679static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 680		   const struct ovs_key_tcp *key,
 681		   const struct ovs_key_tcp *mask)
 682{
 683	struct tcphdr *th;
 684	__be16 src, dst;
 685	int err;
 686
 687	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
 688				  sizeof(struct tcphdr));
 689	if (unlikely(err))
 690		return err;
 691
 692	th = tcp_hdr(skb);
 693	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
 694	if (likely(src != th->source)) {
 695		set_tp_port(skb, &th->source, src, &th->check);
 696		flow_key->tp.src = src;
 697	}
 698	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
 699	if (likely(dst != th->dest)) {
 700		set_tp_port(skb, &th->dest, dst, &th->check);
 701		flow_key->tp.dst = dst;
 702	}
 703	skb_clear_hash(skb);
 704
 705	return 0;
 706}
 707
 708static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 709		    const struct ovs_key_sctp *key,
 710		    const struct ovs_key_sctp *mask)
 711{
 712	unsigned int sctphoff = skb_transport_offset(skb);
 713	struct sctphdr *sh;
 714	__le32 old_correct_csum, new_csum, old_csum;
 715	int err;
 716
 717	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
 718	if (unlikely(err))
 719		return err;
 720
 721	sh = sctp_hdr(skb);
 722	old_csum = sh->checksum;
 723	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 724
 725	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
 726	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 727
 728	new_csum = sctp_compute_cksum(skb, sctphoff);
 729
 730	/* Carry any checksum errors through. */
 731	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
 732
 733	skb_clear_hash(skb);
 734	flow_key->tp.src = sh->source;
 735	flow_key->tp.dst = sh->dest;
 736
 737	return 0;
 738}
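
The XOR in set_sctp() deliberately carries any pre-existing CRC32c error through the rewrite: the stored checksum keeps the same delta from the correct value before and after the port change. A self-contained check of that identity (the CRC values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old_correct = 0x1234abcd;	/* CRC before the rewrite */
	uint32_t new_correct = 0x9876fedc;	/* CRC after the rewrite  */
	uint32_t error       = 0x00000100;	/* corruption already there */
	uint32_t old_stored  = old_correct ^ error;

	/* The set_sctp() trick: old ^ old_correct ^ new_correct. */
	uint32_t new_stored = old_stored ^ old_correct ^ new_correct;

	printf("error preserved: %s\n",
	       (new_stored ^ new_correct) == error ? "yes" : "no");
	return 0;
}
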
 739
 740static int ovs_vport_output(struct net *net, struct sock *sk,
 741			    struct sk_buff *skb)
 742{
 743	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
 744	struct vport *vport = data->vport;
 745
 746	if (skb_cow_head(skb, data->l2_len) < 0) {
 747		kfree_skb(skb);
 748		return -ENOMEM;
 749	}
 750
 751	__skb_dst_copy(skb, data->dst);
 752	*OVS_CB(skb) = data->cb;
 753	skb->inner_protocol = data->inner_protocol;
 754	if (data->vlan_tci & VLAN_CFI_MASK)
 755		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
 756	else
 757		__vlan_hwaccel_clear_tag(skb);
 758
 759	/* Reconstruct the MAC header.  */
 760	skb_push(skb, data->l2_len);
 761	memcpy(skb->data, &data->l2_data, data->l2_len);
 762	skb_postpush_rcsum(skb, skb->data, data->l2_len);
 763	skb_reset_mac_header(skb);
 764
 765	if (eth_p_mpls(skb->protocol)) {
 766		skb->inner_network_header = skb->network_header;
 767		skb_set_network_header(skb, data->network_offset);
 768		skb_reset_mac_len(skb);
 769	}
 770
 771	ovs_vport_send(vport, skb, data->mac_proto);
 772	return 0;
 773}
 774
 775static unsigned int
 776ovs_dst_get_mtu(const struct dst_entry *dst)
 777{
 778	return dst->dev->mtu;
 779}
 780
 781static struct dst_ops ovs_dst_ops = {
 782	.family = AF_UNSPEC,
 783	.mtu = ovs_dst_get_mtu,
 784};
 785
 786/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 787 * ovs_vport_output(), which is called once per resulting fragment.
 788 */
 789static void prepare_frag(struct vport *vport, struct sk_buff *skb,
 790			 u16 orig_network_offset, u8 mac_proto)
 791{
 792	unsigned int hlen = skb_network_offset(skb);
 793	struct ovs_frag_data *data;
 794
 795	data = this_cpu_ptr(&ovs_frag_data_storage);
 796	data->dst = skb->_skb_refdst;
 797	data->vport = vport;
 798	data->cb = *OVS_CB(skb);
 799	data->inner_protocol = skb->inner_protocol;
 800	data->network_offset = orig_network_offset;
 801	if (skb_vlan_tag_present(skb))
 802		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
 803	else
 804		data->vlan_tci = 0;
 805	data->vlan_proto = skb->vlan_proto;
 806	data->mac_proto = mac_proto;
 807	data->l2_len = hlen;
 808	memcpy(&data->l2_data, skb->data, hlen);
 809
 810	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 811	skb_pull(skb, hlen);
 812}
 813
 814static void ovs_fragment(struct net *net, struct vport *vport,
 815			 struct sk_buff *skb, u16 mru,
 816			 struct sw_flow_key *key)
 817{
 818	u16 orig_network_offset = 0;
 819
 820	if (eth_p_mpls(skb->protocol)) {
 821		orig_network_offset = skb_network_offset(skb);
 822		skb->network_header = skb->inner_network_header;
 823	}
 824
 825	if (skb_network_offset(skb) > MAX_L2_LEN) {
 826		OVS_NLERR(1, "L2 header too long to fragment");
 827		goto err;
 828	}
 829
 830	if (key->eth.type == htons(ETH_P_IP)) {
 831		struct rtable ovs_rt = { 0 };
 832		unsigned long orig_dst;
 833
 834		prepare_frag(vport, skb, orig_network_offset,
 835			     ovs_key_mac_proto(key));
 836		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
 837			 DST_OBSOLETE_NONE, DST_NOCOUNT);
 838		ovs_rt.dst.dev = vport->dev;
 839
 840		orig_dst = skb->_skb_refdst;
 841		skb_dst_set_noref(skb, &ovs_rt.dst);
 842		IPCB(skb)->frag_max_size = mru;
 843
 844		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
 845		refdst_drop(orig_dst);
 846	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 847		unsigned long orig_dst;
 848		struct rt6_info ovs_rt;
 849
 850		prepare_frag(vport, skb, orig_network_offset,
 851			     ovs_key_mac_proto(key));
 852		memset(&ovs_rt, 0, sizeof(ovs_rt));
 853		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
 854			 DST_OBSOLETE_NONE, DST_NOCOUNT);
 855		ovs_rt.dst.dev = vport->dev;
 856
 857		orig_dst = skb->_skb_refdst;
 858		skb_dst_set_noref(skb, &ovs_rt.dst);
 859		IP6CB(skb)->frag_max_size = mru;
 860
 861		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
 862		refdst_drop(orig_dst);
 863	} else {
 864		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
 865			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
 866			  vport->dev->mtu);
 867		goto err;
 868	}
 869
 870	return;
 871err:
 872	kfree_skb(skb);
 873}
 874
 875static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
 876		      struct sw_flow_key *key)
 877{
 878	struct vport *vport = ovs_vport_rcu(dp, out_port);
 879
 880	if (likely(vport)) {
 881		u16 mru = OVS_CB(skb)->mru;
 882		u32 cutlen = OVS_CB(skb)->cutlen;
 883
 884		if (unlikely(cutlen > 0)) {
 885			if (skb->len - cutlen > ovs_mac_header_len(key))
 886				pskb_trim(skb, skb->len - cutlen);
 887			else
 888				pskb_trim(skb, ovs_mac_header_len(key));
 889		}
 890
 891		if (likely(!mru ||
 892		           (skb->len <= mru + vport->dev->hard_header_len))) {
 893			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
 894		} else if (mru <= vport->dev->mtu) {
 895			struct net *net = read_pnet(&dp->net);
 896
 897			ovs_fragment(net, vport, skb, mru, key);
 898		} else {
 899			kfree_skb(skb);
 900		}
 901	} else {
 902		kfree_skb(skb);
 903	}
 904}
 905
 906static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 907			    struct sw_flow_key *key, const struct nlattr *attr,
 908			    const struct nlattr *actions, int actions_len,
 909			    uint32_t cutlen)
 910{
 911	struct dp_upcall_info upcall;
 912	const struct nlattr *a;
 913	int rem;
 914
 915	memset(&upcall, 0, sizeof(upcall));
 916	upcall.cmd = OVS_PACKET_CMD_ACTION;
 917	upcall.mru = OVS_CB(skb)->mru;
 918
 919	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 920	     a = nla_next(a, &rem)) {
 921		switch (nla_type(a)) {
 922		case OVS_USERSPACE_ATTR_USERDATA:
 923			upcall.userdata = a;
 924			break;
 925
 926		case OVS_USERSPACE_ATTR_PID:
 927			upcall.portid = nla_get_u32(a);
 928			break;
 929
 930		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
 931			/* Get out tunnel info. */
 932			struct vport *vport;
 933
 934			vport = ovs_vport_rcu(dp, nla_get_u32(a));
 935			if (vport) {
 936				int err;
 937
 938				err = dev_fill_metadata_dst(vport->dev, skb);
 939				if (!err)
 940					upcall.egress_tun_info = skb_tunnel_info(skb);
 941			}
 942
 943			break;
 944		}
 945
 946		case OVS_USERSPACE_ATTR_ACTIONS: {
 947			/* Include actions. */
 948			upcall.actions = actions;
 949			upcall.actions_len = actions_len;
 950			break;
 951		}
 952
 953		} /* End of switch. */
 954	}
 955
 956	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
 957}
 958
 959static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
 960				     struct sw_flow_key *key,
 961				     const struct nlattr *attr)
 962{
 963	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
 964	struct nlattr *actions = nla_data(attr);
 965
 966	if (nla_len(actions))
 967		return clone_execute(dp, skb, key, 0, nla_data(actions),
 968				     nla_len(actions), true, false);
 969
 970	consume_skb(skb);
 971	return 0;
 972}
 973
 974/* When 'last' is true, sample() should always consume the 'skb'.
 975 * Otherwise, sample() should keep 'skb' intact regardless what
 976 * actions are executed within sample().
 977 */
 978static int sample(struct datapath *dp, struct sk_buff *skb,
 979		  struct sw_flow_key *key, const struct nlattr *attr,
 980		  bool last)
 981{
 982	struct nlattr *actions;
 983	struct nlattr *sample_arg;
 984	int rem = nla_len(attr);
 985	const struct sample_arg *arg;
 986	bool clone_flow_key;
 987
 988	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
 989	sample_arg = nla_data(attr);
 990	arg = nla_data(sample_arg);
 991	actions = nla_next(sample_arg, &rem);
 992
 993	if ((arg->probability != U32_MAX) &&
 994	    (!arg->probability || prandom_u32() > arg->probability)) {
 995		if (last)
 996			consume_skb(skb);
 997		return 0;
 998	}
 999
1000	clone_flow_key = !arg->exec;
1001	return clone_execute(dp, skb, key, 0, actions, rem, last,
1002			     clone_flow_key);
1003}
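
arg->probability is a fraction of U32_MAX, and the early return above drops the packet unless the random draw lands at or below it (with U32_MAX meaning "always sample"). A sketch of how a 1-in-N rate maps onto that threshold (illustrative numbers, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t u32_max = 0xffffffffu;
	unsigned int n = 1000;			/* sample 1 packet in 1000 */
	uint32_t probability = u32_max / n;

	printf("threshold %u keeps ~%.3f%% of draws\n",
	       probability, 100.0 * ((double)probability / u32_max));
	return 0;
}
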
1004
1005/* When 'last' is true, clone() should always consume the 'skb'.
1006 * Otherwise, clone() should keep 'skb' intact regardless of what
1007 * actions are executed within clone().
1008 */
1009static int clone(struct datapath *dp, struct sk_buff *skb,
1010		 struct sw_flow_key *key, const struct nlattr *attr,
1011		 bool last)
1012{
1013	struct nlattr *actions;
1014	struct nlattr *clone_arg;
1015	int rem = nla_len(attr);
1016	bool dont_clone_flow_key;
1017
1018	/* The first action is always 'OVS_CLONE_ATTR_ARG'. */
1019	clone_arg = nla_data(attr);
1020	dont_clone_flow_key = nla_get_u32(clone_arg);
1021	actions = nla_next(clone_arg, &rem);
1022
1023	return clone_execute(dp, skb, key, 0, actions, rem, last,
1024			     !dont_clone_flow_key);
1025}
1026
1027static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
1028			 const struct nlattr *attr)
1029{
1030	struct ovs_action_hash *hash_act = nla_data(attr);
1031	u32 hash = 0;
1032
1033	/* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
1034	hash = skb_get_hash(skb);
1035	hash = jhash_1word(hash, hash_act->hash_basis);
1036	if (!hash)
1037		hash = 0x1;
1038
1039	key->ovs_flow_hash = hash;
1040}
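
execute_hash() mixes the skb's L4 flow hash with the action's basis and reserves 0 to mean "no hash", nudging a zero result to 0x1. The same shape in a stand-alone toy (mix() is a placeholder, not the kernel's jhash_1word()):

#include <stdint.h>
#include <stdio.h>

/* Placeholder mixer standing in for jhash_1word(). */
static uint32_t mix(uint32_t word, uint32_t basis)
{
	uint32_t h = word ^ basis;

	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;
	return h;
}

int main(void)
{
	uint32_t hash = mix(0xdeadbeef, 42);

	if (!hash)	/* 0 is reserved for "no hash computed" */
		hash = 0x1;
	printf("0x%08x\n", hash);
	return 0;
}
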
1041
1042static int execute_set_action(struct sk_buff *skb,
1043			      struct sw_flow_key *flow_key,
1044			      const struct nlattr *a)
1045{
1046	/* Only tunnel set execution is supported without a mask. */
1047	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
1048		struct ovs_tunnel_info *tun = nla_data(a);
1049
1050		skb_dst_drop(skb);
1051		dst_hold((struct dst_entry *)tun->tun_dst);
1052		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
1053		return 0;
1054	}
1055
1056	return -EINVAL;
1057}
1058
1059/* Mask is at the midpoint of the data. */
1060#define get_mask(a, type) ((const type)nla_data(a) + 1)
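
A masked-set attribute packs the value in the first half of its payload and the mask in the second, so get_mask() is just "value pointer + 1". The layout in miniature (plain arrays instead of netlink attributes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* First half: value; second half: mask. */
	uint32_t payload[2] = { 0x11223344, 0xffff0000 };
	const uint32_t *value = payload;
	const uint32_t *mask = value + 1;	/* get_mask() in miniature */

	printf("value=0x%08x mask=0x%08x\n", *value, *mask);
	return 0;
}
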
1061
1062static int execute_masked_set_action(struct sk_buff *skb,
1063				     struct sw_flow_key *flow_key,
1064				     const struct nlattr *a)
1065{
1066	int err = 0;
1067
1068	switch (nla_type(a)) {
1069	case OVS_KEY_ATTR_PRIORITY:
1070		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
1071			       *get_mask(a, u32 *));
1072		flow_key->phy.priority = skb->priority;
1073		break;
1074
1075	case OVS_KEY_ATTR_SKB_MARK:
1076		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
1077		flow_key->phy.skb_mark = skb->mark;
1078		break;
1079
1080	case OVS_KEY_ATTR_TUNNEL_INFO:
1081		/* Masked data not supported for tunnel. */
1082		err = -EINVAL;
1083		break;
1084
1085	case OVS_KEY_ATTR_ETHERNET:
1086		err = set_eth_addr(skb, flow_key, nla_data(a),
1087				   get_mask(a, struct ovs_key_ethernet *));
1088		break;
1089
1090	case OVS_KEY_ATTR_NSH:
1091		err = set_nsh(skb, flow_key, a);
1092		break;
1093
1094	case OVS_KEY_ATTR_IPV4:
1095		err = set_ipv4(skb, flow_key, nla_data(a),
1096			       get_mask(a, struct ovs_key_ipv4 *));
1097		break;
1098
1099	case OVS_KEY_ATTR_IPV6:
1100		err = set_ipv6(skb, flow_key, nla_data(a),
1101			       get_mask(a, struct ovs_key_ipv6 *));
1102		break;
1103
1104	case OVS_KEY_ATTR_TCP:
1105		err = set_tcp(skb, flow_key, nla_data(a),
1106			      get_mask(a, struct ovs_key_tcp *));
1107		break;
1108
1109	case OVS_KEY_ATTR_UDP:
1110		err = set_udp(skb, flow_key, nla_data(a),
1111			      get_mask(a, struct ovs_key_udp *));
1112		break;
1113
1114	case OVS_KEY_ATTR_SCTP:
1115		err = set_sctp(skb, flow_key, nla_data(a),
1116			       get_mask(a, struct ovs_key_sctp *));
1117		break;
1118
1119	case OVS_KEY_ATTR_MPLS:
1120		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
1121								    __be32 *));
1122		break;
1123
1124	case OVS_KEY_ATTR_CT_STATE:
1125	case OVS_KEY_ATTR_CT_ZONE:
1126	case OVS_KEY_ATTR_CT_MARK:
1127	case OVS_KEY_ATTR_CT_LABELS:
1128	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
1129	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
1130		err = -EINVAL;
1131		break;
1132	}
1133
1134	return err;
1135}
1136
1137static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
1138			  struct sw_flow_key *key,
1139			  const struct nlattr *a, bool last)
1140{
1141	u32 recirc_id;
1142
1143	if (!is_flow_key_valid(key)) {
1144		int err;
1145
1146		err = ovs_flow_key_update(skb, key);
1147		if (err)
1148			return err;
1149	}
1150	BUG_ON(!is_flow_key_valid(key));
1151
1152	recirc_id = nla_get_u32(a);
1153	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
1154}
1155
1156static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
1157				 struct sw_flow_key *key,
1158				 const struct nlattr *attr, bool last)
1159{
1160	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
1161	const struct nlattr *actions, *cpl_arg;
1162	int len, max_len, rem = nla_len(attr);
1163	const struct check_pkt_len_arg *arg;
1164	bool clone_flow_key;
1165
1166	/* The first netlink attribute in 'attr' is always
1167	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
1168	 */
1169	cpl_arg = nla_data(attr);
1170	arg = nla_data(cpl_arg);
1171
1172	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
1173	max_len = arg->pkt_len;
1174
1175	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
1176	    len <= max_len) {
1177		/* Second netlink attribute in 'attr' is always
1178		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
1179		 */
1180		actions = nla_next(cpl_arg, &rem);
1181		clone_flow_key = !arg->exec_for_lesser_equal;
1182	} else {
1183		/* Third netlink attribute in 'attr' is always
1184		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
1185		 */
1186		actions = nla_next(cpl_arg, &rem);
1187		actions = nla_next(actions, &rem);
1188		clone_flow_key = !arg->exec_for_greater;
1189	}
1190
1191	return clone_execute(dp, skb, key, 0, nla_data(actions),
1192			     nla_len(actions), last, clone_flow_key);
1193}
1194
1195static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
1196{
1197	int err;
1198
1199	if (skb->protocol == htons(ETH_P_IPV6)) {
1200		struct ipv6hdr *nh;
1201
1202		err = skb_ensure_writable(skb, skb_network_offset(skb) +
1203					  sizeof(*nh));
1204		if (unlikely(err))
1205			return err;
1206
1207		nh = ipv6_hdr(skb);
1208
1209		if (nh->hop_limit <= 1)
1210			return -EHOSTUNREACH;
1211
1212		key->ip.ttl = --nh->hop_limit;
1213	} else if (skb->protocol == htons(ETH_P_IP)) {
1214		struct iphdr *nh;
1215		u8 old_ttl;
1216
1217		err = skb_ensure_writable(skb, skb_network_offset(skb) +
1218					  sizeof(*nh));
1219		if (unlikely(err))
1220			return err;
1221
1222		nh = ip_hdr(skb);
1223		if (nh->ttl <= 1)
1224			return -EHOSTUNREACH;
1225
1226		old_ttl = nh->ttl--;
1227		csum_replace2(&nh->check, htons(old_ttl << 8),
1228			      htons(nh->ttl << 8));
1229		key->ip.ttl = nh->ttl;
1230	}
1231	return 0;
1232}
1233
1234/* Execute a list of actions against 'skb'. */
1235static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
1236			      struct sw_flow_key *key,
1237			      const struct nlattr *attr, int len)
1238{
1239	const struct nlattr *a;
1240	int rem;
1241
1242	for (a = attr, rem = len; rem > 0;
1243	     a = nla_next(a, &rem)) {
1244		int err = 0;
1245
1246		if (trace_ovs_do_execute_action_enabled())
1247			trace_ovs_do_execute_action(dp, skb, key, a, rem);
1248
1249		switch (nla_type(a)) {
1250		case OVS_ACTION_ATTR_OUTPUT: {
1251			int port = nla_get_u32(a);
1252			struct sk_buff *clone;
1253
1254			/* Every output action needs a separate clone
1255			 * of 'skb'. If the output action is the
1256			 * last action, cloning can be avoided.
1257			 */
1258			if (nla_is_last(a, rem)) {
1259				do_output(dp, skb, port, key);
1260				/* 'skb' has been used for output.
1261				 */
1262				return 0;
1263			}
1264
1265			clone = skb_clone(skb, GFP_ATOMIC);
1266			if (clone)
1267				do_output(dp, clone, port, key);
1268			OVS_CB(skb)->cutlen = 0;
1269			break;
1270		}
1271
1272		case OVS_ACTION_ATTR_TRUNC: {
1273			struct ovs_action_trunc *trunc = nla_data(a);
1274
1275			if (skb->len > trunc->max_len)
1276				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
1277			break;
1278		}
1279
1280		case OVS_ACTION_ATTR_USERSPACE:
1281			output_userspace(dp, skb, key, a, attr,
1282						     len, OVS_CB(skb)->cutlen);
1283			OVS_CB(skb)->cutlen = 0;
1284			break;
1285
1286		case OVS_ACTION_ATTR_HASH:
1287			execute_hash(skb, key, a);
1288			break;
1289
1290		case OVS_ACTION_ATTR_PUSH_MPLS: {
1291			struct ovs_action_push_mpls *mpls = nla_data(a);
1292
1293			err = push_mpls(skb, key, mpls->mpls_lse,
1294					mpls->mpls_ethertype, skb->mac_len);
1295			break;
1296		}
1297		case OVS_ACTION_ATTR_ADD_MPLS: {
1298			struct ovs_action_add_mpls *mpls = nla_data(a);
1299			__u16 mac_len = 0;
1300
1301			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
1302				mac_len = skb->mac_len;
1303
1304			err = push_mpls(skb, key, mpls->mpls_lse,
1305					mpls->mpls_ethertype, mac_len);
1306			break;
1307		}
1308		case OVS_ACTION_ATTR_POP_MPLS:
1309			err = pop_mpls(skb, key, nla_get_be16(a));
1310			break;
1311
1312		case OVS_ACTION_ATTR_PUSH_VLAN:
1313			err = push_vlan(skb, key, nla_data(a));
1314			break;
1315
1316		case OVS_ACTION_ATTR_POP_VLAN:
1317			err = pop_vlan(skb, key);
1318			break;
1319
1320		case OVS_ACTION_ATTR_RECIRC: {
1321			bool last = nla_is_last(a, rem);
1322
1323			err = execute_recirc(dp, skb, key, a, last);
1324			if (last) {
1325				/* If this is the last action, the skb has
1326				 * been consumed or freed.
1327				 * Return immediately.
1328				 */
1329				return err;
1330			}
1331			break;
1332		}
1333
1334		case OVS_ACTION_ATTR_SET:
1335			err = execute_set_action(skb, key, nla_data(a));
1336			break;
1337
1338		case OVS_ACTION_ATTR_SET_MASKED:
1339		case OVS_ACTION_ATTR_SET_TO_MASKED:
1340			err = execute_masked_set_action(skb, key, nla_data(a));
1341			break;
1342
1343		case OVS_ACTION_ATTR_SAMPLE: {
1344			bool last = nla_is_last(a, rem);
1345
1346			err = sample(dp, skb, key, a, last);
1347			if (last)
1348				return err;
1349
1350			break;
1351		}
1352
1353		case OVS_ACTION_ATTR_CT:
1354			if (!is_flow_key_valid(key)) {
1355				err = ovs_flow_key_update(skb, key);
1356				if (err)
1357					return err;
1358			}
1359
1360			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
1361					     nla_data(a));
1362
1363			/* Hide stolen IP fragments from user space. */
1364			if (err)
1365				return err == -EINPROGRESS ? 0 : err;
1366			break;
1367
1368		case OVS_ACTION_ATTR_CT_CLEAR:
1369			err = ovs_ct_clear(skb, key);
1370			break;
1371
1372		case OVS_ACTION_ATTR_PUSH_ETH:
1373			err = push_eth(skb, key, nla_data(a));
1374			break;
1375
1376		case OVS_ACTION_ATTR_POP_ETH:
1377			err = pop_eth(skb, key);
1378			break;
1379
1380		case OVS_ACTION_ATTR_PUSH_NSH: {
1381			u8 buffer[NSH_HDR_MAX_LEN];
1382			struct nshhdr *nh = (struct nshhdr *)buffer;
1383
1384			err = nsh_hdr_from_nlattr(nla_data(a), nh,
1385						  NSH_HDR_MAX_LEN);
1386			if (unlikely(err))
1387				break;
1388			err = push_nsh(skb, key, nh);
1389			break;
1390		}
1391
1392		case OVS_ACTION_ATTR_POP_NSH:
1393			err = pop_nsh(skb, key);
1394			break;
1395
1396		case OVS_ACTION_ATTR_METER:
1397			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
1398				consume_skb(skb);
1399				return 0;
1400			}
1401			break;
1402
1403		case OVS_ACTION_ATTR_CLONE: {
1404			bool last = nla_is_last(a, rem);
1405
1406			err = clone(dp, skb, key, a, last);
1407			if (last)
1408				return err;
1409
1410			break;
1411		}
1412
1413		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
1414			bool last = nla_is_last(a, rem);
1415
1416			err = execute_check_pkt_len(dp, skb, key, a, last);
1417			if (last)
1418				return err;
1419
1420			break;
1421		}
1422
1423		case OVS_ACTION_ATTR_DEC_TTL:
1424			err = execute_dec_ttl(skb, key);
1425			if (err == -EHOSTUNREACH)
1426				return dec_ttl_exception_handler(dp, skb,
1427								 key, a);
1428			break;
1429		}
1430
1431		if (unlikely(err)) {
1432			kfree_skb(skb);
1433			return err;
1434		}
1435	}
1436
1437	consume_skb(skb);
1438	return 0;
1439}
1440
1441/* Execute the actions on the clone of the packet. The effect of the
1442 * execution does not affect the original 'skb' nor the original 'key'.
1443 *
1444 * The execution may be deferred in case the actions can not be executed
1445 * immediately.
1446 */
1447static int clone_execute(struct datapath *dp, struct sk_buff *skb,
1448			 struct sw_flow_key *key, u32 recirc_id,
1449			 const struct nlattr *actions, int len,
1450			 bool last, bool clone_flow_key)
1451{
1452	struct deferred_action *da;
1453	struct sw_flow_key *clone;
1454
1455	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
1456	if (!skb) {
1457		/* Out of memory, skip this action.
1458		 */
1459		return 0;
1460	}
1461
1462	/* When clone_flow_key is false, the 'key' will not be changed
1463	 * by the actions, then the 'key' can be used directly.
1464	 * Otherwise, try to clone key from the next recursion level of
1465	 * 'flow_keys'. If clone is successful, execute the actions
1466	 * without deferring.
1467	 */
1468	clone = clone_flow_key ? clone_key(key) : key;
1469	if (clone) {
1470		int err = 0;
1471
1472		if (actions) { /* Sample action */
1473			if (clone_flow_key)
1474				__this_cpu_inc(exec_actions_level);
1475
1476			err = do_execute_actions(dp, skb, clone,
1477						 actions, len);
1478
1479			if (clone_flow_key)
1480				__this_cpu_dec(exec_actions_level);
1481		} else { /* Recirc action */
1482			clone->recirc_id = recirc_id;
1483			ovs_dp_process_packet(skb, clone);
1484		}
1485		return err;
1486	}
1487
1488	/* Out of 'flow_keys' space. Defer actions */
1489	da = add_deferred_actions(skb, key, actions, len);
1490	if (da) {
1491		if (!actions) { /* Recirc action */
1492			key = &da->pkt_key;
1493			key->recirc_id = recirc_id;
1494		}
1495	} else {
1496		/* Out of per CPU action FIFO space. Drop the 'skb' and
1497		 * log an error.
1498		 */
1499		kfree_skb(skb);
1500
1501		if (net_ratelimit()) {
1502			if (actions) { /* Sample action */
1503				pr_warn("%s: deferred action limit reached, drop sample action\n",
1504					ovs_dp_name(dp));
1505			} else {  /* Recirc action */
1506				pr_warn("%s: deferred action limit reached, drop recirc action\n",
1507					ovs_dp_name(dp));
1508			}
1509		}
1510	}
1511	return 0;
1512}
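
Whether clone_execute() runs the actions in place or defers them is purely a function of the nesting depth: levels up to OVS_DEFERRED_ACTION_THRESHOLD get a per-CPU key slot, deeper ones go through the FIFO. The policy in a stand-alone rendering:

#include <stdio.h>

#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)

int main(void)
{
	for (int level = 1; level <= OVS_RECURSION_LIMIT; level++)
		printf("level %d: %s\n", level,
		       level <= OVS_DEFERRED_ACTION_THRESHOLD ?
		       "clone key, execute in place" : "defer to FIFO");
	return 0;
}
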
1513
1514static void process_deferred_actions(struct datapath *dp)
1515{
1516	struct action_fifo *fifo = this_cpu_ptr(action_fifos);
1517
1518	/* Do not touch the FIFO in case there are no deferred actions. */
1519	if (action_fifo_is_empty(fifo))
1520		return;
1521
1522	/* Finish executing all deferred actions. */
1523	do {
1524		struct deferred_action *da = action_fifo_get(fifo);
1525		struct sk_buff *skb = da->skb;
1526		struct sw_flow_key *key = &da->pkt_key;
1527		const struct nlattr *actions = da->actions;
1528		int actions_len = da->actions_len;
1529
1530		if (actions)
1531			do_execute_actions(dp, skb, key, actions, actions_len);
1532		else
1533			ovs_dp_process_packet(skb, key);
1534	} while (!action_fifo_is_empty(fifo));
1535
1536	/* Reset FIFO for the next packet.  */
1537	action_fifo_init(fifo);
1538}
1539
1540/* Execute a list of actions against 'skb'. */
1541int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
1542			const struct sw_flow_actions *acts,
1543			struct sw_flow_key *key)
1544{
1545	int err, level;
1546
1547	level = __this_cpu_inc_return(exec_actions_level);
1548	if (unlikely(level > OVS_RECURSION_LIMIT)) {
1549		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
1550				     ovs_dp_name(dp));
1551		kfree_skb(skb);
1552		err = -ENETDOWN;
1553		goto out;
1554	}
1555
1556	OVS_CB(skb)->acts_origlen = acts->orig_len;
1557	err = do_execute_actions(dp, skb, key,
1558				 acts->actions, acts->actions_len);
1559
1560	if (level == 1)
1561		process_deferred_actions(dp);
1562
1563out:
1564	__this_cpu_dec(exec_actions_level);
1565	return err;
1566}
1567
1568int action_fifos_init(void)
1569{
1570	action_fifos = alloc_percpu(struct action_fifo);
1571	if (!action_fifos)
1572		return -ENOMEM;
1573
1574	flow_keys = alloc_percpu(struct action_flow_keys);
1575	if (!flow_keys) {
1576		free_percpu(action_fifos);
1577		return -ENOMEM;
1578	}
1579
1580	return 0;
1581}
1582
1583void action_fifos_exit(void)
1584{
1585	free_percpu(action_fifos);
1586	free_percpu(flow_keys);
1587}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2007-2017 Nicira, Inc.
   4 */
   5
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7
   8#include <linux/skbuff.h>
   9#include <linux/in.h>
  10#include <linux/ip.h>
  11#include <linux/openvswitch.h>
  12#include <linux/sctp.h>
  13#include <linux/tcp.h>
  14#include <linux/udp.h>
  15#include <linux/in6.h>
  16#include <linux/if_arp.h>
  17#include <linux/if_vlan.h>
  18
  19#include <net/dst.h>
  20#include <net/gso.h>
  21#include <net/ip.h>
  22#include <net/ipv6.h>
  23#include <net/ip6_fib.h>
  24#include <net/checksum.h>
  25#include <net/dsfield.h>
  26#include <net/mpls.h>
  27
  28#if IS_ENABLED(CONFIG_PSAMPLE)
  29#include <net/psample.h>
  30#endif
  31
  32#include <net/sctp/checksum.h>
  33
  34#include "datapath.h"
  35#include "drop.h"
  36#include "flow.h"
  37#include "conntrack.h"
  38#include "vport.h"
  39#include "flow_netlink.h"
  40#include "openvswitch_trace.h"
  41
  42struct deferred_action {
  43	struct sk_buff *skb;
  44	const struct nlattr *actions;
  45	int actions_len;
  46
  47	/* Store pkt_key clone when creating deferred action. */
  48	struct sw_flow_key pkt_key;
  49};
  50
  51#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
  52struct ovs_frag_data {
  53	unsigned long dst;
  54	struct vport *vport;
  55	struct ovs_skb_cb cb;
  56	__be16 inner_protocol;
  57	u16 network_offset;	/* valid only for MPLS */
  58	u16 vlan_tci;
  59	__be16 vlan_proto;
  60	unsigned int l2_len;
  61	u8 mac_proto;
  62	u8 l2_data[MAX_L2_LEN];
  63};
  64
  65static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
  66
  67#define DEFERRED_ACTION_FIFO_SIZE 10
  68#define OVS_RECURSION_LIMIT 5
  69#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
  70struct action_fifo {
  71	int head;
  72	int tail;
  73	/* Deferred action fifo queue storage. */
  74	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
  75};
  76
  77struct action_flow_keys {
  78	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
  79};
  80
  81static struct action_fifo __percpu *action_fifos;
  82static struct action_flow_keys __percpu *flow_keys;
  83static DEFINE_PER_CPU(int, exec_actions_level);
  84
  85/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
  86 * space. Return NULL if out of key spaces.
  87 */
  88static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
  89{
  90	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
  91	int level = this_cpu_read(exec_actions_level);
  92	struct sw_flow_key *key = NULL;
  93
  94	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
  95		key = &keys->key[level - 1];
  96		*key = *key_;
  97	}
  98
  99	return key;
 100}
 101
 102static void action_fifo_init(struct action_fifo *fifo)
 103{
 104	fifo->head = 0;
 105	fifo->tail = 0;
 106}
 107
 108static bool action_fifo_is_empty(const struct action_fifo *fifo)
 109{
 110	return (fifo->head == fifo->tail);
 111}
 112
 113static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
 114{
 115	if (action_fifo_is_empty(fifo))
 116		return NULL;
 117
 118	return &fifo->fifo[fifo->tail++];
 119}
 120
 121static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
 122{
 123	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
 124		return NULL;
 125
 126	return &fifo->fifo[fifo->head++];
 127}
 128
 129/* Return the deferred action entry on success, or NULL if the fifo is full. */
 130static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
 131				    const struct sw_flow_key *key,
 132				    const struct nlattr *actions,
 133				    const int actions_len)
 134{
 135	struct action_fifo *fifo;
 136	struct deferred_action *da;
 137
 138	fifo = this_cpu_ptr(action_fifos);
 139	da = action_fifo_put(fifo);
 140	if (da) {
 141		da->skb = skb;
 142		da->actions = actions;
 143		da->actions_len = actions_len;
 144		da->pkt_key = *key;
 145	}
 146
 147	return da;
 148}
 149
 150static void invalidate_flow_key(struct sw_flow_key *key)
 151{
 152	key->mac_proto |= SW_FLOW_KEY_INVALID;
 153}
 154
 155static bool is_flow_key_valid(const struct sw_flow_key *key)
 156{
 157	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
 158}
 159
 160static int clone_execute(struct datapath *dp, struct sk_buff *skb,
 161			 struct sw_flow_key *key,
 162			 u32 recirc_id,
 163			 const struct nlattr *actions, int len,
 164			 bool last, bool clone_flow_key);
 165
 166static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 167			      struct sw_flow_key *key,
 168			      const struct nlattr *attr, int len);
 169
 170static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 171		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
 172{
 173	int err;
 174
 175	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
 176	if (err)
 177		return err;
 178
 179	if (!mac_len)
 180		key->mac_proto = MAC_PROTO_NONE;
 181
 182	invalidate_flow_key(key);
 183	return 0;
 184}
 185
 186static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 187		    const __be16 ethertype)
 188{
 189	int err;
 190
 191	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
 192			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
 193	if (err)
 194		return err;
 195
 196	if (ethertype == htons(ETH_P_TEB))
 197		key->mac_proto = MAC_PROTO_ETHERNET;
 198
 199	invalidate_flow_key(key);
 200	return 0;
 201}
 202
 203static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 204		    const __be32 *mpls_lse, const __be32 *mask)
 205{
 206	struct mpls_shim_hdr *stack;
 207	__be32 lse;
 208	int err;
 209
 210	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
 211		return -ENOMEM;
 212
 213	stack = mpls_hdr(skb);
 214	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
 215	err = skb_mpls_update_lse(skb, lse);
 216	if (err)
 217		return err;
 218
 219	flow_key->mpls.lse[0] = lse;
 220	return 0;
 221}
 222
 223static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
 224{
 225	int err;
 226
 227	err = skb_vlan_pop(skb);
 228	if (skb_vlan_tag_present(skb)) {
 229		invalidate_flow_key(key);
 230	} else {
 231		key->eth.vlan.tci = 0;
 232		key->eth.vlan.tpid = 0;
 233	}
 234	return err;
 235}
 236
 237static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
 238		     const struct ovs_action_push_vlan *vlan)
 239{
 240	int err;
 241
 242	if (skb_vlan_tag_present(skb)) {
 243		invalidate_flow_key(key);
 244	} else {
 245		key->eth.vlan.tci = vlan->vlan_tci;
 246		key->eth.vlan.tpid = vlan->vlan_tpid;
 247	}
 248	err = skb_vlan_push(skb, vlan->vlan_tpid,
 249			    ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
 250	skb_reset_mac_len(skb);
 251	return err;
 252}
 253
 254/* 'src' is already properly masked. */
 255static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
 256{
 257	u16 *dst = (u16 *)dst_;
 258	const u16 *src = (const u16 *)src_;
 259	const u16 *mask = (const u16 *)mask_;
 260
 261	OVS_SET_MASKED(dst[0], src[0], mask[0]);
 262	OVS_SET_MASKED(dst[1], src[1], mask[1]);
 263	OVS_SET_MASKED(dst[2], src[2], mask[2]);
 264}
 265
 266static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
 267			const struct ovs_key_ethernet *key,
 268			const struct ovs_key_ethernet *mask)
 269{
 270	int err;
 271
 272	err = skb_ensure_writable(skb, ETH_HLEN);
 273	if (unlikely(err))
 274		return err;
 275
 276	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 277
 278	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
 279			       mask->eth_src);
 280	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
 281			       mask->eth_dst);
 282
 283	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 284
 285	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
 286	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
 287	return 0;
 288}
 289
 290/* pop_eth does not support VLAN packets as this action is never called
 291 * for them.
 292 */
 293static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
 294{
 295	int err;
 296
 297	err = skb_eth_pop(skb);
 298	if (err)
 299		return err;
 300
 301	/* safe right before invalidate_flow_key */
 302	key->mac_proto = MAC_PROTO_NONE;
 303	invalidate_flow_key(key);
 304	return 0;
 305}
 306
 307static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
 308		    const struct ovs_action_push_eth *ethh)
 309{
 310	int err;
 311
 312	err = skb_eth_push(skb, ethh->addresses.eth_dst,
 313			   ethh->addresses.eth_src);
 314	if (err)
 315		return err;
 316
 317	/* safe right before invalidate_flow_key */
 318	key->mac_proto = MAC_PROTO_ETHERNET;
 319	invalidate_flow_key(key);
 320	return 0;
 321}
 322
 323static noinline_for_stack int push_nsh(struct sk_buff *skb,
 324				       struct sw_flow_key *key,
 325				       const struct nlattr *a)
 326{
 327	u8 buffer[NSH_HDR_MAX_LEN];
 328	struct nshhdr *nh = (struct nshhdr *)buffer;
 329	int err;
 330
 331	err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
 332	if (err)
 333		return err;
 334
 335	err = nsh_push(skb, nh);
 336	if (err)
 337		return err;
 338
 339	/* safe right before invalidate_flow_key */
 340	key->mac_proto = MAC_PROTO_NONE;
 341	invalidate_flow_key(key);
 342	return 0;
 343}
 344
 345static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
 346{
 347	int err;
 348
 349	err = nsh_pop(skb);
 350	if (err)
 351		return err;
 352
 353	/* safe right before invalidate_flow_key */
 354	if (skb->protocol == htons(ETH_P_TEB))
 355		key->mac_proto = MAC_PROTO_ETHERNET;
 356	else
 357		key->mac_proto = MAC_PROTO_NONE;
 358	invalidate_flow_key(key);
 359	return 0;
 360}
 361
 362static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
 363				  __be32 addr, __be32 new_addr)
 364{
 365	int transport_len = skb->len - skb_transport_offset(skb);
 366
 367	if (nh->frag_off & htons(IP_OFFSET))
 368		return;
 369
 370	if (nh->protocol == IPPROTO_TCP) {
 371		if (likely(transport_len >= sizeof(struct tcphdr)))
 372			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
 373						 addr, new_addr, true);
 374	} else if (nh->protocol == IPPROTO_UDP) {
 375		if (likely(transport_len >= sizeof(struct udphdr))) {
 376			struct udphdr *uh = udp_hdr(skb);
 377
 378			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 379				inet_proto_csum_replace4(&uh->check, skb,
 380							 addr, new_addr, true);
 381				if (!uh->check)
 382					uh->check = CSUM_MANGLED_0;
 383			}
 384		}
 385	}
 386}
 387
 388static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 389			__be32 *addr, __be32 new_addr)
 390{
 391	update_ip_l4_checksum(skb, nh, *addr, new_addr);
 392	csum_replace4(&nh->check, *addr, new_addr);
 393	skb_clear_hash(skb);
 394	ovs_ct_clear(skb, NULL);
 395	*addr = new_addr;
 396}
 397
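/* IPv6 counterpart of update_ip_l4_checksum().  ICMPv6 checksums also
 * cover the pseudo-header, and because a zero UDP checksum is illegal
 * over IPv6, a recomputed zero is folded to CSUM_MANGLED_0.
 */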
 398static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 399				 __be32 addr[4], const __be32 new_addr[4])
 400{
 401	int transport_len = skb->len - skb_transport_offset(skb);
 402
 403	if (l4_proto == NEXTHDR_TCP) {
 404		if (likely(transport_len >= sizeof(struct tcphdr)))
 405			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
 406						  addr, new_addr, true);
 407	} else if (l4_proto == NEXTHDR_UDP) {
 408		if (likely(transport_len >= sizeof(struct udphdr))) {
 409			struct udphdr *uh = udp_hdr(skb);
 410
 411			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 412				inet_proto_csum_replace16(&uh->check, skb,
 413							  addr, new_addr, true);
 414				if (!uh->check)
 415					uh->check = CSUM_MANGLED_0;
 416			}
 417		}
 418	} else if (l4_proto == NEXTHDR_ICMP) {
 419		if (likely(transport_len >= sizeof(struct icmp6hdr)))
 420			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
 421						  skb, addr, new_addr, true);
 422	}
 423}
 424
 425static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
 426			   const __be32 mask[4], __be32 masked[4])
 427{
 428	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
 429	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
 430	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
 431	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
 432}
 433
 434static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 435			  __be32 addr[4], const __be32 new_addr[4],
 436			  bool recalculate_csum)
 437{
 438	if (recalculate_csum)
 439		update_ipv6_checksum(skb, l4_proto, addr, new_addr);
 440
 441	skb_clear_hash(skb);
 442	ovs_ct_clear(skb, NULL);
 443	memcpy(addr, new_addr, sizeof(__be32[4]));
 444}
 445
 446static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
 447{
 448	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
 449
 450	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
 451
 452	if (skb->ip_summed == CHECKSUM_COMPLETE)
 453		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
 454			     (__force __wsum)(ipv6_tclass << 12));
 455
 456	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
 457}
 458
 459static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
 460{
 461	u32 ofl;
 462
 463	ofl = nh->flow_lbl[0] << 16 |  nh->flow_lbl[1] << 8 |  nh->flow_lbl[2];
 464	fl = OVS_MASKED(ofl, fl, mask);
 465
 466	/* Bits 21-24 are always unmasked, so this retains their values. */
 467	nh->flow_lbl[0] = (u8)(fl >> 16);
 468	nh->flow_lbl[1] = (u8)(fl >> 8);
 469	nh->flow_lbl[2] = (u8)fl;
 470
 471	if (skb->ip_summed == CHECKSUM_COMPLETE)
 472		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
 473}
 474
 475static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
 476{
 477	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
 478
 479	if (skb->ip_summed == CHECKSUM_COMPLETE)
 480		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
 481			     (__force __wsum)(new_ttl << 8));
 482	nh->hop_limit = new_ttl;
 483}
 484
 485static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
 486		       u8 mask)
 487{
 488	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
 489
 490	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
 491	nh->ttl = new_ttl;
 492}
 493
 494static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
 495		    const struct ovs_key_ipv4 *key,
 496		    const struct ovs_key_ipv4 *mask)
 497{
 498	struct iphdr *nh;
 499	__be32 new_addr;
 500	int err;
 501
 502	err = skb_ensure_writable(skb, skb_network_offset(skb) +
 503				  sizeof(struct iphdr));
 504	if (unlikely(err))
 505		return err;
 506
 507	nh = ip_hdr(skb);
 508
 509	/* Setting IP addresses is typically only a side effect of
 510	 * matching on them in the current userspace implementation, so it
 511	 * makes sense to check whether the value actually changed.
 512	 */
 513	if (mask->ipv4_src) {
 514		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 515
 516		if (unlikely(new_addr != nh->saddr)) {
 517			set_ip_addr(skb, nh, &nh->saddr, new_addr);
 518			flow_key->ipv4.addr.src = new_addr;
 519		}
 520	}
 521	if (mask->ipv4_dst) {
 522		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 523
 524		if (unlikely(new_addr != nh->daddr)) {
 525			set_ip_addr(skb, nh, &nh->daddr, new_addr);
 526			flow_key->ipv4.addr.dst = new_addr;
 527		}
 528	}
 529	if (mask->ipv4_tos) {
 530		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
 531		flow_key->ip.tos = nh->tos;
 532	}
 533	if (mask->ipv4_ttl) {
 534		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
 535		flow_key->ip.ttl = nh->ttl;
 536	}
 537
 538	return 0;
 539}
 540
 541static bool is_ipv6_mask_nonzero(const __be32 addr[4])
 542{
 543	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
 544}
 545
 546static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 547		    const struct ovs_key_ipv6 *key,
 548		    const struct ovs_key_ipv6 *mask)
 549{
 550	struct ipv6hdr *nh;
 551	int err;
 552
 553	err = skb_ensure_writable(skb, skb_network_offset(skb) +
 554				  sizeof(struct ipv6hdr));
 555	if (unlikely(err))
 556		return err;
 557
 558	nh = ipv6_hdr(skb);
 559
 560	/* Setting IP addresses is typically only a side effect of
 561	 * matching on them in the current userspace implementation, so it
 562	 * makes sense to check whether the value actually changed.
 563	 */
 564	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
 565		__be32 *saddr = (__be32 *)&nh->saddr;
 566		__be32 masked[4];
 567
 568		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 569
 570		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
 571			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
 572				      true);
 573			memcpy(&flow_key->ipv6.addr.src, masked,
 574			       sizeof(flow_key->ipv6.addr.src));
 575		}
 576	}
 577	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
 578		unsigned int offset = 0;
 579		int flags = IP6_FH_F_SKIP_RH;
 580		bool recalc_csum = true;
 581		__be32 *daddr = (__be32 *)&nh->daddr;
 582		__be32 masked[4];
 583
 584		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
 585
 586		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
 587			if (ipv6_ext_hdr(nh->nexthdr))
 588				recalc_csum = (ipv6_find_hdr(skb, &offset,
 589							     NEXTHDR_ROUTING,
 590							     NULL, &flags)
 591					       != NEXTHDR_ROUTING);
 592
 593			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
 594				      recalc_csum);
 595			memcpy(&flow_key->ipv6.addr.dst, masked,
 596			       sizeof(flow_key->ipv6.addr.dst));
 597		}
 598	}
 599	if (mask->ipv6_tclass) {
 600		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
 601		flow_key->ip.tos = ipv6_get_dsfield(nh);
 602	}
 603	if (mask->ipv6_label) {
 604		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
 605			    ntohl(mask->ipv6_label));
 606		flow_key->ipv6.label =
 607		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
 608	}
 609	if (mask->ipv6_hlimit) {
 610		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
 611		flow_key->ip.ttl = nh->hop_limit;
 612	}
 613	return 0;
 614}
 615
 616static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
 617		   const struct nlattr *a)
 618{
 619	struct nshhdr *nh;
 620	size_t length;
 621	int err;
 622	u8 flags;
 623	u8 ttl;
 624	int i;
 625
 626	struct ovs_key_nsh key;
 627	struct ovs_key_nsh mask;
 628
 629	err = nsh_key_from_nlattr(a, &key, &mask);
 630	if (err)
 631		return err;
 632
 633	/* Make sure the NSH base header is there */
 634	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
 635		return -ENOMEM;
 636
 637	nh = nsh_hdr(skb);
 638	length = nsh_hdr_len(nh);
 639
 640	/* Make sure the whole NSH header is there */
 641	err = skb_ensure_writable(skb, skb_network_offset(skb) +
 642				       length);
 643	if (unlikely(err))
 644		return err;
 645
 646	nh = nsh_hdr(skb);
 647	skb_postpull_rcsum(skb, nh, length);
 648	flags = nsh_get_flags(nh);
 649	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
 650	flow_key->nsh.base.flags = flags;
 651	ttl = nsh_get_ttl(nh);
 652	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
 653	flow_key->nsh.base.ttl = ttl;
 654	nsh_set_flags_and_ttl(nh, flags, ttl);
 655	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
 656				  mask.base.path_hdr);
 657	flow_key->nsh.base.path_hdr = nh->path_hdr;
 658	switch (nh->mdtype) {
 659	case NSH_M_TYPE1:
 660		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
 661			nh->md1.context[i] =
 662			    OVS_MASKED(nh->md1.context[i], key.context[i],
 663				       mask.context[i]);
 664		}
 665		memcpy(flow_key->nsh.context, nh->md1.context,
 666		       sizeof(nh->md1.context));
 667		break;
 668	case NSH_M_TYPE2:
 669		memset(flow_key->nsh.context, 0,
 670		       sizeof(flow_key->nsh.context));
 671		break;
 672	default:
 673		return -EINVAL;
 674	}
 675	skb_postpush_rcsum(skb, nh, length);
 676	return 0;
 677}
 678
 679/* Must follow skb_ensure_writable() since that can move the skb data. */
 680static void set_tp_port(struct sk_buff *skb, __be16 *port,
 681			__be16 new_port, __sum16 *check)
 682{
 683	ovs_ct_clear(skb, NULL);
 684	inet_proto_csum_replace2(check, skb, *port, new_port, false);
 685	*port = new_port;
 686}
 687
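/* Rewrite the UDP ports under mask.  A set checksum is updated
 * incrementally; a zero checksum ("no checksum" on IPv4) has nothing to
 * update, so the ports are simply overwritten.
 */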
 688static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 689		   const struct ovs_key_udp *key,
 690		   const struct ovs_key_udp *mask)
 691{
 692	struct udphdr *uh;
 693	__be16 src, dst;
 694	int err;
 695
 696	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
 697				  sizeof(struct udphdr));
 698	if (unlikely(err))
 699		return err;
 700
 701	uh = udp_hdr(skb);
 702	/* At least one mask is non-zero, so do not bother checking them. */
 703	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
 704	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 705
 706	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
 707		if (likely(src != uh->source)) {
 708			set_tp_port(skb, &uh->source, src, &uh->check);
 709			flow_key->tp.src = src;
 710		}
 711		if (likely(dst != uh->dest)) {
 712			set_tp_port(skb, &uh->dest, dst, &uh->check);
 713			flow_key->tp.dst = dst;
 714		}
 715
 716		if (unlikely(!uh->check))
 717			uh->check = CSUM_MANGLED_0;
 718	} else {
 719		uh->source = src;
 720		uh->dest = dst;
 721		flow_key->tp.src = src;
 722		flow_key->tp.dst = dst;
 723		ovs_ct_clear(skb, NULL);
 724	}
 725
 726	skb_clear_hash(skb);
 727
 728	return 0;
 729}
 730
 731static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 732		   const struct ovs_key_tcp *key,
 733		   const struct ovs_key_tcp *mask)
 734{
 735	struct tcphdr *th;
 736	__be16 src, dst;
 737	int err;
 738
 739	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
 740				  sizeof(struct tcphdr));
 741	if (unlikely(err))
 742		return err;
 743
 744	th = tcp_hdr(skb);
 745	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
 746	if (likely(src != th->source)) {
 747		set_tp_port(skb, &th->source, src, &th->check);
 748		flow_key->tp.src = src;
 749	}
 750	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
 751	if (likely(dst != th->dest)) {
 752		set_tp_port(skb, &th->dest, dst, &th->check);
 753		flow_key->tp.dst = dst;
 754	}
 755	skb_clear_hash(skb);
 756
 757	return 0;
 758}
 759
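/* Rewrite the SCTP ports under mask.  The CRC32c checksum cannot be
 * updated incrementally from the header alone, so it is recomputed over
 * the packet before and after the change.
 */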
 760static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 761		    const struct ovs_key_sctp *key,
 762		    const struct ovs_key_sctp *mask)
 763{
 764	unsigned int sctphoff = skb_transport_offset(skb);
 765	struct sctphdr *sh;
 766	__le32 old_correct_csum, new_csum, old_csum;
 767	int err;
 768
 769	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
 770	if (unlikely(err))
 771		return err;
 772
 773	sh = sctp_hdr(skb);
 774	old_csum = sh->checksum;
 775	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 776
 777	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
 778	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 779
 780	new_csum = sctp_compute_cksum(skb, sctphoff);
 781
 782	/* Carry any existing checksum error through; only the delta is applied. */
 783	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
 784
 785	skb_clear_hash(skb);
 786	ovs_ct_clear(skb, NULL);
 787
 788	flow_key->tp.src = sh->source;
 789	flow_key->tp.dst = sh->dest;
 790
 791	return 0;
 792}
 793
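/* Output callback passed to ip_do_fragment()/ipv6_fragment().  It
 * restores the L2 header and skb metadata that prepare_frag() stashed
 * in the per-CPU ovs_frag_data and sends each fragment out through the
 * saved vport.
 */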
 794static int ovs_vport_output(struct net *net, struct sock *sk,
 795			    struct sk_buff *skb)
 796{
 797	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
 798	struct vport *vport = data->vport;
 799
 800	if (skb_cow_head(skb, data->l2_len) < 0) {
 801		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
 802		return -ENOMEM;
 803	}
 804
 805	__skb_dst_copy(skb, data->dst);
 806	*OVS_CB(skb) = data->cb;
 807	skb->inner_protocol = data->inner_protocol;
 808	if (data->vlan_tci & VLAN_CFI_MASK)
 809		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
 810	else
 811		__vlan_hwaccel_clear_tag(skb);
 812
 813	/* Reconstruct the MAC header.  */
 814	skb_push(skb, data->l2_len);
 815	memcpy(skb->data, &data->l2_data, data->l2_len);
 816	skb_postpush_rcsum(skb, skb->data, data->l2_len);
 817	skb_reset_mac_header(skb);
 818
 819	if (eth_p_mpls(skb->protocol)) {
 820		skb->inner_network_header = skb->network_header;
 821		skb_set_network_header(skb, data->network_offset);
 822		skb_reset_mac_len(skb);
 823	}
 824
 825	ovs_vport_send(vport, skb, data->mac_proto);
 826	return 0;
 827}
 828
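/* Minimal dst_ops for the scratch dst installed while fragmenting; only
 * the MTU callback is needed, and it simply reports the egress device
 * MTU.
 */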
 829static unsigned int
 830ovs_dst_get_mtu(const struct dst_entry *dst)
 831{
 832	return dst->dev->mtu;
 833}
 834
 835static struct dst_ops ovs_dst_ops = {
 836	.family = AF_UNSPEC,
 837	.mtu = ovs_dst_get_mtu,
 838};
 839
 840/* prepare_frag() is called once per larger-than-MTU frame; its inverse,
 841 * ovs_vport_output(), is then called once per resulting fragment.
 842 */
 843static void prepare_frag(struct vport *vport, struct sk_buff *skb,
 844			 u16 orig_network_offset, u8 mac_proto)
 845{
 846	unsigned int hlen = skb_network_offset(skb);
 847	struct ovs_frag_data *data;
 848
 849	data = this_cpu_ptr(&ovs_frag_data_storage);
 850	data->dst = skb->_skb_refdst;
 851	data->vport = vport;
 852	data->cb = *OVS_CB(skb);
 853	data->inner_protocol = skb->inner_protocol;
 854	data->network_offset = orig_network_offset;
 855	if (skb_vlan_tag_present(skb))
 856		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
 857	else
 858		data->vlan_tci = 0;
 859	data->vlan_proto = skb->vlan_proto;
 860	data->mac_proto = mac_proto;
 861	data->l2_len = hlen;
 862	memcpy(&data->l2_data, skb->data, hlen);
 863
 864	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 865	skb_pull(skb, hlen);
 866}
 867
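/* Fragment a packet that exceeds the MRU.  A fake, uncounted dst is
 * installed so the stack's fragmentation code can run outside a routed
 * context, and frag_max_size is set to the MRU so each fragment fits
 * the path that originally delivered the packet.
 */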
 868static void ovs_fragment(struct net *net, struct vport *vport,
 869			 struct sk_buff *skb, u16 mru,
 870			 struct sw_flow_key *key)
 871{
 872	enum ovs_drop_reason reason;
 873	u16 orig_network_offset = 0;
 874
 875	if (eth_p_mpls(skb->protocol)) {
 876		orig_network_offset = skb_network_offset(skb);
 877		skb->network_header = skb->inner_network_header;
 878	}
 879
 880	if (skb_network_offset(skb) > MAX_L2_LEN) {
 881		OVS_NLERR(1, "L2 header too long to fragment");
 882		reason = OVS_DROP_FRAG_L2_TOO_LONG;
 883		goto err;
 884	}
 885
 886	if (key->eth.type == htons(ETH_P_IP)) {
 887		struct rtable ovs_rt = { 0 };
 888		unsigned long orig_dst;
 889
 890		prepare_frag(vport, skb, orig_network_offset,
 891			     ovs_key_mac_proto(key));
 892		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
 893			 DST_OBSOLETE_NONE, DST_NOCOUNT);
 894		ovs_rt.dst.dev = vport->dev;
 895
 896		orig_dst = skb->_skb_refdst;
 897		skb_dst_set_noref(skb, &ovs_rt.dst);
 898		IPCB(skb)->frag_max_size = mru;
 899
 900		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
 901		refdst_drop(orig_dst);
 902	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 903		unsigned long orig_dst;
 904		struct rt6_info ovs_rt;
 905
 906		prepare_frag(vport, skb, orig_network_offset,
 907			     ovs_key_mac_proto(key));
 908		memset(&ovs_rt, 0, sizeof(ovs_rt));
 909		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
 910			 DST_OBSOLETE_NONE, DST_NOCOUNT);
 911		ovs_rt.dst.dev = vport->dev;
 912
 913		orig_dst = skb->_skb_refdst;
 914		skb_dst_set_noref(skb, &ovs_rt.dst);
 915		IP6CB(skb)->frag_max_size = mru;
 916
 917		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
 918		refdst_drop(orig_dst);
 919	} else {
 920		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
 921			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
 922			  vport->dev->mtu);
 923		reason = OVS_DROP_FRAG_INVALID_PROTO;
 924		goto err;
 925	}
 926
 927	return;
 928err:
 929	ovs_kfree_skb_reason(skb, reason);
 930}
 931
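/* Transmit 'skb' on the given datapath port.  Requested truncation
 * (cutlen) is applied first; a packet larger than the MRU is fragmented
 * if the MRU does not exceed the device MTU and dropped otherwise.
 */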
 932static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
 933		      struct sw_flow_key *key)
 934{
 935	struct vport *vport = ovs_vport_rcu(dp, out_port);
 936
 937	if (likely(vport &&
 938		   netif_running(vport->dev) &&
 939		   netif_carrier_ok(vport->dev))) {
 940		u16 mru = OVS_CB(skb)->mru;
 941		u32 cutlen = OVS_CB(skb)->cutlen;
 942
 943		if (unlikely(cutlen > 0)) {
 944			if (skb->len - cutlen > ovs_mac_header_len(key))
 945				pskb_trim(skb, skb->len - cutlen);
 946			else
 947				pskb_trim(skb, ovs_mac_header_len(key));
 948		}
 949
 950		/* Need to set the pkt_type to involve the routing layer.  The
 951		 * packet movement through the OVS datapath doesn't generally
 952		 * use routing, but this is needed for tunnel cases.
 953		 */
 954		skb->pkt_type = PACKET_OUTGOING;
 955
 956		if (likely(!mru ||
 957		           (skb->len <= mru + vport->dev->hard_header_len))) {
 958			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
 959		} else if (mru <= vport->dev->mtu) {
 960			struct net *net = read_pnet(&dp->net);
 961
 962			ovs_fragment(net, vport, skb, mru, key);
 963		} else {
 964			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
 965		}
 966	} else {
 967		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
 968	}
 969}
 970
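/* Parse the nested attributes of an OVS_ACTION_ATTR_USERSPACE action
 * into a dp_upcall_info and hand the packet to userspace via an upcall.
 */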
 971static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 972			    struct sw_flow_key *key, const struct nlattr *attr,
 973			    const struct nlattr *actions, int actions_len,
 974			    uint32_t cutlen)
 975{
 976	struct dp_upcall_info upcall;
 977	const struct nlattr *a;
 978	int rem;
 979
 980	memset(&upcall, 0, sizeof(upcall));
 981	upcall.cmd = OVS_PACKET_CMD_ACTION;
 982	upcall.mru = OVS_CB(skb)->mru;
 983
 984	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 985	     a = nla_next(a, &rem)) {
 986		switch (nla_type(a)) {
 987		case OVS_USERSPACE_ATTR_USERDATA:
 988			upcall.userdata = a;
 989			break;
 990
 991		case OVS_USERSPACE_ATTR_PID:
 992			if (dp->user_features &
 993			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
 994				upcall.portid =
 995				  ovs_dp_get_upcall_portid(dp,
 996							   smp_processor_id());
 997			else
 998				upcall.portid = nla_get_u32(a);
 999			break;
1000
1001		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
1002			/* Get egress tunnel info. */
1003			struct vport *vport;
1004
1005			vport = ovs_vport_rcu(dp, nla_get_u32(a));
1006			if (vport) {
1007				int err;
1008
1009				err = dev_fill_metadata_dst(vport->dev, skb);
1010				if (!err)
1011					upcall.egress_tun_info = skb_tunnel_info(skb);
1012			}
1013
1014			break;
1015		}
1016
1017		case OVS_USERSPACE_ATTR_ACTIONS: {
1018			/* Include actions. */
1019			upcall.actions = actions;
1020			upcall.actions_len = actions_len;
1021			break;
1022		}
1023
1024		} /* End of switch. */
1025	}
1026
1027	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
1028}
1029
1030static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
1031				     struct sw_flow_key *key,
1032				     const struct nlattr *attr)
1033{
1034	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
1035	struct nlattr *actions = nla_data(attr);
1036
1037	if (nla_len(actions))
1038		return clone_execute(dp, skb, key, 0, nla_data(actions),
1039				     nla_len(actions), true, false);
1040
1041	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
1042	return 0;
1043}
1044
1045/* When 'last' is true, sample() should always consume the 'skb'.
1046 * Otherwise, sample() should keep 'skb' intact regardless of what
1047 * actions are executed within sample().
1048 */
1049static int sample(struct datapath *dp, struct sk_buff *skb,
1050		  struct sw_flow_key *key, const struct nlattr *attr,
1051		  bool last)
1052{
1053	struct nlattr *actions;
1054	struct nlattr *sample_arg;
1055	int rem = nla_len(attr);
1056	const struct sample_arg *arg;
1057	u32 init_probability;
1058	bool clone_flow_key;
1059	int err;
1060
1061	/* The first attribute is always 'OVS_SAMPLE_ATTR_ARG'. */
1062	sample_arg = nla_data(attr);
1063	arg = nla_data(sample_arg);
1064	actions = nla_next(sample_arg, &rem);
1065	init_probability = OVS_CB(skb)->probability;
1066
1067	if ((arg->probability != U32_MAX) &&
1068	    (!arg->probability || get_random_u32() > arg->probability)) {
1069		if (last)
1070			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
1071		return 0;
1072	}
1073
1074	OVS_CB(skb)->probability = arg->probability;
1075
1076	clone_flow_key = !arg->exec;
1077	err = clone_execute(dp, skb, key, 0, actions, rem, last,
1078			    clone_flow_key);
1079
1080	if (!last)
1081		OVS_CB(skb)->probability = init_probability;
1082
1083	return err;
1084}
1085
1086/* When 'last' is true, clone() should always consume the 'skb'.
1087 * Otherwise, clone() should keep 'skb' intact regardless of what
1088 * actions are executed within clone().
1089 */
1090static int clone(struct datapath *dp, struct sk_buff *skb,
1091		 struct sw_flow_key *key, const struct nlattr *attr,
1092		 bool last)
1093{
1094	struct nlattr *actions;
1095	struct nlattr *clone_arg;
1096	int rem = nla_len(attr);
1097	bool dont_clone_flow_key;
1098
1099	/* The first attribute is always 'OVS_CLONE_ATTR_EXEC'. */
1100	clone_arg = nla_data(attr);
1101	dont_clone_flow_key = nla_get_u32(clone_arg);
1102	actions = nla_next(clone_arg, &rem);
1103
1104	return clone_execute(dp, skb, key, 0, actions, rem, last,
1105			     !dont_clone_flow_key);
1106}
1107
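/* Compute the hash for an OVS_ACTION_ATTR_HASH action and store it in
 * the flow key.  Zero is reserved to mean "no hash", so a computed zero
 * is nudged to 0x1.
 */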
1108static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
1109			 const struct nlattr *attr)
1110{
1111	struct ovs_action_hash *hash_act = nla_data(attr);
1112	u32 hash = 0;
1113
1114	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
1115		/* OVS_HASH_ALG_L4 hashing type. */
1116		hash = skb_get_hash(skb);
1117	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
1118		/* OVS_HASH_ALG_SYM_L4 hashing type.  NOTE: this doesn't
1119		 * extend past an encapsulated header.
1120		 */
1121		hash = __skb_get_hash_symmetric(skb);
1122	}
1123
1124	hash = jhash_1word(hash, hash_act->hash_basis);
1125	if (!hash)
1126		hash = 0x1;
1127
1128	key->ovs_flow_hash = hash;
1129}
1130
1131static int execute_set_action(struct sk_buff *skb,
1132			      struct sw_flow_key *flow_key,
1133			      const struct nlattr *a)
1134{
1135	/* Only tunnel set execution is supported without a mask. */
1136	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
1137		struct ovs_tunnel_info *tun = nla_data(a);
1138
1139		skb_dst_drop(skb);
1140		dst_hold((struct dst_entry *)tun->tun_dst);
1141		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
1142		return 0;
1143	}
1144
1145	return -EINVAL;
1146}
1147
1148/* Payload is <key><mask>, so the mask starts at the midpoint of the data. */
1149#define get_mask(a, type) ((const type)nla_data(a) + 1)
1150
1151static int execute_masked_set_action(struct sk_buff *skb,
1152				     struct sw_flow_key *flow_key,
1153				     const struct nlattr *a)
1154{
1155	int err = 0;
1156
1157	switch (nla_type(a)) {
1158	case OVS_KEY_ATTR_PRIORITY:
1159		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
1160			       *get_mask(a, u32 *));
1161		flow_key->phy.priority = skb->priority;
1162		break;
1163
1164	case OVS_KEY_ATTR_SKB_MARK:
1165		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
1166		flow_key->phy.skb_mark = skb->mark;
1167		break;
1168
1169	case OVS_KEY_ATTR_TUNNEL_INFO:
1170		/* Masked data not supported for tunnel. */
1171		err = -EINVAL;
1172		break;
1173
1174	case OVS_KEY_ATTR_ETHERNET:
1175		err = set_eth_addr(skb, flow_key, nla_data(a),
1176				   get_mask(a, struct ovs_key_ethernet *));
1177		break;
1178
1179	case OVS_KEY_ATTR_NSH:
1180		err = set_nsh(skb, flow_key, a);
1181		break;
1182
1183	case OVS_KEY_ATTR_IPV4:
1184		err = set_ipv4(skb, flow_key, nla_data(a),
1185			       get_mask(a, struct ovs_key_ipv4 *));
1186		break;
1187
1188	case OVS_KEY_ATTR_IPV6:
1189		err = set_ipv6(skb, flow_key, nla_data(a),
1190			       get_mask(a, struct ovs_key_ipv6 *));
1191		break;
1192
1193	case OVS_KEY_ATTR_TCP:
1194		err = set_tcp(skb, flow_key, nla_data(a),
1195			      get_mask(a, struct ovs_key_tcp *));
1196		break;
1197
1198	case OVS_KEY_ATTR_UDP:
1199		err = set_udp(skb, flow_key, nla_data(a),
1200			      get_mask(a, struct ovs_key_udp *));
1201		break;
1202
1203	case OVS_KEY_ATTR_SCTP:
1204		err = set_sctp(skb, flow_key, nla_data(a),
1205			       get_mask(a, struct ovs_key_sctp *));
1206		break;
1207
1208	case OVS_KEY_ATTR_MPLS:
1209		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
1210								    __be32 *));
1211		break;
1212
1213	case OVS_KEY_ATTR_CT_STATE:
1214	case OVS_KEY_ATTR_CT_ZONE:
1215	case OVS_KEY_ATTR_CT_MARK:
1216	case OVS_KEY_ATTR_CT_LABELS:
1217	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
1218	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
1219		err = -EINVAL;
1220		break;
1221	}
1222
1223	return err;
1224}
1225
1226static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
1227			  struct sw_flow_key *key,
1228			  const struct nlattr *a, bool last)
1229{
1230	u32 recirc_id;
1231
1232	if (!is_flow_key_valid(key)) {
1233		int err;
1234
1235		err = ovs_flow_key_update(skb, key);
1236		if (err)
1237			return err;
1238	}
1239	BUG_ON(!is_flow_key_valid(key));
1240
1241	recirc_id = nla_get_u32(a);
1242	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
1243}
1244
1245static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
1246				 struct sw_flow_key *key,
1247				 const struct nlattr *attr, bool last)
1248{
1249	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
1250	const struct nlattr *actions, *cpl_arg;
1251	int len, max_len, rem = nla_len(attr);
1252	const struct check_pkt_len_arg *arg;
1253	bool clone_flow_key;
1254
1255	/* The first netlink attribute in 'attr' is always
1256	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
1257	 */
1258	cpl_arg = nla_data(attr);
1259	arg = nla_data(cpl_arg);
1260
1261	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
1262	max_len = arg->pkt_len;
1263
1264	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
1265	    len <= max_len) {
1266		/* Second netlink attribute in 'attr' is always
1267		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
1268		 */
1269		actions = nla_next(cpl_arg, &rem);
1270		clone_flow_key = !arg->exec_for_lesser_equal;
1271	} else {
1272		/* Third netlink attribute in 'attr' is always
1273		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
1274		 */
1275		actions = nla_next(cpl_arg, &rem);
1276		actions = nla_next(actions, &rem);
1277		clone_flow_key = !arg->exec_for_greater;
1278	}
1279
1280	return clone_execute(dp, skb, key, 0, nla_data(actions),
1281			     nla_len(actions), last, clone_flow_key);
1282}
1283
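/* Decrement the IPv4 TTL or IPv6 hop limit in place.  Returns
 * -EHOSTUNREACH when the TTL would expire, letting the caller execute
 * the nested OVS_DEC_TTL_ATTR_ACTION list instead of forwarding.
 */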
1284static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
1285{
1286	int err;
1287
1288	if (skb->protocol == htons(ETH_P_IPV6)) {
1289		struct ipv6hdr *nh;
1290
1291		err = skb_ensure_writable(skb, skb_network_offset(skb) +
1292					  sizeof(*nh));
1293		if (unlikely(err))
1294			return err;
1295
1296		nh = ipv6_hdr(skb);
1297
1298		if (nh->hop_limit <= 1)
1299			return -EHOSTUNREACH;
1300
1301		key->ip.ttl = --nh->hop_limit;
1302	} else if (skb->protocol == htons(ETH_P_IP)) {
1303		struct iphdr *nh;
1304		u8 old_ttl;
1305
1306		err = skb_ensure_writable(skb, skb_network_offset(skb) +
1307					  sizeof(*nh));
1308		if (unlikely(err))
1309			return err;
1310
1311		nh = ip_hdr(skb);
1312		if (nh->ttl <= 1)
1313			return -EHOSTUNREACH;
1314
1315		old_ttl = nh->ttl--;
1316		csum_replace2(&nh->check, htons(old_ttl << 8),
1317			      htons(nh->ttl << 8));
1318		key->ip.ttl = nh->ttl;
1319	}
1320	return 0;
1321}
1322
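/* Deliver the packet to the psample channel.  The sampling probability
 * currently recorded in the skb control block is reported as the rate,
 * with U32_MAX standing in for "always sampled".
 */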
1323#if IS_ENABLED(CONFIG_PSAMPLE)
1324static void execute_psample(struct datapath *dp, struct sk_buff *skb,
1325			    const struct nlattr *attr)
1326{
1327	struct psample_group psample_group = {};
1328	struct psample_metadata md = {};
1329	const struct nlattr *a;
1330	u32 rate;
1331	int rem;
1332
1333	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
1334		switch (nla_type(a)) {
1335		case OVS_PSAMPLE_ATTR_GROUP:
1336			psample_group.group_num = nla_get_u32(a);
1337			break;
1338
1339		case OVS_PSAMPLE_ATTR_COOKIE:
1340			md.user_cookie = nla_data(a);
1341			md.user_cookie_len = nla_len(a);
1342			break;
1343		}
1344	}
1345
1346	psample_group.net = ovs_dp_get_net(dp);
1347	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
1348	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
1349	md.rate_as_probability = 1;
1350
1351	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;
1352
1353	psample_sample_packet(&psample_group, skb, rate, &md);
1354}
1355#else
1356static void execute_psample(struct datapath *dp, struct sk_buff *skb,
1357			    const struct nlattr *attr)
1358{}
1359#endif
1360
1361/* Execute a list of actions against 'skb'. */
1362static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
1363			      struct sw_flow_key *key,
1364			      const struct nlattr *attr, int len)
1365{
1366	const struct nlattr *a;
1367	int rem;
1368
1369	for (a = attr, rem = len; rem > 0;
1370	     a = nla_next(a, &rem)) {
1371		int err = 0;
1372
1373		if (trace_ovs_do_execute_action_enabled())
1374			trace_ovs_do_execute_action(dp, skb, key, a, rem);
1375
1376		/* Actions that rightfully have to consume the skb should do
1377		 * so and return directly.
1378		 */
1379		switch (nla_type(a)) {
1380		case OVS_ACTION_ATTR_OUTPUT: {
1381			int port = nla_get_u32(a);
1382			struct sk_buff *clone;
1383
1384			/* Every output action needs a separate clone
1385			 * of 'skb'.  In case the output action is the
1386			 * last action, cloning can be avoided.
1387			 */
1388			if (nla_is_last(a, rem)) {
1389				do_output(dp, skb, port, key);
1390				/* 'skb' has been used for output.
1391				 */
1392				return 0;
1393			}
1394
1395			clone = skb_clone(skb, GFP_ATOMIC);
1396			if (clone)
1397				do_output(dp, clone, port, key);
1398			OVS_CB(skb)->cutlen = 0;
1399			break;
1400		}
1401
1402		case OVS_ACTION_ATTR_TRUNC: {
1403			struct ovs_action_trunc *trunc = nla_data(a);
1404
1405			if (skb->len > trunc->max_len)
1406				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
1407			break;
1408		}
1409
1410		case OVS_ACTION_ATTR_USERSPACE:
1411			output_userspace(dp, skb, key, a, attr,
1412						     len, OVS_CB(skb)->cutlen);
1413			OVS_CB(skb)->cutlen = 0;
1414			if (nla_is_last(a, rem)) {
1415				consume_skb(skb);
1416				return 0;
1417			}
1418			break;
1419
1420		case OVS_ACTION_ATTR_HASH:
1421			execute_hash(skb, key, a);
1422			break;
1423
1424		case OVS_ACTION_ATTR_PUSH_MPLS: {
1425			struct ovs_action_push_mpls *mpls = nla_data(a);
1426
1427			err = push_mpls(skb, key, mpls->mpls_lse,
1428					mpls->mpls_ethertype, skb->mac_len);
1429			break;
1430		}
1431		case OVS_ACTION_ATTR_ADD_MPLS: {
1432			struct ovs_action_add_mpls *mpls = nla_data(a);
1433			__u16 mac_len = 0;
1434
1435			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
1436				mac_len = skb->mac_len;
1437
1438			err = push_mpls(skb, key, mpls->mpls_lse,
1439					mpls->mpls_ethertype, mac_len);
1440			break;
1441		}
1442		case OVS_ACTION_ATTR_POP_MPLS:
1443			err = pop_mpls(skb, key, nla_get_be16(a));
1444			break;
1445
1446		case OVS_ACTION_ATTR_PUSH_VLAN:
1447			err = push_vlan(skb, key, nla_data(a));
1448			break;
1449
1450		case OVS_ACTION_ATTR_POP_VLAN:
1451			err = pop_vlan(skb, key);
1452			break;
1453
1454		case OVS_ACTION_ATTR_RECIRC: {
1455			bool last = nla_is_last(a, rem);
1456
1457			err = execute_recirc(dp, skb, key, a, last);
1458			if (last) {
1459				/* If this is the last action, the skb has
1460				 * been consumed or freed.
1461				 * Return immediately.
1462				 */
1463				return err;
1464			}
1465			break;
1466		}
1467
1468		case OVS_ACTION_ATTR_SET:
1469			err = execute_set_action(skb, key, nla_data(a));
1470			break;
1471
1472		case OVS_ACTION_ATTR_SET_MASKED:
1473		case OVS_ACTION_ATTR_SET_TO_MASKED:
1474			err = execute_masked_set_action(skb, key, nla_data(a));
1475			break;
1476
1477		case OVS_ACTION_ATTR_SAMPLE: {
1478			bool last = nla_is_last(a, rem);
1479
1480			err = sample(dp, skb, key, a, last);
1481			if (last)
1482				return err;
1483
1484			break;
1485		}
1486
1487		case OVS_ACTION_ATTR_CT:
1488			if (!is_flow_key_valid(key)) {
1489				err = ovs_flow_key_update(skb, key);
1490				if (err)
1491					return err;
1492			}
1493
1494			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
1495					     nla_data(a));
1496
1497			/* Hide stolen IP fragments from user space. */
1498			if (err)
1499				return err == -EINPROGRESS ? 0 : err;
1500			break;
1501
1502		case OVS_ACTION_ATTR_CT_CLEAR:
1503			err = ovs_ct_clear(skb, key);
1504			break;
1505
1506		case OVS_ACTION_ATTR_PUSH_ETH:
1507			err = push_eth(skb, key, nla_data(a));
1508			break;
1509
1510		case OVS_ACTION_ATTR_POP_ETH:
1511			err = pop_eth(skb, key);
1512			break;
1513
1514		case OVS_ACTION_ATTR_PUSH_NSH:
1515			err = push_nsh(skb, key, nla_data(a));
1516			break;
1517
1518		case OVS_ACTION_ATTR_POP_NSH:
1519			err = pop_nsh(skb, key);
1520			break;
1521
1522		case OVS_ACTION_ATTR_METER:
1523			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
1524				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
1525				return 0;
1526			}
1527			break;
1528
1529		case OVS_ACTION_ATTR_CLONE: {
1530			bool last = nla_is_last(a, rem);
1531
1532			err = clone(dp, skb, key, a, last);
1533			if (last)
1534				return err;
1535
1536			break;
1537		}
1538
1539		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
1540			bool last = nla_is_last(a, rem);
1541
1542			err = execute_check_pkt_len(dp, skb, key, a, last);
1543			if (last)
1544				return err;
1545
1546			break;
1547		}
1548
1549		case OVS_ACTION_ATTR_DEC_TTL:
1550			err = execute_dec_ttl(skb, key);
1551			if (err == -EHOSTUNREACH)
1552				return dec_ttl_exception_handler(dp, skb,
1553								 key, a);
1554			break;
1555
1556		case OVS_ACTION_ATTR_DROP: {
1557			enum ovs_drop_reason reason = nla_get_u32(a)
1558				? OVS_DROP_EXPLICIT_WITH_ERROR
1559				: OVS_DROP_EXPLICIT;
1560
1561			ovs_kfree_skb_reason(skb, reason);
1562			return 0;
1563		}
1564
1565		case OVS_ACTION_ATTR_PSAMPLE:
1566			execute_psample(dp, skb, a);
1567			OVS_CB(skb)->cutlen = 0;
1568			if (nla_is_last(a, rem)) {
1569				consume_skb(skb);
1570				return 0;
1571			}
1572			break;
1573		}
1574
1575		if (unlikely(err)) {
1576			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
1577			return err;
1578		}
1579	}
1580
1581	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
1582	return 0;
1583}
1584
1585/* Execute the actions on a clone of the packet. The execution affects
1586 * neither the original 'skb' nor the original 'key'.
1587 *
1588 * The execution may be deferred if the actions cannot be executed
1589 * immediately.
1590 */
1591static int clone_execute(struct datapath *dp, struct sk_buff *skb,
1592			 struct sw_flow_key *key, u32 recirc_id,
1593			 const struct nlattr *actions, int len,
1594			 bool last, bool clone_flow_key)
1595{
1596	struct deferred_action *da;
1597	struct sw_flow_key *clone;
1598
1599	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
1600	if (!skb) {
1601		/* Out of memory, skip this action.
1602		 */
1603		return 0;
1604	}
1605
1606	/* When clone_flow_key is false, the 'key' will not be changed by
1607	 * the actions, so it can be used directly.
1608	 * Otherwise, try to clone the key from the next recursion level of
1609	 * 'flow_keys'. If cloning succeeds, execute the actions without
1610	 * deferring.
1611	 */
1612	clone = clone_flow_key ? clone_key(key) : key;
1613	if (clone) {
1614		int err = 0;
1615
1616		if (actions) { /* Sample action */
1617			if (clone_flow_key)
1618				__this_cpu_inc(exec_actions_level);
1619
1620			err = do_execute_actions(dp, skb, clone,
1621						 actions, len);
1622
1623			if (clone_flow_key)
1624				__this_cpu_dec(exec_actions_level);
1625		} else { /* Recirc action */
1626			clone->recirc_id = recirc_id;
1627			ovs_dp_process_packet(skb, clone);
1628		}
1629		return err;
1630	}
1631
1632	/* Out of 'flow_keys' space. Defer the actions. */
1633	da = add_deferred_actions(skb, key, actions, len);
1634	if (da) {
1635		if (!actions) { /* Recirc action */
1636			key = &da->pkt_key;
1637			key->recirc_id = recirc_id;
1638		}
1639	} else {
1640		/* Out of per CPU action FIFO space. Drop the 'skb' and
1641		 * log an error.
1642		 */
1643		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);
1644
1645		if (net_ratelimit()) {
1646			if (actions) { /* Sample action */
1647				pr_warn("%s: deferred action limit reached, drop sample action\n",
1648					ovs_dp_name(dp));
1649			} else {  /* Recirc action */
1650				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
1651					ovs_dp_name(dp), recirc_id);
1652			}
1653		}
1654	}
1655	return 0;
1656}
1657
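/* Drain the per-CPU deferred-action FIFO that clone_execute() falls
 * back to once the pre-allocated flow-key space is exhausted.
 */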
1658static void process_deferred_actions(struct datapath *dp)
1659{
1660	struct action_fifo *fifo = this_cpu_ptr(action_fifos);
1661
1662	/* Do not touch the FIFO if there are no deferred actions. */
1663	if (action_fifo_is_empty(fifo))
1664		return;
1665
1666	/* Finish executing all deferred actions. */
1667	do {
1668		struct deferred_action *da = action_fifo_get(fifo);
1669		struct sk_buff *skb = da->skb;
1670		struct sw_flow_key *key = &da->pkt_key;
1671		const struct nlattr *actions = da->actions;
1672		int actions_len = da->actions_len;
1673
1674		if (actions)
1675			do_execute_actions(dp, skb, key, actions, actions_len);
1676		else
1677			ovs_dp_process_packet(skb, key);
1678	} while (!action_fifo_is_empty(fifo));
1679
1680	/* Reset FIFO for the next packet.  */
1681	action_fifo_init(fifo);
1682}
1683
1684/* Execute a list of actions against 'skb'. */
1685int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
1686			const struct sw_flow_actions *acts,
1687			struct sw_flow_key *key)
1688{
1689	int err, level;
1690
1691	level = __this_cpu_inc_return(exec_actions_level);
1692	if (unlikely(level > OVS_RECURSION_LIMIT)) {
1693		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
1694				     ovs_dp_name(dp));
1695		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
1696		err = -ENETDOWN;
1697		goto out;
1698	}
1699
1700	OVS_CB(skb)->acts_origlen = acts->orig_len;
1701	err = do_execute_actions(dp, skb, key,
1702				 acts->actions, acts->actions_len);
1703
1704	if (level == 1)
1705		process_deferred_actions(dp);
1706
1707out:
1708	__this_cpu_dec(exec_actions_level);
1709	return err;
1710}
1711
1712int action_fifos_init(void)
1713{
1714	action_fifos = alloc_percpu(struct action_fifo);
1715	if (!action_fifos)
1716		return -ENOMEM;
1717
1718	flow_keys = alloc_percpu(struct action_flow_keys);
1719	if (!flow_keys) {
1720		free_percpu(action_fifos);
1721		return -ENOMEM;
1722	}
1723
1724	return 0;
1725}
1726
1727void action_fifos_exit(void)
1728{
1729	free_percpu(action_fifos);
1730	free_percpu(flow_keys);
1731}