// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
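
/* Nested actions (sample, recirc, clone, check_pkt_len, dec_ttl) re-enter
 * do_execute_actions(). The per-CPU 'exec_actions_level' counts the nesting
 * depth: the first OVS_DEFERRED_ACTION_THRESHOLD levels run inline using the
 * pre-allocated 'flow_keys' slots, deeper levels fall back to the
 * deferred-action FIFO, and anything beyond OVS_RECURSION_LIMIT is dropped
 * as a probable configuration error.
 */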

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
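
/* The FIFO does not wrap, so only DEFERRED_ACTION_FIFO_SIZE - 1 entries are
 * ever used per packet; it is drained and reset by
 * process_deferred_actions() once the outermost execution on this CPU
 * finishes.
 */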

/* Return the deferred action entry on success, or NULL if the fifo
 * is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}
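
/* Actions that rewrite the packet in ways the cached flow key no longer
 * describes set SW_FLOW_KEY_INVALID. Users that need an accurate key
 * afterwards (e.g. recirculation or conntrack) re-extract it with
 * ovs_flow_key_update() first.
 */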

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
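
/* OVS_MASKED(old, new, mask) evaluates to (new) | (old & ~mask), with 'new'
 * arriving pre-masked from userspace, and OVS_SET_MASKED() assigns the
 * result back; e.g. old = 0xAB, new = 0x10, mask = 0xF0 yields 0x1B.
 */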

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
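
/* A zero UDP checksum means "no checksum", so when an updated checksum
 * folds to zero it is transmitted as CSUM_MANGLED_0 (0xffff), which is
 * equivalent in one's-complement arithmetic.
 */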

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
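
/* csum_replace2() applies an incremental (RFC 1624 style) update: only the
 * 16-bit word holding the TTL changes, so the IP header checksum is patched
 * rather than recomputed.
 */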

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking
	 * them individually.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
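
/* XOR-ing the old stored, old correct and new correct CRC32c values above
 * preserves any pre-existing checksum error: a packet that arrived with a
 * bad SCTP checksum leaves with an equally bad one instead of being
 * silently repaired.
 */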

static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}
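
/* Handing fragment state over via per-CPU ovs_frag_data works because
 * ip_do_fragment() and ipv6_stub->ipv6_fragment() invoke ovs_vport_output()
 * synchronously on the same CPU before ovs_fragment() returns.
 */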

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}
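
/* OVS_CB(skb)->mru is non-zero only for packets that were reassembled from
 * fragments (e.g. by conntrack defragmentation); do_output() refragments
 * such packets unless they already fit through the egress device.
 */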

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	consume_skb(skb);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}
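
/* arg->probability is scaled so that U32_MAX means "always sample"; smaller
 * values are compared against a fresh pseudo-random u32, giving an overall
 * sampling rate of roughly probability / 2^32.
 */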

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_ARG'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
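
/* A masked-set attribute carries the key value immediately followed by an
 * equally sized mask, so adding 1 to a correctly typed pointer at
 * nla_data() lands on the mask half.
 */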

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}
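
/* A return value of -EHOSTUNREACH is not treated as a hard error by the
 * caller: it diverts the expired packet to the nested
 * OVS_DEC_TTL_ATTR_ACTION list (which userspace may use, for example, to
 * generate an ICMP time-exceeded reply) instead of freeing it.
 */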

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on a clone of the packet. The execution does not
 * affect the original 'skb' or the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions and can be used directly.
	 * Otherwise, try to clone the key from the next recursion level
	 * of 'flow_keys'. If the clone succeeds, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions. */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}
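
/* Deferred work queued by deeper recursion levels is drained only at the
 * outermost level (level == 1) above; together with OVS_RECURSION_LIMIT
 * this bounds kernel stack usage no matter how deeply actions nest.
 */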

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}
1/*
2 * Copyright (c) 2007-2014 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21#include <linux/skbuff.h>
22#include <linux/in.h>
23#include <linux/ip.h>
24#include <linux/openvswitch.h>
25#include <linux/netfilter_ipv6.h>
26#include <linux/sctp.h>
27#include <linux/tcp.h>
28#include <linux/udp.h>
29#include <linux/in6.h>
30#include <linux/if_arp.h>
31#include <linux/if_vlan.h>
32
33#include <net/dst.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/ip6_fib.h>
37#include <net/checksum.h>
38#include <net/dsfield.h>
39#include <net/mpls.h>
40#include <net/sctp/checksum.h>
41
42#include "datapath.h"
43#include "flow.h"
44#include "conntrack.h"
45#include "vport.h"
46
47static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
48 struct sw_flow_key *key,
49 const struct nlattr *attr, int len);
50
51struct deferred_action {
52 struct sk_buff *skb;
53 const struct nlattr *actions;
54
55 /* Store pkt_key clone when creating deferred action. */
56 struct sw_flow_key pkt_key;
57};
58
59#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
60struct ovs_frag_data {
61 unsigned long dst;
62 struct vport *vport;
63 struct ovs_skb_cb cb;
64 __be16 inner_protocol;
65 __u16 vlan_tci;
66 __be16 vlan_proto;
67 unsigned int l2_len;
68 u8 l2_data[MAX_L2_LEN];
69};
70
71static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
72
73#define DEFERRED_ACTION_FIFO_SIZE 10
74struct action_fifo {
75 int head;
76 int tail;
77 /* Deferred action fifo queue storage. */
78 struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
79};
80
81static struct action_fifo __percpu *action_fifos;
82static DEFINE_PER_CPU(int, exec_actions_level);
83
84static void action_fifo_init(struct action_fifo *fifo)
85{
86 fifo->head = 0;
87 fifo->tail = 0;
88}
89
90static bool action_fifo_is_empty(const struct action_fifo *fifo)
91{
92 return (fifo->head == fifo->tail);
93}
94
95static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
96{
97 if (action_fifo_is_empty(fifo))
98 return NULL;
99
100 return &fifo->fifo[fifo->tail++];
101}
102
103static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
104{
105 if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
106 return NULL;
107
108 return &fifo->fifo[fifo->head++];
109}
110
111/* Return true if fifo is not full */
112static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
113 const struct sw_flow_key *key,
114 const struct nlattr *attr)
115{
116 struct action_fifo *fifo;
117 struct deferred_action *da;
118
119 fifo = this_cpu_ptr(action_fifos);
120 da = action_fifo_put(fifo);
121 if (da) {
122 da->skb = skb;
123 da->actions = attr;
124 da->pkt_key = *key;
125 }
126
127 return da;
128}
129
130static void invalidate_flow_key(struct sw_flow_key *key)
131{
132 key->eth.type = htons(0);
133}
134
135static bool is_flow_key_valid(const struct sw_flow_key *key)
136{
137 return !!key->eth.type;
138}
139
140static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
141 const struct ovs_action_push_mpls *mpls)
142{
143 __be32 *new_mpls_lse;
144 struct ethhdr *hdr;
145
146 /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
147 if (skb->encapsulation)
148 return -ENOTSUPP;
149
150 if (skb_cow_head(skb, MPLS_HLEN) < 0)
151 return -ENOMEM;
152
153 skb_push(skb, MPLS_HLEN);
154 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
155 skb->mac_len);
156 skb_reset_mac_header(skb);
157
158 new_mpls_lse = (__be32 *)skb_mpls_header(skb);
159 *new_mpls_lse = mpls->mpls_lse;
160
161 skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
162
163 hdr = eth_hdr(skb);
164 hdr->h_proto = mpls->mpls_ethertype;
165
166 if (!skb->inner_protocol)
167 skb_set_inner_protocol(skb, skb->protocol);
168 skb->protocol = mpls->mpls_ethertype;
169
170 invalidate_flow_key(key);
171 return 0;
172}
173
174static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
175 const __be16 ethertype)
176{
177 struct ethhdr *hdr;
178 int err;
179
180 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
181 if (unlikely(err))
182 return err;
183
184 skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);
185
186 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
187 skb->mac_len);
188
189 __skb_pull(skb, MPLS_HLEN);
190 skb_reset_mac_header(skb);
191
192 /* skb_mpls_header() is used to locate the ethertype
193 * field correctly in the presence of VLAN tags.
194 */
195 hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
196 hdr->h_proto = ethertype;
197 if (eth_p_mpls(skb->protocol))
198 skb->protocol = ethertype;
199
200 invalidate_flow_key(key);
201 return 0;
202}
203
204static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
205 const __be32 *mpls_lse, const __be32 *mask)
206{
207 __be32 *stack;
208 __be32 lse;
209 int err;
210
211 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
212 if (unlikely(err))
213 return err;
214
215 stack = (__be32 *)skb_mpls_header(skb);
216 lse = OVS_MASKED(*stack, *mpls_lse, *mask);
217 if (skb->ip_summed == CHECKSUM_COMPLETE) {
218 __be32 diff[] = { ~(*stack), lse };
219
220 skb->csum = ~csum_partial((char *)diff, sizeof(diff),
221 ~skb->csum);
222 }
223
224 *stack = lse;
225 flow_key->mpls.top_lse = lse;
226 return 0;
227}
228
229static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
230{
231 int err;
232
233 err = skb_vlan_pop(skb);
234 if (skb_vlan_tag_present(skb))
235 invalidate_flow_key(key);
236 else
237 key->eth.tci = 0;
238 return err;
239}
240
241static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
242 const struct ovs_action_push_vlan *vlan)
243{
244 if (skb_vlan_tag_present(skb))
245 invalidate_flow_key(key);
246 else
247 key->eth.tci = vlan->vlan_tci;
248 return skb_vlan_push(skb, vlan->vlan_tpid,
249 ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
250}
251
252/* 'src' is already properly masked. */
253static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
254{
255 u16 *dst = (u16 *)dst_;
256 const u16 *src = (const u16 *)src_;
257 const u16 *mask = (const u16 *)mask_;
258
259 OVS_SET_MASKED(dst[0], src[0], mask[0]);
260 OVS_SET_MASKED(dst[1], src[1], mask[1]);
261 OVS_SET_MASKED(dst[2], src[2], mask[2]);
262}
263
264static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
265 const struct ovs_key_ethernet *key,
266 const struct ovs_key_ethernet *mask)
267{
268 int err;
269
270 err = skb_ensure_writable(skb, ETH_HLEN);
271 if (unlikely(err))
272 return err;
273
274 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
275
276 ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
277 mask->eth_src);
278 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
279 mask->eth_dst);
280
281 skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
282
283 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
284 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
285 return 0;
286}
287
288static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
289 __be32 addr, __be32 new_addr)
290{
291 int transport_len = skb->len - skb_transport_offset(skb);
292
293 if (nh->frag_off & htons(IP_OFFSET))
294 return;
295
296 if (nh->protocol == IPPROTO_TCP) {
297 if (likely(transport_len >= sizeof(struct tcphdr)))
298 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
299 addr, new_addr, true);
300 } else if (nh->protocol == IPPROTO_UDP) {
301 if (likely(transport_len >= sizeof(struct udphdr))) {
302 struct udphdr *uh = udp_hdr(skb);
303
304 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
305 inet_proto_csum_replace4(&uh->check, skb,
306 addr, new_addr, true);
307 if (!uh->check)
308 uh->check = CSUM_MANGLED_0;
309 }
310 }
311 }
312}
313
314static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
315 __be32 *addr, __be32 new_addr)
316{
317 update_ip_l4_checksum(skb, nh, *addr, new_addr);
318 csum_replace4(&nh->check, *addr, new_addr);
319 skb_clear_hash(skb);
320 *addr = new_addr;
321}
322
323static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
324 __be32 addr[4], const __be32 new_addr[4])
325{
326 int transport_len = skb->len - skb_transport_offset(skb);
327
328 if (l4_proto == NEXTHDR_TCP) {
329 if (likely(transport_len >= sizeof(struct tcphdr)))
330 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
331 addr, new_addr, true);
332 } else if (l4_proto == NEXTHDR_UDP) {
333 if (likely(transport_len >= sizeof(struct udphdr))) {
334 struct udphdr *uh = udp_hdr(skb);
335
336 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
337 inet_proto_csum_replace16(&uh->check, skb,
338 addr, new_addr, true);
339 if (!uh->check)
340 uh->check = CSUM_MANGLED_0;
341 }
342 }
343 } else if (l4_proto == NEXTHDR_ICMP) {
344 if (likely(transport_len >= sizeof(struct icmp6hdr)))
345 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
346 skb, addr, new_addr, true);
347 }
348}
349
350static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
351 const __be32 mask[4], __be32 masked[4])
352{
353 masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
354 masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
355 masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
356 masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
357}
358
359static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
360 __be32 addr[4], const __be32 new_addr[4],
361 bool recalculate_csum)
362{
363 if (recalculate_csum)
364 update_ipv6_checksum(skb, l4_proto, addr, new_addr);
365
366 skb_clear_hash(skb);
367 memcpy(addr, new_addr, sizeof(__be32[4]));
368}
369
370static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
371{
372 /* Bits 21-24 are always unmasked, so this retains their values. */
373 OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
374 OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
375 OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
376}
377
378static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
379 u8 mask)
380{
381 new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
382
383 csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
384 nh->ttl = new_ttl;
385}
386
387static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
388 const struct ovs_key_ipv4 *key,
389 const struct ovs_key_ipv4 *mask)
390{
391 struct iphdr *nh;
392 __be32 new_addr;
393 int err;
394
395 err = skb_ensure_writable(skb, skb_network_offset(skb) +
396 sizeof(struct iphdr));
397 if (unlikely(err))
398 return err;
399
400 nh = ip_hdr(skb);
401
402 /* Setting an IP addresses is typically only a side effect of
403 * matching on them in the current userspace implementation, so it
404 * makes sense to check if the value actually changed.
405 */
406 if (mask->ipv4_src) {
407 new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
408
409 if (unlikely(new_addr != nh->saddr)) {
410 set_ip_addr(skb, nh, &nh->saddr, new_addr);
411 flow_key->ipv4.addr.src = new_addr;
412 }
413 }
414 if (mask->ipv4_dst) {
415 new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
416
417 if (unlikely(new_addr != nh->daddr)) {
418 set_ip_addr(skb, nh, &nh->daddr, new_addr);
419 flow_key->ipv4.addr.dst = new_addr;
420 }
421 }
422 if (mask->ipv4_tos) {
423 ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
424 flow_key->ip.tos = nh->tos;
425 }
426 if (mask->ipv4_ttl) {
427 set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
428 flow_key->ip.ttl = nh->ttl;
429 }
430
431 return 0;
432}
433
434static bool is_ipv6_mask_nonzero(const __be32 addr[4])
435{
436 return !!(addr[0] | addr[1] | addr[2] | addr[3]);
437}
438
439static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
440 const struct ovs_key_ipv6 *key,
441 const struct ovs_key_ipv6 *mask)
442{
443 struct ipv6hdr *nh;
444 int err;
445
446 err = skb_ensure_writable(skb, skb_network_offset(skb) +
447 sizeof(struct ipv6hdr));
448 if (unlikely(err))
449 return err;
450
451 nh = ipv6_hdr(skb);
452
453 /* Setting an IP addresses is typically only a side effect of
454 * matching on them in the current userspace implementation, so it
455 * makes sense to check if the value actually changed.
456 */
457 if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
458 __be32 *saddr = (__be32 *)&nh->saddr;
459 __be32 masked[4];
460
461 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
462
463 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
464 set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
465 true);
466 memcpy(&flow_key->ipv6.addr.src, masked,
467 sizeof(flow_key->ipv6.addr.src));
468 }
469 }
470 if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
471 unsigned int offset = 0;
472 int flags = IP6_FH_F_SKIP_RH;
473 bool recalc_csum = true;
474 __be32 *daddr = (__be32 *)&nh->daddr;
475 __be32 masked[4];
476
477 mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
478
479 if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}
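
/* The 'false' pseudohdr argument is deliberate: ports are not part of the
 * TCP/UDP pseudo-header. For CHECKSUM_PARTIAL packets the device recomputes
 * the checksum over the payload anyway; only pseudo-header changes would
 * need folding into the stored partial checksum.
 */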

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking
	 * each of them.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

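		/* On the wire a UDP checksum of zero means "no checksum"
		 * (RFC 768), so a computed value of 0 must be sent as
		 * all-ones instead.
		 */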
		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
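	/* That is: old_csum ^ old_correct_csum is the sender's error term
	 * (zero for a packet that arrived with a good CRC), and XOR-ing it
	 * into the freshly computed new_csum reproduces the same error in
	 * the rewritten packet instead of silently repairing it.
	 */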
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};
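
/* The IPv4/IPv6 fragmentation code sizes fragments from the MTU of
 * skb_dst(skb), so ovs_fragment() below attaches a minimal stack-allocated,
 * refcount-less dst whose only job is to report the egress vport's MTU via
 * ovs_dst_get_mtu().
 */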

/* prepare_frag() is called once per larger-than-MTU frame; its inverse is
 * ovs_vport_output(), which is called once per resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;

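		/* A non-zero mru means the packet was reassembled from
		 * fragments (e.g. by conntrack defrag); anything that no
		 * longer fits is re-fragmented on output rather than sent
		 * oversized.
		 */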
		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(net, vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, nla_data(acts_list),
				  nla_len(acts_list))) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
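
/* A zero ovs_flow_hash is read as "no hash computed" elsewhere in the
 * datapath, so a legitimately computed hash of zero is nudged to 0x1 to
 * stay distinguishable from the unset case.
 */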

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
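
/* Masked-set attributes pack the key and its mask back to back in a single
 * netlink payload:
 *
 *	| nlattr hdr | struct ovs_key_xxx (key) | struct ovs_key_xxx (mask) |
 *
 * Casting nla_data() to the key's pointer type and adding 1 steps over one
 * key-sized struct and lands on the mask, e.g.
 * get_mask(a, struct ovs_key_ipv4 *) == (struct ovs_key_ipv4 *)nla_data(a) + 1.
 */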

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		/* Conntrack fields are set via the CT action, not here. */
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL, 0);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
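
/* Deferring through the per-CPU action FIFO flattens what would otherwise be
 * recursive action execution: recirculation is queued here with a NULL action
 * list (meaning "re-run flow lookup") and processed from
 * process_deferred_actions() once the current action list unwinds, keeping
 * kernel stack usage bounded.
 */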

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff would be wasteful. The following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

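	/* Only the outermost invocation drains the FIFO; nested calls just
	 * queue onto it and unwind.
	 */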
	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}