// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
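
/* Action execution can nest via recirculation, sample and clone actions.
 * 'exec_actions_level' tracks the per-CPU nesting depth: up to
 * OVS_DEFERRED_ACTION_THRESHOLD levels run immediately on the stack, each
 * using one pre-allocated key from 'flow_keys'; deeper levels are queued
 * in the per-CPU 'action_fifos' and drained once the outermost call
 * (level 1) finishes. Beyond OVS_RECURSION_LIMIT the packet is dropped.
 */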

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
/* Return the deferred action entry if the fifo is not full, else NULL. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}
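
/* Actions that rewrite or strip headers leave 'key' stale, so they mark
 * it with SW_FLOW_KEY_INVALID (an otherwise unused bit of 'mac_proto').
 * Consumers that need an accurate key afterwards, such as recirculation
 * and conntrack, must call ovs_flow_key_update() first.
 */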

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	int err;

	err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
			    skb->mac_len);
	if (err)
		return err;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len);
	if (err)
		return err;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}
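
/* The flow key only tracks the outermost (hardware-accelerated) VLAN tag.
 * If a pop leaves another tag exposed, or a push stacks a second tag on
 * top of an existing one, the key no longer matches the packet and is
 * invalidated rather than patched up.
 */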

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
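
/* ETH_P_TEB ("transparent Ethernet bridging") as the inner protocol means
 * the decapsulated payload starts with an Ethernet header, hence
 * MAC_PROTO_ETHERNET above; any other protocol leaves a bare L3 packet.
 */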

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
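
/* Non-first IP fragments (nonzero fragment offset) carry no L4 header, so
 * there is no transport checksum to fix up. A zero UDP checksum over IPv4
 * means "no checksum" and is left alone unless the stack still has to
 * finish it (CHECKSUM_PARTIAL); a recomputed value of zero is written as
 * CSUM_MANGLED_0 (0xffff), its ones-complement equivalent.
 */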

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
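
/* For IPv6, the transport pseudo-header covers the final destination, so
 * rewriting the destination address only needs a checksum fix-up when no
 * routing extension header will change the address again in flight;
 * set_ipv6() below checks for a routing header before recalculating.
 */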

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}
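
/* Ports are not part of the pseudo-header, so inet_proto_csum_replace2()
 * is called with pseudohdr == false above; only the plain payload
 * checksum needs the two-byte substitution.
 */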

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}
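
/* SCTP uses a full CRC32c rather than an incremental checksum, so the
 * header is rewritten and the CRC recomputed over the packet. XOR-ing
 * the received CRC with the old and new correct values preserves any
 * pre-existing error, so a corrupt packet stays detectably corrupt.
 */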

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};
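
/* The IPv4/IPv6 fragmentation paths used below expect a dst on the skb.
 * A minimal fake dst backed by 'ovs_dst_ops' supplies the egress
 * device's MTU; the original refdst is stashed by prepare_frag() and
 * restored on every fragment by ovs_vport_output().
 */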

/* prepare_frag() is called once per frame that needs fragmenting; its
 * counterpart ovs_vport_output() then runs once per resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}
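
/* Send the packet as-is when it fits within the maximum received unit
 * (MRU; zero means the packet was never fragmented on reception),
 * fragment it when the MRU still fits the egress device's MTU, and drop
 * it otherwise.
 */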

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}
/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}
/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_ARG'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
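
/* A computed hash of zero is nudged to 0x1 so that a zero
 * 'ovs_flow_hash' can keep meaning "no hash computed".
 */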

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
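
/* A masked set attribute carries the key immediately followed by an
 * equally sized mask, so advancing the typed payload pointer by one
 * element lands exactly on the mask half.
 */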

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	const struct nlattr *actions, *cpl_arg;
	const struct check_pkt_len_arg *arg;
	int rem = nla_len(attr);
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	if (skb->len <= arg->pkt_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'; in case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output.
				 */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on a clone of the packet. The execution affects
 * neither the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action.
		 */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

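/* Deferred actions are drained only by the outermost invocation
 * (recursion level 1) in ovs_execute_actions() below, once its own
 * action list has finished; entries queued while draining are picked up
 * by the same loop.
 */
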
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2007-2017 Nicira, Inc.
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/skbuff.h>
9#include <linux/in.h>
10#include <linux/ip.h>
11#include <linux/openvswitch.h>
12#include <linux/sctp.h>
13#include <linux/tcp.h>
14#include <linux/udp.h>
15#include <linux/in6.h>
16#include <linux/if_arp.h>
17#include <linux/if_vlan.h>
18
19#include <net/dst.h>
20#include <net/gso.h>
21#include <net/ip.h>
22#include <net/ipv6.h>
23#include <net/ip6_fib.h>
24#include <net/checksum.h>
25#include <net/dsfield.h>
26#include <net/mpls.h>
27
28#if IS_ENABLED(CONFIG_PSAMPLE)
29#include <net/psample.h>
30#endif
31
32#include <net/sctp/checksum.h>
33
34#include "datapath.h"
35#include "drop.h"
36#include "flow.h"
37#include "conntrack.h"
38#include "vport.h"
39#include "flow_netlink.h"
40#include "openvswitch_trace.h"
41
42struct deferred_action {
43 struct sk_buff *skb;
44 const struct nlattr *actions;
45 int actions_len;
46
47 /* Store pkt_key clone when creating deferred action. */
48 struct sw_flow_key pkt_key;
49};
50
51#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
52struct ovs_frag_data {
53 unsigned long dst;
54 struct vport *vport;
55 struct ovs_skb_cb cb;
56 __be16 inner_protocol;
57 u16 network_offset; /* valid only for MPLS */
58 u16 vlan_tci;
59 __be16 vlan_proto;
60 unsigned int l2_len;
61 u8 mac_proto;
62 u8 l2_data[MAX_L2_LEN];
63};
64
65static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
66
67#define DEFERRED_ACTION_FIFO_SIZE 10
68#define OVS_RECURSION_LIMIT 5
69#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
70struct action_fifo {
71 int head;
72 int tail;
73 /* Deferred action fifo queue storage. */
74 struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
75};
76
77struct action_flow_keys {
78 struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
79};
80
81static struct action_fifo __percpu *action_fifos;
82static struct action_flow_keys __percpu *flow_keys;
83static DEFINE_PER_CPU(int, exec_actions_level);
84
85/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
86 * space. Return NULL if out of key spaces.
87 */
88static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
89{
90 struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
91 int level = this_cpu_read(exec_actions_level);
92 struct sw_flow_key *key = NULL;
93
94 if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
95 key = &keys->key[level - 1];
96 *key = *key_;
97 }
98
99 return key;
100}
101
102static void action_fifo_init(struct action_fifo *fifo)
103{
104 fifo->head = 0;
105 fifo->tail = 0;
106}
107
108static bool action_fifo_is_empty(const struct action_fifo *fifo)
109{
110 return (fifo->head == fifo->tail);
111}
112
113static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
114{
115 if (action_fifo_is_empty(fifo))
116 return NULL;
117
118 return &fifo->fifo[fifo->tail++];
119}
120
121static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
122{
123 if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
124 return NULL;
125
126 return &fifo->fifo[fifo->head++];
127}
128
129/* Return true if fifo is not full */
130static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
131 const struct sw_flow_key *key,
132 const struct nlattr *actions,
133 const int actions_len)
134{
135 struct action_fifo *fifo;
136 struct deferred_action *da;
137
138 fifo = this_cpu_ptr(action_fifos);
139 da = action_fifo_put(fifo);
140 if (da) {
141 da->skb = skb;
142 da->actions = actions;
143 da->actions_len = actions_len;
144 da->pkt_key = *key;
145 }
146
147 return da;
148}
149
150static void invalidate_flow_key(struct sw_flow_key *key)
151{
152 key->mac_proto |= SW_FLOW_KEY_INVALID;
153}
154
155static bool is_flow_key_valid(const struct sw_flow_key *key)
156{
157 return !(key->mac_proto & SW_FLOW_KEY_INVALID);
158}
159
160static int clone_execute(struct datapath *dp, struct sk_buff *skb,
161 struct sw_flow_key *key,
162 u32 recirc_id,
163 const struct nlattr *actions, int len,
164 bool last, bool clone_flow_key);
165
166static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
167 struct sw_flow_key *key,
168 const struct nlattr *attr, int len);
169
170static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
171 __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
172{
173 int err;
174
175 err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
176 if (err)
177 return err;
178
179 if (!mac_len)
180 key->mac_proto = MAC_PROTO_NONE;
181
182 invalidate_flow_key(key);
183 return 0;
184}
185
186static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
187 const __be16 ethertype)
188{
189 int err;
190
191 err = skb_mpls_pop(skb, ethertype, skb->mac_len,
192 ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
193 if (err)
194 return err;
195
196 if (ethertype == htons(ETH_P_TEB))
197 key->mac_proto = MAC_PROTO_ETHERNET;
198
199 invalidate_flow_key(key);
200 return 0;
201}
202
203static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
204 const __be32 *mpls_lse, const __be32 *mask)
205{
206 struct mpls_shim_hdr *stack;
207 __be32 lse;
208 int err;
209
210 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
211 return -ENOMEM;
212
213 stack = mpls_hdr(skb);
214 lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
215 err = skb_mpls_update_lse(skb, lse);
216 if (err)
217 return err;
218
219 flow_key->mpls.lse[0] = lse;
220 return 0;
221}
222
223static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
224{
225 int err;
226
227 err = skb_vlan_pop(skb);
228 if (skb_vlan_tag_present(skb)) {
229 invalidate_flow_key(key);
230 } else {
231 key->eth.vlan.tci = 0;
232 key->eth.vlan.tpid = 0;
233 }
234 return err;
235}
236
237static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
238 const struct ovs_action_push_vlan *vlan)
239{
240 int err;
241
242 if (skb_vlan_tag_present(skb)) {
243 invalidate_flow_key(key);
244 } else {
245 key->eth.vlan.tci = vlan->vlan_tci;
246 key->eth.vlan.tpid = vlan->vlan_tpid;
247 }
248 err = skb_vlan_push(skb, vlan->vlan_tpid,
249 ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
250 skb_reset_mac_len(skb);
251 return err;
252}
253
254/* 'src' is already properly masked. */
255static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
256{
257 u16 *dst = (u16 *)dst_;
258 const u16 *src = (const u16 *)src_;
259 const u16 *mask = (const u16 *)mask_;
260
261 OVS_SET_MASKED(dst[0], src[0], mask[0]);
262 OVS_SET_MASKED(dst[1], src[1], mask[1]);
263 OVS_SET_MASKED(dst[2], src[2], mask[2]);
264}
265
266static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
267 const struct ovs_key_ethernet *key,
268 const struct ovs_key_ethernet *mask)
269{
270 int err;
271
272 err = skb_ensure_writable(skb, ETH_HLEN);
273 if (unlikely(err))
274 return err;
275
276 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
277
278 ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
279 mask->eth_src);
280 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
281 mask->eth_dst);
282
283 skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
284
285 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
286 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
287 return 0;
288}
289
290/* pop_eth does not support VLAN packets as this action is never called
291 * for them.
292 */
293static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
294{
295 int err;
296
297 err = skb_eth_pop(skb);
298 if (err)
299 return err;
300
301 /* safe right before invalidate_flow_key */
302 key->mac_proto = MAC_PROTO_NONE;
303 invalidate_flow_key(key);
304 return 0;
305}
306
307static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
308 const struct ovs_action_push_eth *ethh)
309{
310 int err;
311
312 err = skb_eth_push(skb, ethh->addresses.eth_dst,
313 ethh->addresses.eth_src);
314 if (err)
315 return err;
316
317 /* safe right before invalidate_flow_key */
318 key->mac_proto = MAC_PROTO_ETHERNET;
319 invalidate_flow_key(key);
320 return 0;
321}
322
323static noinline_for_stack int push_nsh(struct sk_buff *skb,
324 struct sw_flow_key *key,
325 const struct nlattr *a)
326{
327 u8 buffer[NSH_HDR_MAX_LEN];
328 struct nshhdr *nh = (struct nshhdr *)buffer;
329 int err;
330
331 err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
332 if (err)
333 return err;
334
335 err = nsh_push(skb, nh);
336 if (err)
337 return err;
338
339 /* safe right before invalidate_flow_key */
340 key->mac_proto = MAC_PROTO_NONE;
341 invalidate_flow_key(key);
342 return 0;
343}
344
345static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
346{
347 int err;
348
349 err = nsh_pop(skb);
350 if (err)
351 return err;
352
353 /* safe right before invalidate_flow_key */
354 if (skb->protocol == htons(ETH_P_TEB))
355 key->mac_proto = MAC_PROTO_ETHERNET;
356 else
357 key->mac_proto = MAC_PROTO_NONE;
358 invalidate_flow_key(key);
359 return 0;
360}
361
362static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
363 __be32 addr, __be32 new_addr)
364{
365 int transport_len = skb->len - skb_transport_offset(skb);
366
367 if (nh->frag_off & htons(IP_OFFSET))
368 return;
369
370 if (nh->protocol == IPPROTO_TCP) {
371 if (likely(transport_len >= sizeof(struct tcphdr)))
372 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
373 addr, new_addr, true);
374 } else if (nh->protocol == IPPROTO_UDP) {
375 if (likely(transport_len >= sizeof(struct udphdr))) {
376 struct udphdr *uh = udp_hdr(skb);
377
378 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
379 inet_proto_csum_replace4(&uh->check, skb,
380 addr, new_addr, true);
381 if (!uh->check)
382 uh->check = CSUM_MANGLED_0;
383 }
384 }
385 }
386}
387
388static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
389 __be32 *addr, __be32 new_addr)
390{
391 update_ip_l4_checksum(skb, nh, *addr, new_addr);
392 csum_replace4(&nh->check, *addr, new_addr);
393 skb_clear_hash(skb);
394 ovs_ct_clear(skb, NULL);
395 *addr = new_addr;
396}
397
398static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
399 __be32 addr[4], const __be32 new_addr[4])
400{
401 int transport_len = skb->len - skb_transport_offset(skb);
402
403 if (l4_proto == NEXTHDR_TCP) {
404 if (likely(transport_len >= sizeof(struct tcphdr)))
405 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
406 addr, new_addr, true);
407 } else if (l4_proto == NEXTHDR_UDP) {
408 if (likely(transport_len >= sizeof(struct udphdr))) {
409 struct udphdr *uh = udp_hdr(skb);
410
411 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
412 inet_proto_csum_replace16(&uh->check, skb,
413 addr, new_addr, true);
414 if (!uh->check)
415 uh->check = CSUM_MANGLED_0;
416 }
417 }
418 } else if (l4_proto == NEXTHDR_ICMP) {
419 if (likely(transport_len >= sizeof(struct icmp6hdr)))
420 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
421 skb, addr, new_addr, true);
422 }
423}
424
425static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
426 const __be32 mask[4], __be32 masked[4])
427{
428 masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
429 masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
430 masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
431 masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
432}
433
434static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
435 __be32 addr[4], const __be32 new_addr[4],
436 bool recalculate_csum)
437{
438 if (recalculate_csum)
439 update_ipv6_checksum(skb, l4_proto, addr, new_addr);
440
441 skb_clear_hash(skb);
442 ovs_ct_clear(skb, NULL);
443 memcpy(addr, new_addr, sizeof(__be32[4]));
444}
445
446static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
447{
448 u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
449
450 ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
451
452 if (skb->ip_summed == CHECKSUM_COMPLETE)
453 csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
454 (__force __wsum)(ipv6_tclass << 12));
455
456 ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
457}
458
459static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
460{
461 u32 ofl;
462
463 ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
464 fl = OVS_MASKED(ofl, fl, mask);
465
466 /* Bits 21-24 are always unmasked, so this retains their values. */
467 nh->flow_lbl[0] = (u8)(fl >> 16);
468 nh->flow_lbl[1] = (u8)(fl >> 8);
469 nh->flow_lbl[2] = (u8)fl;
470
471 if (skb->ip_summed == CHECKSUM_COMPLETE)
472 csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
473}
474
475static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
476{
477 new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
478
479 if (skb->ip_summed == CHECKSUM_COMPLETE)
480 csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
481 (__force __wsum)(new_ttl << 8));
482 nh->hop_limit = new_ttl;
483}
484
485static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
486 u8 mask)
487{
488 new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
489
490 csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
491 nh->ttl = new_ttl;
492}
493
494static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
495 const struct ovs_key_ipv4 *key,
496 const struct ovs_key_ipv4 *mask)
497{
498 struct iphdr *nh;
499 __be32 new_addr;
500 int err;
501
502 err = skb_ensure_writable(skb, skb_network_offset(skb) +
503 sizeof(struct iphdr));
504 if (unlikely(err))
505 return err;
506
507 nh = ip_hdr(skb);
508
509 /* Setting an IP addresses is typically only a side effect of
510 * matching on them in the current userspace implementation, so it
511 * makes sense to check if the value actually changed.
512 */
513 if (mask->ipv4_src) {
514 new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
515
516 if (unlikely(new_addr != nh->saddr)) {
517 set_ip_addr(skb, nh, &nh->saddr, new_addr);
518 flow_key->ipv4.addr.src = new_addr;
519 }
520 }
521 if (mask->ipv4_dst) {
522 new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
523
524 if (unlikely(new_addr != nh->daddr)) {
525 set_ip_addr(skb, nh, &nh->daddr, new_addr);
526 flow_key->ipv4.addr.dst = new_addr;
527 }
528 }
529 if (mask->ipv4_tos) {
530 ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
531 flow_key->ip.tos = nh->tos;
532 }
533 if (mask->ipv4_ttl) {
534 set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
535 flow_key->ip.ttl = nh->ttl;
536 }
537
538 return 0;
539}
540
541static bool is_ipv6_mask_nonzero(const __be32 addr[4])
542{
543 return !!(addr[0] | addr[1] | addr[2] | addr[3]);
544}
545
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

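/* Apply a masked rewrite of the NSH base header and, for MD type 1, the
 * fixed-size context headers. The received checksum is pulled before the
 * rewrite and pushed afterwards so CHECKSUM_COMPLETE skbs stay correct.
 */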
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) + length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

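/* Rewrite UDP ports. A computed UDP checksum of zero must be sent as
 * CSUM_MANGLED_0, since an on-the-wire zero means "no checksum"; datagrams
 * received without a checksum are rewritten without any checksum work.
 */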
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking
	 * them individually.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

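/* Rewrite SCTP ports. SCTP uses a CRC32c over the whole packet, which the
 * usual incremental checksum helpers cannot update, so the sum is recomputed
 * before and after the rewrite; XOR-ing the two correct sums with the
 * received one carries any pre-existing checksum error through unchanged.
 */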
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

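/* Output callback handed to the IP fragmentation code: invoked once per
 * fragment. It restores the dst, OVS control block, VLAN tag and L2 header
 * saved by prepare_frag() before sending the fragment out of 'vport'.
 */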
static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto,
				       data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per larger-than-MTU frame; its inverse is
 * ovs_vport_output(), which is called once per resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}

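/* Send 'skb' out of 'out_port', truncating it first if a cutlen is pending.
 * Packets that exceed the stored maximum received unit (MRU) are
 * refragmented to the MRU when it does not exceed the egress MTU, and
 * dropped otherwise.
 */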
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport &&
		   netif_running(vport->dev) &&
		   netif_carrier_ok(vport->dev))) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		/* The pkt_type must be set to involve the routing layer:
		 * packet movement through the OVS datapath doesn't generally
		 * use routing, but this is needed for tunnel cases.
		 */
		skb->pkt_type = PACKET_OUTGOING;

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		}
	} else {
		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
	}
}

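/* Build a dp_upcall_info from the OVS_ACTION_ATTR_USERSPACE attributes and
 * send the packet to userspace. With per-CPU upcall dispatch enabled, the
 * Netlink portid comes from the datapath rather than from the action.
 */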
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (dp->user_features &
			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
					ovs_dp_get_upcall_portid(dp,
								 smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	u32 init_probability;
	bool clone_flow_key;
	int err;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);
	init_probability = OVS_CB(skb)->probability;

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
		return 0;
	}

	OVS_CB(skb)->probability = arg->probability;

	clone_flow_key = !arg->exec;
	err = clone_execute(dp, skb, key, 0, actions, rem, last,
			    clone_flow_key);

	if (!last)
		OVS_CB(skb)->probability = init_probability;

	return err;
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

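/* Compute a packet hash for the flow key, folding in the caller-supplied
 * basis via jhash_1word(). A result of zero is mapped to 0x1 so that
 * "no hash" remains distinguishable.
 */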
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
		/* OVS_HASH_ALG_L4 hashing type. */
		hash = skb_get_hash(skb);
	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
		/* OVS_HASH_ALG_SYM_L4 hashing type. NOTE: this doesn't
		 * extend past an encapsulated header.
		 */
		hash = __skb_get_hash_symmetric(skb);
	}

	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Masked set actions carry the key and the mask back-to-back in a single
 * attribute, so the mask starts at the midpoint of the attribute data.
 */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

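/* Run one of two nested action lists depending on whether the packet length
 * exceeds arg->pkt_len. For previously fragmented packets the original
 * length (MRU plus L2 header) is compared, and GSO packets are validated
 * per segment.
 */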
static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

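/* Decrement the IPv4 TTL or IPv6 hop limit in place. Returns -EHOSTUNREACH
 * when the TTL would drop to zero, which diverts the packet to the
 * OVS_DEC_TTL_ATTR_ACTION list via dec_ttl_exception_handler().
 */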
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}

#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct psample_group psample_group = {};
	struct psample_metadata md = {};
	const struct nlattr *a;
	u32 rate;
	int rem;

	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(a)) {
		case OVS_PSAMPLE_ATTR_GROUP:
			psample_group.group_num = nla_get_u32(a);
			break;

		case OVS_PSAMPLE_ATTR_COOKIE:
			md.user_cookie = nla_data(a);
			md.user_cookie_len = nla_len(a);
			break;
		}
	}

	psample_group.net = ovs_dp_get_net(dp);
	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
	md.rate_as_probability = 1;

	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;

	psample_sample_packet(&psample_group, skb, rate, &md);
}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		/* Actions that rightfully have to consume the skb should do it
		 * and return directly.
		 */
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			err = push_nsh(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;

		case OVS_ACTION_ATTR_DROP: {
			enum ovs_drop_reason reason = nla_get_u32(a)
				? OVS_DROP_EXPLICIT_WITH_ERROR
				: OVS_DROP_EXPLICIT;

			ovs_kfree_skb_reason(skb, reason);
			return 0;
		}

		case OVS_ACTION_ATTR_PSAMPLE:
			execute_psample(dp, skb, a);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;
		}

		if (unlikely(err)) {
			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
			return err;
		}
	}

	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
	return 0;
}

/* Execute the actions on a clone of the packet. The execution affects
 * neither the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions. */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}

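/* Drain the per-CPU deferred action FIFO. Entries with actions re-enter
 * do_execute_actions(); entries without them are recirculations and go back
 * through the datapath lookup instead.
 */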
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

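/* Allocate the per-CPU deferred-action FIFOs and recursion flow-key storage
 * at module init; action_fifos_exit() releases them on unload.
 */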
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}