/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
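/* Actions normally execute recursively (recirculation and sample nest
 * another round of execution). Levels above OVS_DEFERRED_ACTION_THRESHOLD
 * have no pre-allocated flow-key slot left, so their recirc/sample work is
 * queued on the per-CPU 'action_fifos' and drained by
 * process_deferred_actions() once execution unwinds to the outermost level.
 */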
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return the deferred action entry to fill, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

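/* Under CHECKSUM_COMPLETE, skb->csum covers the received packet bytes, so
 * rewriting the 16-bit ethertype has to be folded back into the stored
 * checksum; the 'diff' pair below removes the old value and adds the new
 * one via csum_partial().
 */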
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
		update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
		struct ethhdr *hdr;

		/* mpls_hdr() is used to locate the ethertype field correctly in the
		 * presence of VLAN tags.
		 */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		update_ethertype(skb, hdr, ethertype);
	}
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked, so each OVS_SET_MASKED() below reduces
 * to dst = src | (dst & ~mask).
 */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

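/* Address rewrites must be reflected in the L4 checksum. Note that a UDP
 * checksum which recomputes to zero is written as CSUM_MANGLED_0 (0xffff),
 * since a literal zero in the UDP header means "no checksum".
 */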
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

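/* In the IPv4 header the TTL is the high byte of the 16-bit word it shares
 * with the protocol field, hence the htons(ttl << 8) pairs handed to
 * csum_replace2().
 */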
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

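		/* With a routing header present, the L4 checksum is computed
		 * against the final destination held in the routing header,
		 * so a rewrite of the fixed-header daddr must leave the
		 * checksum alone.
		 */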
608 if (ipv6_ext_hdr(nh->nexthdr))
609 recalc_csum = (ipv6_find_hdr(skb, &offset,
610 NEXTHDR_ROUTING,
611 NULL, &flags)
612 != NEXTHDR_ROUTING);
613
614 set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
615 recalc_csum);
616 memcpy(&flow_key->ipv6.addr.dst, masked,
617 sizeof(flow_key->ipv6.addr.dst));
618 }
619 }
620 if (mask->ipv6_tclass) {
621 ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
622 flow_key->ip.tos = ipv6_get_dsfield(nh);
623 }
624 if (mask->ipv6_label) {
625 set_ipv6_fl(nh, ntohl(key->ipv6_label),
626 ntohl(mask->ipv6_label));
627 flow_key->ipv6.label =
628 *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
629 }
630 if (mask->ipv6_hlimit) {
631 OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
632 mask->ipv6_hlimit);
633 flow_key->ip.ttl = nh->hop_limit;
634 }
635 return 0;
636}
637
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data.
 * The final 'false' tells inet_proto_csum_replace2() that the port is not
 * part of the pseudo-header.
 */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking
	 * them individually.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through, so a packet that arrived with
	 * a bad CRC leaves with one too.
	 */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once for each resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

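/* Fragment 'skb' to the MRU using the stack's own IP/IPv6 fragmentation
 * helpers. Since the packet carries no real route, a throwaway dst backed
 * by ovs_dst_ops is attached (noref, DST_NOCOUNT) so the helpers read the
 * MTU from vport->dev; ovs_vport_output() then restores the saved L2 state
 * on every fragment before sending it.
 */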
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first nested attribute is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

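	/* arg->probability is scaled over the full u32 range: U32_MAX
	 * samples every packet, zero samples none, and anything in between
	 * is compared against a pseudo-random draw.
	 */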
	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}

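/* Compute an OVS_HASH_ALG_L4 flow hash for the packet; zero is treated as
 * "no hash was computed", so a zero result is bumped to 0x1.
 */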
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
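/* A masked-set attribute carries the value followed by an equally sized
 * mask, e.g. for OVS_KEY_ATTR_UDP the payload is a struct ovs_key_udp key
 * followed by a struct ovs_key_udp mask, so casting nla_data() to the key
 * type and adding one lands on the mask.
 */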

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output.
				 */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action.
		 */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2007-2017 Nicira, Inc.
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/skbuff.h>
9#include <linux/in.h>
10#include <linux/ip.h>
11#include <linux/openvswitch.h>
12#include <linux/sctp.h>
13#include <linux/tcp.h>
14#include <linux/udp.h>
15#include <linux/in6.h>
16#include <linux/if_arp.h>
17#include <linux/if_vlan.h>
18
19#include <net/dst.h>
20#include <net/gso.h>
21#include <net/ip.h>
22#include <net/ipv6.h>
23#include <net/ip6_fib.h>
24#include <net/checksum.h>
25#include <net/dsfield.h>
26#include <net/mpls.h>
27
28#if IS_ENABLED(CONFIG_PSAMPLE)
29#include <net/psample.h>
30#endif
31
32#include <net/sctp/checksum.h>
33
34#include "datapath.h"
35#include "drop.h"
36#include "flow.h"
37#include "conntrack.h"
38#include "vport.h"
39#include "flow_netlink.h"
40#include "openvswitch_trace.h"
41
42struct deferred_action {
43 struct sk_buff *skb;
44 const struct nlattr *actions;
45 int actions_len;
46
47 /* Store pkt_key clone when creating deferred action. */
48 struct sw_flow_key pkt_key;
49};
50
51#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
52struct ovs_frag_data {
53 unsigned long dst;
54 struct vport *vport;
55 struct ovs_skb_cb cb;
56 __be16 inner_protocol;
57 u16 network_offset; /* valid only for MPLS */
58 u16 vlan_tci;
59 __be16 vlan_proto;
60 unsigned int l2_len;
61 u8 mac_proto;
62 u8 l2_data[MAX_L2_LEN];
63};
64
65static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
66
67#define DEFERRED_ACTION_FIFO_SIZE 10
68#define OVS_RECURSION_LIMIT 5
69#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
70struct action_fifo {
71 int head;
72 int tail;
73 /* Deferred action fifo queue storage. */
74 struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
75};
76
77struct action_flow_keys {
78 struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
79};
80
81static struct action_fifo __percpu *action_fifos;
82static struct action_flow_keys __percpu *flow_keys;
83static DEFINE_PER_CPU(int, exec_actions_level);
84
85/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
86 * space. Return NULL if out of key spaces.
87 */
88static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
89{
90 struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
91 int level = this_cpu_read(exec_actions_level);
92 struct sw_flow_key *key = NULL;
93
94 if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
95 key = &keys->key[level - 1];
96 *key = *key_;
97 }
98
99 return key;
100}
101
102static void action_fifo_init(struct action_fifo *fifo)
103{
104 fifo->head = 0;
105 fifo->tail = 0;
106}
107
108static bool action_fifo_is_empty(const struct action_fifo *fifo)
109{
110 return (fifo->head == fifo->tail);
111}
112
113static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
114{
115 if (action_fifo_is_empty(fifo))
116 return NULL;
117
118 return &fifo->fifo[fifo->tail++];
119}
120
121static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
122{
123 if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
124 return NULL;
125
126 return &fifo->fifo[fifo->head++];
127}
128
129/* Return true if fifo is not full */
130static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
131 const struct sw_flow_key *key,
132 const struct nlattr *actions,
133 const int actions_len)
134{
135 struct action_fifo *fifo;
136 struct deferred_action *da;
137
138 fifo = this_cpu_ptr(action_fifos);
139 da = action_fifo_put(fifo);
140 if (da) {
141 da->skb = skb;
142 da->actions = actions;
143 da->actions_len = actions_len;
144 da->pkt_key = *key;
145 }
146
147 return da;
148}
149
150static void invalidate_flow_key(struct sw_flow_key *key)
151{
152 key->mac_proto |= SW_FLOW_KEY_INVALID;
153}
154
155static bool is_flow_key_valid(const struct sw_flow_key *key)
156{
157 return !(key->mac_proto & SW_FLOW_KEY_INVALID);
158}
159
160static int clone_execute(struct datapath *dp, struct sk_buff *skb,
161 struct sw_flow_key *key,
162 u32 recirc_id,
163 const struct nlattr *actions, int len,
164 bool last, bool clone_flow_key);
165
166static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
167 struct sw_flow_key *key,
168 const struct nlattr *attr, int len);
169
170static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
171 __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
172{
173 int err;
174
175 err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
176 if (err)
177 return err;
178
179 if (!mac_len)
180 key->mac_proto = MAC_PROTO_NONE;
181
182 invalidate_flow_key(key);
183 return 0;
184}
185
186static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
187 const __be16 ethertype)
188{
189 int err;
190
191 err = skb_mpls_pop(skb, ethertype, skb->mac_len,
192 ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
193 if (err)
194 return err;
195
196 if (ethertype == htons(ETH_P_TEB))
197 key->mac_proto = MAC_PROTO_ETHERNET;
198
199 invalidate_flow_key(key);
200 return 0;
201}
202
203static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
204 const __be32 *mpls_lse, const __be32 *mask)
205{
206 struct mpls_shim_hdr *stack;
207 __be32 lse;
208 int err;
209
210 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
211 return -ENOMEM;
212
213 stack = mpls_hdr(skb);
214 lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
215 err = skb_mpls_update_lse(skb, lse);
216 if (err)
217 return err;
218
219 flow_key->mpls.lse[0] = lse;
220 return 0;
221}
222
223static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
224{
225 int err;
226
227 err = skb_vlan_pop(skb);
228 if (skb_vlan_tag_present(skb)) {
229 invalidate_flow_key(key);
230 } else {
231 key->eth.vlan.tci = 0;
232 key->eth.vlan.tpid = 0;
233 }
234 return err;
235}
236
237static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
238 const struct ovs_action_push_vlan *vlan)
239{
240 int err;
241
242 if (skb_vlan_tag_present(skb)) {
243 invalidate_flow_key(key);
244 } else {
245 key->eth.vlan.tci = vlan->vlan_tci;
246 key->eth.vlan.tpid = vlan->vlan_tpid;
247 }
248 err = skb_vlan_push(skb, vlan->vlan_tpid,
249 ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
250 skb_reset_mac_len(skb);
251 return err;
252}
253
254/* 'src' is already properly masked. */
255static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
256{
257 u16 *dst = (u16 *)dst_;
258 const u16 *src = (const u16 *)src_;
259 const u16 *mask = (const u16 *)mask_;
260
261 OVS_SET_MASKED(dst[0], src[0], mask[0]);
262 OVS_SET_MASKED(dst[1], src[1], mask[1]);
263 OVS_SET_MASKED(dst[2], src[2], mask[2]);
264}
265
266static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
267 const struct ovs_key_ethernet *key,
268 const struct ovs_key_ethernet *mask)
269{
270 int err;
271
272 err = skb_ensure_writable(skb, ETH_HLEN);
273 if (unlikely(err))
274 return err;
275
276 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
277
278 ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
279 mask->eth_src);
280 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
281 mask->eth_dst);
282
283 skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
284
285 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
286 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
287 return 0;
288}
289
290/* pop_eth does not support VLAN packets as this action is never called
291 * for them.
292 */
293static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
294{
295 int err;
296
297 err = skb_eth_pop(skb);
298 if (err)
299 return err;
300
301 /* safe right before invalidate_flow_key */
302 key->mac_proto = MAC_PROTO_NONE;
303 invalidate_flow_key(key);
304 return 0;
305}
306
307static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
308 const struct ovs_action_push_eth *ethh)
309{
310 int err;
311
312 err = skb_eth_push(skb, ethh->addresses.eth_dst,
313 ethh->addresses.eth_src);
314 if (err)
315 return err;
316
317 /* safe right before invalidate_flow_key */
318 key->mac_proto = MAC_PROTO_ETHERNET;
319 invalidate_flow_key(key);
320 return 0;
321}
322
323static noinline_for_stack int push_nsh(struct sk_buff *skb,
324 struct sw_flow_key *key,
325 const struct nlattr *a)
326{
327 u8 buffer[NSH_HDR_MAX_LEN];
328 struct nshhdr *nh = (struct nshhdr *)buffer;
329 int err;
330
331 err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
332 if (err)
333 return err;
334
335 err = nsh_push(skb, nh);
336 if (err)
337 return err;
338
339 /* safe right before invalidate_flow_key */
340 key->mac_proto = MAC_PROTO_NONE;
341 invalidate_flow_key(key);
342 return 0;
343}
344
345static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
346{
347 int err;
348
349 err = nsh_pop(skb);
350 if (err)
351 return err;
352
353 /* safe right before invalidate_flow_key */
354 if (skb->protocol == htons(ETH_P_TEB))
355 key->mac_proto = MAC_PROTO_ETHERNET;
356 else
357 key->mac_proto = MAC_PROTO_NONE;
358 invalidate_flow_key(key);
359 return 0;
360}
361
362static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
363 __be32 addr, __be32 new_addr)
364{
365 int transport_len = skb->len - skb_transport_offset(skb);
366
367 if (nh->frag_off & htons(IP_OFFSET))
368 return;
369
370 if (nh->protocol == IPPROTO_TCP) {
371 if (likely(transport_len >= sizeof(struct tcphdr)))
372 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
373 addr, new_addr, true);
374 } else if (nh->protocol == IPPROTO_UDP) {
375 if (likely(transport_len >= sizeof(struct udphdr))) {
376 struct udphdr *uh = udp_hdr(skb);
377
378 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
379 inet_proto_csum_replace4(&uh->check, skb,
380 addr, new_addr, true);
381 if (!uh->check)
382 uh->check = CSUM_MANGLED_0;
383 }
384 }
385 }
386}
387
388static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
389 __be32 *addr, __be32 new_addr)
390{
391 update_ip_l4_checksum(skb, nh, *addr, new_addr);
392 csum_replace4(&nh->check, *addr, new_addr);
393 skb_clear_hash(skb);
394 ovs_ct_clear(skb, NULL);
395 *addr = new_addr;
396}
397
398static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
399 __be32 addr[4], const __be32 new_addr[4])
400{
401 int transport_len = skb->len - skb_transport_offset(skb);
402
403 if (l4_proto == NEXTHDR_TCP) {
404 if (likely(transport_len >= sizeof(struct tcphdr)))
405 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
406 addr, new_addr, true);
407 } else if (l4_proto == NEXTHDR_UDP) {
408 if (likely(transport_len >= sizeof(struct udphdr))) {
409 struct udphdr *uh = udp_hdr(skb);
410
411 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
412 inet_proto_csum_replace16(&uh->check, skb,
413 addr, new_addr, true);
414 if (!uh->check)
415 uh->check = CSUM_MANGLED_0;
416 }
417 }
418 } else if (l4_proto == NEXTHDR_ICMP) {
419 if (likely(transport_len >= sizeof(struct icmp6hdr)))
420 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
421 skb, addr, new_addr, true);
422 }
423}
424
425static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
426 const __be32 mask[4], __be32 masked[4])
427{
428 masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
429 masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
430 masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
431 masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
432}
433
434static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
435 __be32 addr[4], const __be32 new_addr[4],
436 bool recalculate_csum)
437{
438 if (recalculate_csum)
439 update_ipv6_checksum(skb, l4_proto, addr, new_addr);
440
441 skb_clear_hash(skb);
442 ovs_ct_clear(skb, NULL);
443 memcpy(addr, new_addr, sizeof(__be32[4]));
444}
445
446static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
447{
448 u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
449
450 ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
451
452 if (skb->ip_summed == CHECKSUM_COMPLETE)
453 csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
454 (__force __wsum)(ipv6_tclass << 12));
455
456 ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
457}
458
459static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
460{
461 u32 ofl;
462
463 ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
464 fl = OVS_MASKED(ofl, fl, mask);
465
466 /* Bits 21-24 are always unmasked, so this retains their values. */
467 nh->flow_lbl[0] = (u8)(fl >> 16);
468 nh->flow_lbl[1] = (u8)(fl >> 8);
469 nh->flow_lbl[2] = (u8)fl;
470
471 if (skb->ip_summed == CHECKSUM_COMPLETE)
472 csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
473}
474
475static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
476{
477 new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
478
479 if (skb->ip_summed == CHECKSUM_COMPLETE)
480 csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
481 (__force __wsum)(new_ttl << 8));
482 nh->hop_limit = new_ttl;
483}
484
485static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
486 u8 mask)
487{
488 new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
489
490 csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
491 nh->ttl = new_ttl;
492}
493
494static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
495 const struct ovs_key_ipv4 *key,
496 const struct ovs_key_ipv4 *mask)
497{
498 struct iphdr *nh;
499 __be32 new_addr;
500 int err;
501
502 err = skb_ensure_writable(skb, skb_network_offset(skb) +
503 sizeof(struct iphdr));
504 if (unlikely(err))
505 return err;
506
507 nh = ip_hdr(skb);
508
509 /* Setting an IP addresses is typically only a side effect of
510 * matching on them in the current userspace implementation, so it
511 * makes sense to check if the value actually changed.
512 */
513 if (mask->ipv4_src) {
514 new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
515
516 if (unlikely(new_addr != nh->saddr)) {
517 set_ip_addr(skb, nh, &nh->saddr, new_addr);
518 flow_key->ipv4.addr.src = new_addr;
519 }
520 }
521 if (mask->ipv4_dst) {
522 new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
523
524 if (unlikely(new_addr != nh->daddr)) {
525 set_ip_addr(skb, nh, &nh->daddr, new_addr);
526 flow_key->ipv4.addr.dst = new_addr;
527 }
528 }
529 if (mask->ipv4_tos) {
530 ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
531 flow_key->ip.tos = nh->tos;
532 }
533 if (mask->ipv4_ttl) {
534 set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
535 flow_key->ip.ttl = nh->ttl;
536 }
537
538 return 0;
539}
540
541static bool is_ipv6_mask_nonzero(const __be32 addr[4])
542{
543 return !!(addr[0] | addr[1] | addr[2] | addr[3]);
544}
545
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

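/* Apply an OVS_KEY_ATTR_NSH masked set action: update the flags, TTL, path
 * header and (for MD type 1) context words of the NSH header, keeping
 * skb->csum consistent via the postpull/postpush pair and mirroring the new
 * values into 'flow_key'.
 */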
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) + length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

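/* Apply an OVS_KEY_ATTR_UDP masked set action. The UDP checksum is only
 * updated incrementally when the datagram carries one and the skb is not
 * CHECKSUM_PARTIAL; a zero (disabled) checksum is left untouched.
 */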
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so checking them
	 * individually is not worthwhile.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

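/* Apply an OVS_KEY_ATTR_TCP masked set action, updating the TCP checksum
 * incrementally for each port that actually changes.
 */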
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

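/* Apply an OVS_KEY_ATTR_SCTP masked set action. The CRC32c checksum is
 * recomputed in a way that carries any pre-existing checksum error through
 * unchanged, so corrupted packets stay detectably corrupted.
 */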
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

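/* Output callback handed to the IP fragmentation code: restore the state
 * stashed by prepare_frag() (dst, OVS cb, VLAN tag and L2 header) on each
 * fragment and send it out through the saved vport.
 */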
static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto,
				       data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once for each resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

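/* Fragment an over-MRU packet and transmit the pieces through 'vport'.
 * A throw-away on-stack dst pointing at the vport's device is installed so
 * that the stock IPv4/IPv6 fragmentation paths can be reused; packets that
 * are neither IPv4 nor IPv6, or whose L2 header is too long to stash, are
 * dropped.
 */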
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}

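/* Transmit 'skb' on the vport identified by 'out_port', truncating it first
 * if a cutlen is pending and fragmenting it if it exceeds the flow's MRU.
 */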
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport &&
		   netif_running(vport->dev) &&
		   netif_carrier_ok(vport->dev))) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		/* Need to set the pkt_type to involve the routing layer. The
		 * packet movement through the OVS datapath doesn't generally
		 * use routing, but this is needed for tunnel cases.
		 */
		skb->pkt_type = PACKET_OUTGOING;

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		}
	} else {
		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
	}
}

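/* Build a dp_upcall_info from the OVS_ACTION_ATTR_USERSPACE attribute's
 * nested attributes (userdata, destination portid, optional egress tunnel
 * info and actions) and send the packet up to userspace.
 */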
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (dp->user_features &
			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
					ovs_dp_get_upcall_portid(dp,
								 smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

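/* Run the nested OVS_DEC_TTL_ATTR_ACTION list for a packet whose TTL has
 * expired; if no actions were given, the packet is simply dropped.
 */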
static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	u32 init_probability;
	bool clone_flow_key;
	int err;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);
	init_probability = OVS_CB(skb)->probability;

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
		return 0;
	}

	OVS_CB(skb)->probability = arg->probability;

	clone_flow_key = !arg->exec;
	err = clone_execute(dp, skb, key, 0, actions, rem, last,
			    clone_flow_key);

	if (!last)
		OVS_CB(skb)->probability = init_probability;

	return err;
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

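/* Compute the hash requested by an OVS_ACTION_ATTR_HASH action and store it
 * in key->ovs_flow_hash, mapping a zero result to 0x1 so that a valid hash
 * is never zero.
 */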
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
		/* OVS_HASH_ALG_L4 hashing type. */
		hash = skb_get_hash(skb);
	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
		/* OVS_HASH_ALG_SYM_L4 hashing type. NOTE: this doesn't
		 * extend past an encapsulated header.
		 */
		hash = __skb_get_hash_symmetric(skb);
	}

	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

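/* Dispatch a masked set action (OVS_ACTION_ATTR_SET_MASKED) to the
 * field-specific handler. The attribute payload holds the value followed by
 * the mask, which get_mask() extracts. Conntrack fields cannot be set this
 * way and are rejected with -EINVAL.
 */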
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

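/* Handle OVS_ACTION_ATTR_RECIRC: refresh the flow key if it is stale, then
 * re-enter datapath processing via clone_execute() under the new
 * recirculation id.
 */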
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

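/* Handle OVS_ACTION_ATTR_CHECK_PKT_LEN: compare the packet length (the
 * pre-fragmentation length when an MRU is recorded) against arg->pkt_len;
 * GSO packets pass if all their segments fit. Then execute the matching
 * "less or equal" or "greater" nested action list.
 */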
static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

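/* Decrement the IPv4 TTL or IPv6 hop limit in place, updating the IPv4
 * header checksum and the flow key. Returns -EHOSTUNREACH when the TTL
 * would drop to zero, so the caller can run the dec_ttl exception actions.
 */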
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}

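/* Handle OVS_ACTION_ATTR_PSAMPLE: collect the group number and user cookie
 * from the nested attributes and hand the packet to the psample subsystem,
 * reusing the sampling probability recorded in the skb control block as the
 * psample rate. Compiled out to a no-op when CONFIG_PSAMPLE is disabled.
 */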
#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct psample_group psample_group = {};
	struct psample_metadata md = {};
	const struct nlattr *a;
	u32 rate;
	int rem;

	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(a)) {
		case OVS_PSAMPLE_ATTR_GROUP:
			psample_group.group_num = nla_get_u32(a);
			break;

		case OVS_PSAMPLE_ATTR_COOKIE:
			md.user_cookie = nla_data(a);
			md.user_cookie_len = nla_len(a);
			break;
		}
	}

	psample_group.net = ovs_dp_get_net(dp);
	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
	md.rate_as_probability = 1;

	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;

	psample_sample_packet(&psample_group, skb, rate, &md);
}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		/* Actions that rightfully have to consume the skb should do it
		 * and return directly.
		 */
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'; in case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output.
				 */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			err = push_nsh(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;

		case OVS_ACTION_ATTR_DROP: {
			enum ovs_drop_reason reason = nla_get_u32(a)
				? OVS_DROP_EXPLICIT_WITH_ERROR
				: OVS_DROP_EXPLICIT;

			ovs_kfree_skb_reason(skb, reason);
			return 0;
		}

		case OVS_ACTION_ATTR_PSAMPLE:
			execute_psample(dp, skb, a);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;
		}

		if (unlikely(err)) {
			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
			return err;
		}
	}

	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
	return 0;
}

/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action.
		 */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer the actions. */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}

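/* Drain this CPU's deferred-action FIFO, executing each entry's stored
 * actions (or re-entering packet processing for deferred recirculation),
 * then reset the FIFO for the next packet.
 */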
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. This is the top-level entry
 * point: it tracks the per-CPU recursion level and, once the outermost
 * invocation finishes, runs any actions that were deferred along the way.
 */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

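/* Allocate the per-CPU deferred-action FIFOs and recursion flow-key storage
 * at module init; action_fifos_exit() releases them on unload.
 */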
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}