/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

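/* Largest L2 header that ovs_fragment() can stash and restore: a VLAN
 * Ethernet header plus up to three MPLS labels.
 */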
#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
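/* Actions recurse at most OVS_RECURSION_LIMIT deep. Beyond
 * OVS_DEFERRED_ACTION_THRESHOLD there is no pre-allocated flow key left
 * to clone into, so recirc and sample actions that need a cloned key
 * are queued on the per-CPU FIFO below and executed once the current
 * action list finishes.
 */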
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 *
 * 'exec_actions_level' is already 1 for the outermost call, so
 * 'level - 1' indexes the first pre-allocated key.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return the FIFO slot filled in for the deferred action, or NULL if
 * the FIFO is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

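/* For CHECKSUM_COMPLETE skbs, skb->csum must track every byte we
 * rewrite. csum_partial() over the {old, new} pair computes the 16-bit
 * difference and folds it into the existing checksum.
 */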
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and
	 * MPLS GSO.
	 */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
		update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
		struct ethhdr *hdr;

		/* mpls_hdr() is used to locate the ethertype field
		 * correctly in the presence of VLAN tags.
		 */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		update_ethertype(skb, hdr, ethertype);
	}
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

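	/* Non-first fragments carry no L4 header, so there is nothing
	 * to update.
	 */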
	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through: a packet that arrived with
	 * a bad checksum leaves with an equally bad one.
	 */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

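/* Output callback for the fragmentation paths in ovs_fragment(): called
 * once per fragment, it restores the L2 state that prepare_frag()
 * stashed in the per-CPU ovs_frag_data before sending the fragment out
 * on the vport.
 */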
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

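/* The IP fragmentation code consults the skb's dst for the path MTU, so
 * ovs_fragment() installs a fake, non-refcounted dst that reports the
 * egress device's MTU through these ops.
 */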
static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its
 * inverse is ovs_vport_output(), which is called once per output
 * fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

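/* Send 'skb' out on the given port. A pending truncate (cutlen) is
 * applied first; a packet larger than the MRU recorded at receive time
 * (e.g. after conntrack reassembly) is re-fragmented to fit.
 */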
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first nlattr is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	/* A probability of U32_MAX means 100%: skip the random draw. */
	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

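/* Masked set actions carry the value and the mask back to back in a
 * single attribute: for OVS_KEY_ATTR_IPV4, for example, the payload is
 * a struct ovs_key_ipv4 value followed by a struct ovs_key_ipv4 mask,
 * which is why get_mask() simply steps one struct past nla_data().
 */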
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on a clone of the packet. The execution affects
 * neither the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}

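/* Drain the per-CPU deferred-action FIFO. Only the outermost
 * ovs_execute_actions() call (level 1) runs this; entries queued while
 * draining are picked up in the same loop until the FIFO is empty.
 */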
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb', enforcing the recursion
 * limit and draining any deferred actions at the outermost level.
 */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2007-2017 Nicira, Inc.
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/skbuff.h>
9#include <linux/in.h>
10#include <linux/ip.h>
11#include <linux/openvswitch.h>
12#include <linux/sctp.h>
13#include <linux/tcp.h>
14#include <linux/udp.h>
15#include <linux/in6.h>
16#include <linux/if_arp.h>
17#include <linux/if_vlan.h>
18
19#include <net/dst.h>
20#include <net/ip.h>
21#include <net/ipv6.h>
22#include <net/ip6_fib.h>
23#include <net/checksum.h>
24#include <net/dsfield.h>
25#include <net/mpls.h>
26#include <net/sctp/checksum.h>
27
28#include "datapath.h"
29#include "flow.h"
30#include "conntrack.h"
31#include "vport.h"
32#include "flow_netlink.h"
33#include "openvswitch_trace.h"
34
35struct deferred_action {
36 struct sk_buff *skb;
37 const struct nlattr *actions;
38 int actions_len;
39
40 /* Store pkt_key clone when creating deferred action. */
41 struct sw_flow_key pkt_key;
42};
43
44#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
45struct ovs_frag_data {
46 unsigned long dst;
47 struct vport *vport;
48 struct ovs_skb_cb cb;
49 __be16 inner_protocol;
50 u16 network_offset; /* valid only for MPLS */
51 u16 vlan_tci;
52 __be16 vlan_proto;
53 unsigned int l2_len;
54 u8 mac_proto;
55 u8 l2_data[MAX_L2_LEN];
56};
57
58static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
59
60#define DEFERRED_ACTION_FIFO_SIZE 10
61#define OVS_RECURSION_LIMIT 5
62#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
63struct action_fifo {
64 int head;
65 int tail;
66 /* Deferred action fifo queue storage. */
67 struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
68};
69
70struct action_flow_keys {
71 struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
72};
73
74static struct action_fifo __percpu *action_fifos;
75static struct action_flow_keys __percpu *flow_keys;
76static DEFINE_PER_CPU(int, exec_actions_level);
77
78/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
79 * space. Return NULL if out of key spaces.
80 */
81static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
82{
83 struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
84 int level = this_cpu_read(exec_actions_level);
85 struct sw_flow_key *key = NULL;
86
87 if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
88 key = &keys->key[level - 1];
89 *key = *key_;
90 }
91
92 return key;
93}
94
95static void action_fifo_init(struct action_fifo *fifo)
96{
97 fifo->head = 0;
98 fifo->tail = 0;
99}
100
101static bool action_fifo_is_empty(const struct action_fifo *fifo)
102{
103 return (fifo->head == fifo->tail);
104}
105
106static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
107{
108 if (action_fifo_is_empty(fifo))
109 return NULL;
110
111 return &fifo->fifo[fifo->tail++];
112}
113
114static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
115{
116 if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
117 return NULL;
118
119 return &fifo->fifo[fifo->head++];
120}
121
122/* Return true if fifo is not full */
123static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
124 const struct sw_flow_key *key,
125 const struct nlattr *actions,
126 const int actions_len)
127{
128 struct action_fifo *fifo;
129 struct deferred_action *da;
130
131 fifo = this_cpu_ptr(action_fifos);
132 da = action_fifo_put(fifo);
133 if (da) {
134 da->skb = skb;
135 da->actions = actions;
136 da->actions_len = actions_len;
137 da->pkt_key = *key;
138 }
139
140 return da;
141}
142
143static void invalidate_flow_key(struct sw_flow_key *key)
144{
145 key->mac_proto |= SW_FLOW_KEY_INVALID;
146}
147
148static bool is_flow_key_valid(const struct sw_flow_key *key)
149{
150 return !(key->mac_proto & SW_FLOW_KEY_INVALID);
151}
152
153static int clone_execute(struct datapath *dp, struct sk_buff *skb,
154 struct sw_flow_key *key,
155 u32 recirc_id,
156 const struct nlattr *actions, int len,
157 bool last, bool clone_flow_key);
158
159static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
160 struct sw_flow_key *key,
161 const struct nlattr *attr, int len);
162
163static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
164 __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
165{
166 int err;
167
168 err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
169 if (err)
170 return err;
171
172 if (!mac_len)
173 key->mac_proto = MAC_PROTO_NONE;
174
175 invalidate_flow_key(key);
176 return 0;
177}
178
179static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
180 const __be16 ethertype)
181{
182 int err;
183
184 err = skb_mpls_pop(skb, ethertype, skb->mac_len,
185 ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
186 if (err)
187 return err;
188
189 if (ethertype == htons(ETH_P_TEB))
190 key->mac_proto = MAC_PROTO_ETHERNET;
191
192 invalidate_flow_key(key);
193 return 0;
194}
195
196static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
197 const __be32 *mpls_lse, const __be32 *mask)
198{
199 struct mpls_shim_hdr *stack;
200 __be32 lse;
201 int err;
202
203 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
204 return -ENOMEM;
205
206 stack = mpls_hdr(skb);
207 lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
208 err = skb_mpls_update_lse(skb, lse);
209 if (err)
210 return err;
211
212 flow_key->mpls.lse[0] = lse;
213 return 0;
214}
215
216static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
217{
218 int err;
219
220 err = skb_vlan_pop(skb);
221 if (skb_vlan_tag_present(skb)) {
222 invalidate_flow_key(key);
223 } else {
224 key->eth.vlan.tci = 0;
225 key->eth.vlan.tpid = 0;
226 }
227 return err;
228}
229
230static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
231 const struct ovs_action_push_vlan *vlan)
232{
233 if (skb_vlan_tag_present(skb)) {
234 invalidate_flow_key(key);
235 } else {
236 key->eth.vlan.tci = vlan->vlan_tci;
237 key->eth.vlan.tpid = vlan->vlan_tpid;
238 }
239 return skb_vlan_push(skb, vlan->vlan_tpid,
240 ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
241}
242
243/* 'src' is already properly masked. */
244static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
245{
246 u16 *dst = (u16 *)dst_;
247 const u16 *src = (const u16 *)src_;
248 const u16 *mask = (const u16 *)mask_;
249
250 OVS_SET_MASKED(dst[0], src[0], mask[0]);
251 OVS_SET_MASKED(dst[1], src[1], mask[1]);
252 OVS_SET_MASKED(dst[2], src[2], mask[2]);
253}
254
255static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
256 const struct ovs_key_ethernet *key,
257 const struct ovs_key_ethernet *mask)
258{
259 int err;
260
261 err = skb_ensure_writable(skb, ETH_HLEN);
262 if (unlikely(err))
263 return err;
264
265 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
266
267 ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
268 mask->eth_src);
269 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
270 mask->eth_dst);
271
272 skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
273
274 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
275 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
276 return 0;
277}
278
279/* pop_eth does not support VLAN packets as this action is never called
280 * for them.
281 */
282static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
283{
284 int err;
285
286 err = skb_eth_pop(skb);
287 if (err)
288 return err;
289
290 /* safe right before invalidate_flow_key */
291 key->mac_proto = MAC_PROTO_NONE;
292 invalidate_flow_key(key);
293 return 0;
294}
295
296static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
297 const struct ovs_action_push_eth *ethh)
298{
299 int err;
300
301 err = skb_eth_push(skb, ethh->addresses.eth_dst,
302 ethh->addresses.eth_src);
303 if (err)
304 return err;
305
306 /* safe right before invalidate_flow_key */
307 key->mac_proto = MAC_PROTO_ETHERNET;
308 invalidate_flow_key(key);
309 return 0;
310}
311
312static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
313 const struct nshhdr *nh)
314{
315 int err;
316
317 err = nsh_push(skb, nh);
318 if (err)
319 return err;
320
321 /* safe right before invalidate_flow_key */
322 key->mac_proto = MAC_PROTO_NONE;
323 invalidate_flow_key(key);
324 return 0;
325}
326
327static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
328{
329 int err;
330
331 err = nsh_pop(skb);
332 if (err)
333 return err;
334
335 /* safe right before invalidate_flow_key */
336 if (skb->protocol == htons(ETH_P_TEB))
337 key->mac_proto = MAC_PROTO_ETHERNET;
338 else
339 key->mac_proto = MAC_PROTO_NONE;
340 invalidate_flow_key(key);
341 return 0;
342}
343
344static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
345 __be32 addr, __be32 new_addr)
346{
347 int transport_len = skb->len - skb_transport_offset(skb);
348
349 if (nh->frag_off & htons(IP_OFFSET))
350 return;
351
352 if (nh->protocol == IPPROTO_TCP) {
353 if (likely(transport_len >= sizeof(struct tcphdr)))
354 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
355 addr, new_addr, true);
356 } else if (nh->protocol == IPPROTO_UDP) {
357 if (likely(transport_len >= sizeof(struct udphdr))) {
358 struct udphdr *uh = udp_hdr(skb);
359
360 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
361 inet_proto_csum_replace4(&uh->check, skb,
362 addr, new_addr, true);
363 if (!uh->check)
364 uh->check = CSUM_MANGLED_0;
365 }
366 }
367 }
368}
369
370static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
371 __be32 *addr, __be32 new_addr)
372{
373 update_ip_l4_checksum(skb, nh, *addr, new_addr);
374 csum_replace4(&nh->check, *addr, new_addr);
375 skb_clear_hash(skb);
376 ovs_ct_clear(skb, NULL);
377 *addr = new_addr;
378}
379
380static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
381 __be32 addr[4], const __be32 new_addr[4])
382{
383 int transport_len = skb->len - skb_transport_offset(skb);
384
385 if (l4_proto == NEXTHDR_TCP) {
386 if (likely(transport_len >= sizeof(struct tcphdr)))
387 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
388 addr, new_addr, true);
389 } else if (l4_proto == NEXTHDR_UDP) {
390 if (likely(transport_len >= sizeof(struct udphdr))) {
391 struct udphdr *uh = udp_hdr(skb);
392
393 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
394 inet_proto_csum_replace16(&uh->check, skb,
395 addr, new_addr, true);
396 if (!uh->check)
397 uh->check = CSUM_MANGLED_0;
398 }
399 }
400 } else if (l4_proto == NEXTHDR_ICMP) {
401 if (likely(transport_len >= sizeof(struct icmp6hdr)))
402 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
403 skb, addr, new_addr, true);
404 }
405}
406
407static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
408 const __be32 mask[4], __be32 masked[4])
409{
410 masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
411 masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
412 masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
413 masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
414}
415
416static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
417 __be32 addr[4], const __be32 new_addr[4],
418 bool recalculate_csum)
419{
420 if (recalculate_csum)
421 update_ipv6_checksum(skb, l4_proto, addr, new_addr);
422
423 skb_clear_hash(skb);
424 ovs_ct_clear(skb, NULL);
425 memcpy(addr, new_addr, sizeof(__be32[4]));
426}
427
428static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
429{
430 u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
431
432 ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
433
434 if (skb->ip_summed == CHECKSUM_COMPLETE)
435 csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
436 (__force __wsum)(ipv6_tclass << 12));
437
438 ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
439}
440
441static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
442{
443 u32 ofl;
444
445 ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
446 fl = OVS_MASKED(ofl, fl, mask);
447
448 /* Bits 21-24 are always unmasked, so this retains their values. */
449 nh->flow_lbl[0] = (u8)(fl >> 16);
450 nh->flow_lbl[1] = (u8)(fl >> 8);
451 nh->flow_lbl[2] = (u8)fl;
452
453 if (skb->ip_summed == CHECKSUM_COMPLETE)
454 csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
455}
456
457static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
458{
459 new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
460
461 if (skb->ip_summed == CHECKSUM_COMPLETE)
462 csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
463 (__force __wsum)(new_ttl << 8));
464 nh->hop_limit = new_ttl;
465}
466
467static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
468 u8 mask)
469{
470 new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
471
472 csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
473 nh->ttl = new_ttl;
474}
475
476static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
477 const struct ovs_key_ipv4 *key,
478 const struct ovs_key_ipv4 *mask)
479{
480 struct iphdr *nh;
481 __be32 new_addr;
482 int err;
483
484 err = skb_ensure_writable(skb, skb_network_offset(skb) +
485 sizeof(struct iphdr));
486 if (unlikely(err))
487 return err;
488
489 nh = ip_hdr(skb);
490
491 /* Setting an IP addresses is typically only a side effect of
492 * matching on them in the current userspace implementation, so it
493 * makes sense to check if the value actually changed.
494 */
495 if (mask->ipv4_src) {
496 new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
497
498 if (unlikely(new_addr != nh->saddr)) {
499 set_ip_addr(skb, nh, &nh->saddr, new_addr);
500 flow_key->ipv4.addr.src = new_addr;
501 }
502 }
503 if (mask->ipv4_dst) {
504 new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
505
506 if (unlikely(new_addr != nh->daddr)) {
507 set_ip_addr(skb, nh, &nh->daddr, new_addr);
508 flow_key->ipv4.addr.dst = new_addr;
509 }
510 }
511 if (mask->ipv4_tos) {
512 ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
513 flow_key->ip.tos = nh->tos;
514 }
515 if (mask->ipv4_ttl) {
516 set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
517 flow_key->ip.ttl = nh->ttl;
518 }
519
520 return 0;
521}
522
523static bool is_ipv6_mask_nonzero(const __be32 addr[4])
524{
525 return !!(addr[0] | addr[1] | addr[2] | addr[3]);
526}
527
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

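/* Note: the NSH header is variable length, so set_nsh() pulls it in two
 * steps: first the fixed base header, to learn the total length via
 * nsh_hdr_len(), then the full header before rewriting it.  The receive
 * checksum is pulled before and pushed after the edit so that
 * CHECKSUM_COMPLETE skbs stay consistent.
 */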
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

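/* Note: a UDP checksum of zero means "no checksum" over IPv4, so set_udp()
 * below only patches the checksum incrementally when one is present, and a
 * checksum that computes to zero must be transmitted as all-ones
 * (CSUM_MANGLED_0) to keep it distinguishable from "none".
 */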
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother
	 * checking them.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

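/* Note: SCTP uses a CRC32c checksum that cannot be patched incrementally,
 * so set_sctp() recomputes it before and after the rewrite and folds the
 * results together:
 *
 *	new_hdr_csum = old_hdr_csum ^ old_correct_csum ^ new_correct_csum
 *
 * If the packet arrived with a correct checksum (old_hdr_csum ==
 * old_correct_csum), this reduces to the freshly computed value; if it
 * arrived broken, the same error is carried through rather than silently
 * repaired.
 */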
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

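/* Note: ovs_vport_output() is the per-fragment output callback handed to
 * ip_do_fragment()/ipv6_fragment().  It restores the state that
 * prepare_frag() stashed in the per-CPU ovs_frag_data_storage (dst, vlan
 * tag, saved L2 header) onto each fragment before sending it out of the
 * stored vport.
 */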
static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto,
				       data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse,
 * ovs_vport_output(), is called once per resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

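/* Note: the stack's fragmentation helpers expect the skb to carry a dst
 * with a usable device and MTU, which a packet being switched by OVS does
 * not have.  ovs_fragment() therefore fabricates a throwaway on-stack
 * rtable/rt6_info backed by ovs_dst_ops (DST_NOCOUNT, never refcounted
 * beyond this call), points it at the egress vport's device, and attaches
 * it with skb_dst_set_noref() for the duration of the call.
 */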
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

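/* Note: do_output() first applies any pending truncation
 * (OVS_CB(skb)->cutlen, set by the trunc action) while never trimming
 * below the L2 header, then decides between a plain send, fragmentation
 * (the packet exceeds the MRU recorded at receive time but the MRU fits
 * the egress MTU), and dropping the packet.
 */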
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (dp->user_features &
			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
					ovs_dp_get_upcall_portid(dp,
						smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	consume_skb(skb);
	return 0;
}

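/* Note: the sample probability is expressed as a fraction of U32_MAX
 * (e.g. roughly 0x80000000 for a 50% sample rate).  U32_MAX means "always
 * sample", a probability of zero never samples, and otherwise the packet
 * is sampled when get_random_u32() does not exceed the configured
 * probability.
 */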
/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first attribute is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first attribute is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

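/* Note: a masked-set attribute carries the key value immediately followed
 * by a mask of the same type, so the mask is found by stepping one typed
 * element past nla_data().  For example,
 *
 *	const struct ovs_key_ipv4 *key = nla_data(a);
 *	const struct ovs_key_ipv4 *mask = get_mask(a, struct ovs_key_ipv4 *);
 *
 * makes 'mask' point just past 'key' within the same attribute payload.
 */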
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}

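/* Note: do_execute_actions() walks the netlink-encoded action list and
 * owns the skb throughout: any helper error frees the skb and propagates
 * the error, actions that are last in the list consume the skb directly
 * (avoiding one clone), and a fully executed list ends in consume_skb().
 */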
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'.  In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output.
				 */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on a clone of the packet. The execution does not
 * affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action.
		 */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone the key from the next recursion level
	 * of 'flow_keys'. If the clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer the actions. */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

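/* Note: ovs_execute_actions() is the entry point that brackets each packet
 * with per-CPU recursion accounting.  The recursion level is bumped on
 * entry; past OVS_RECURSION_LIMIT the packet is dropped as a probable
 * configuration loop, and only the outermost invocation (level 1) drains
 * the deferred-action FIFO once its own actions finish.
 */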
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}