v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

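/* Rebuild a full vlan_ethhdr for a packet whose VLAN tag was stripped by
 * hardware: copy the on-wire Ethernet header, then splice the tag back in
 * from skb->vlan_proto and the out-of-band tag value.
 */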
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		/* Cap the copy at the end of the (rebuilt) vlan_ethhdr;
		 * the bound is VLAN_ETH_HLEN + vlan_hlen, so the whole sum
		 * must be subtracted (upstream later fixed an
		 * operator-precedence slip here).
		 */
		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

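/* Load priv->len bytes at priv->offset from the configured base header into
 * the destination register, zero-padding the trailing register word; VLAN
 * reads take the rebuild path above when the tag is held out of band.
 */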
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = nft_thoff(pkt);
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

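/* Build the offload match mask: an exact-length load gets an all-ones mask,
 * a shorter load becomes a prefix mask over the field (remainder_mask covers
 * the last partial 32-bit word), and a load longer than the field cannot be
 * offloaded.
 */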
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}

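/* Translate link-layer loads into flow dissector keys (Ethernet addresses,
 * EtherType, outer VLAN and inner C-VLAN TCI/TPID), dispatching on the
 * field offset within ethhdr/vlan_ethhdr.
 */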
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

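/* Incremental checksum fixup in the style of RFC 1624: fold the old bytes
 * (fsum) out and the new bytes (tsum) in, mapping an all-zero result to
 * CSUM_MANGLED_0 so a valid UDP checksum never reads as "no checksum".
 */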
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}

static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

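/* Store path: fsum/tsum are the partial sums of the bytes being replaced
 * and their replacement; they drive the inet and layer 4 checksum fixups
 * before the source register is written into the packet, with the SCTP
 * CRC32c recomputed afterwards when requested.
 */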
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = nft_thoff(pkt);
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

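/* Ops selection at rule-creation time: SREG means a payload store, DREG a
 * load; small power-of-two, aligned loads outside the link layer use
 * nft_payload_fast_ops, whose evaluation is open-coded in the nf_tables
 * core.
 */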
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		/* subtract the full VLAN_ETH_HLEN + vlan_hlen bound
		 * (operator-precedence fix, as in the v5.14.15 copy above)
		 */
		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

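/* Older two-step register handling: nft_parse_register() followed by a
 * separate nft_validate_register_store() call; later kernels fold this
 * into nft_parse_register_store(), as seen in the v5.14.15 copy above.
 */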
static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

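/* In this version the offload path requires the load to cover the header
 * field exactly (priv->len must equal the field size); the partial-length
 * prefix masks built by nft_payload_offload_mask() in the v5.14.15 copy
 * above do not exist yet.
 */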
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, daddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, protocol):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

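/* v5.9 accepts only the NONE and INET checksum types (no SCTP CRC32c yet)
 * and loads the source register via the older nft_parse_register() +
 * nft_validate_register_load() pair.
 */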
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg        = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};