net/netfilter/nft_payload.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
  4 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
  5 *
  6 * Development of this code funded by Astaro AG (http://www.astaro.com/)
  7 */
  8
  9#include <linux/kernel.h>
 10#include <linux/if_vlan.h>
 11#include <linux/init.h>
 12#include <linux/module.h>
 13#include <linux/netlink.h>
 14#include <linux/netfilter.h>
 15#include <linux/netfilter/nf_tables.h>
 16#include <net/netfilter/nf_tables_core.h>
 17#include <net/netfilter/nf_tables.h>
 18#include <net/netfilter/nf_tables_offload.h>
 19/* For layer 4 checksum field offset. */
 20#include <linux/tcp.h>
 21#include <linux/udp.h>
 22#include <linux/icmpv6.h>
 23#include <linux/ip.h>
 24#include <linux/ipv6.h>
 25#include <net/sctp/checksum.h>
 26
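/*
 * When hardware offload has stripped the VLAN tag into skb metadata,
 * rebuild a tagged ethernet header from skb->vlan_proto and the tag so
 * that link-layer reads below see the on-wire format.
 */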
 27static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
 28					 struct vlan_ethhdr *veth)
 29{
 30	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
 31		return false;
 32
 33	veth->h_vlan_proto = skb->vlan_proto;
 34	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
 35	veth->h_vlan_encapsulated_proto = skb->protocol;
 36
 37	return true;
 38}
 39
  40/* add vlan header into the user buffer if the tag was removed by offloads */
 41static bool
 42nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
 43{
 44	int mac_off = skb_mac_header(skb) - skb->data;
 45	u8 *vlanh, *dst_u8 = (u8 *) d;
 46	struct vlan_ethhdr veth;
 47	u8 vlan_hlen = 0;
 48
 49	if ((skb->protocol == htons(ETH_P_8021AD) ||
 50	     skb->protocol == htons(ETH_P_8021Q)) &&
 51	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
 52		vlan_hlen += VLAN_HLEN;
 53
 54	vlanh = (u8 *) &veth;
 55	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
 56		u8 ethlen = len;
 57
 58		if (vlan_hlen &&
 59		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
 60			return false;
 61		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
 62			return false;
 63
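		/* Trim the copy to the bytes that fall inside the tagged
		 * ethernet header.  The guard below treats
		 * "VLAN_ETH_HLEN + vlan_hlen" as a single bound, while the
		 * unparenthesised expression under it adds vlan_hlen rather
		 * than subtracting it, so it appears to trim 2 * vlan_hlen
		 * bytes too many; later kernels (see the v6.9.4 copy of this
		 * file below) drop vlan_hlen from this path entirely.
		 */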
 64		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
 65			ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen;
 66
 67		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
 68
 69		len -= ethlen;
 70		if (len == 0)
 71			return true;
 72
 73		dst_u8 += ethlen;
 74		offset = ETH_HLEN + vlan_hlen;
 75	} else {
 76		offset -= VLAN_HLEN + vlan_hlen;
 77	}
 78
 79	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
 80}
 81
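/*
 * Read path: resolve the configured base (link-layer, network or
 * transport header) to an offset into the skb, then copy priv->len bytes
 * into the destination register, zero-padding the last 32-bit word.  Any
 * failure yields NFT_BREAK, i.e. the rule simply does not match.  As an
 * illustration (not part of this file): nft(8) typically compiles
 * "tcp dport 80" into a 2-byte load at transport header offset 2
 * followed by a compare.
 */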
 82void nft_payload_eval(const struct nft_expr *expr,
 83		      struct nft_regs *regs,
 84		      const struct nft_pktinfo *pkt)
 85{
 86	const struct nft_payload *priv = nft_expr_priv(expr);
 87	const struct sk_buff *skb = pkt->skb;
 88	u32 *dest = &regs->data[priv->dreg];
 89	int offset;
 90
 91	if (priv->len % NFT_REG32_SIZE)
 92		dest[priv->len / NFT_REG32_SIZE] = 0;
 93
 94	switch (priv->base) {
 95	case NFT_PAYLOAD_LL_HEADER:
 96		if (!skb_mac_header_was_set(skb))
 97			goto err;
 98
 99		if (skb_vlan_tag_present(skb)) {
100			if (!nft_payload_copy_vlan(dest, skb,
101						   priv->offset, priv->len))
102				goto err;
103			return;
104		}
105		offset = skb_mac_header(skb) - skb->data;
106		break;
107	case NFT_PAYLOAD_NETWORK_HEADER:
108		offset = skb_network_offset(skb);
109		break;
110	case NFT_PAYLOAD_TRANSPORT_HEADER:
111		if (!pkt->tprot_set)
112			goto err;
113		offset = nft_thoff(pkt);
114		break;
115	default:
116		BUG();
117	}
118	offset += priv->offset;
119
120	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
121		goto err;
122	return;
123err:
124	regs->verdict.code = NFT_BREAK;
125}
126
127static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
128	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
129	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
130	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
131	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
132	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
133	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
134	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
135	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
136};
137
138static int nft_payload_init(const struct nft_ctx *ctx,
139			    const struct nft_expr *expr,
140			    const struct nlattr * const tb[])
141{
142	struct nft_payload *priv = nft_expr_priv(expr);
143
144	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
145	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
146	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
147
148	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
149					&priv->dreg, NULL, NFT_DATA_VALUE,
150					priv->len);
151}
152
153static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
154{
155	const struct nft_payload *priv = nft_expr_priv(expr);
156
157	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
158	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
159	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
160	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
161		goto nla_put_failure;
162	return 0;
163
164nla_put_failure:
165	return -1;
166}
167
168static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
169				     u32 priv_len, u32 field_len)
170{
171	unsigned int remainder, delta, k;
172	struct nft_data mask = {};
173	__be32 remainder_mask;
174
175	if (priv_len == field_len) {
176		memset(&reg->mask, 0xff, priv_len);
177		return true;
178	} else if (priv_len > field_len) {
179		return false;
180	}
181
182	memset(&mask, 0xff, field_len);
183	remainder = priv_len % sizeof(u32);
184	if (remainder) {
185		k = priv_len / sizeof(u32);
186		delta = field_len - priv_len;
187		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
188		mask.data[k] = (__force u32)remainder_mask;
189	}
190
191	memcpy(&reg->mask, &mask, field_len);
192
193	return true;
194}
195
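/*
 * Worked example (illustrative) for nft_payload_offload_mask() above:
 * matching only the first three bytes of a four-byte field, e.g. a /24
 * prefix of iphdr->saddr, gives priv_len = 3, field_len = 4, hence
 * remainder = 3, k = 0, delta = 1, remainder_mask = htonl(0xffffff00),
 * and a final reg->mask of ff:ff:ff:00.
 */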
196static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
197				  struct nft_flow_rule *flow,
198				  const struct nft_payload *priv)
199{
200	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
201
202	switch (priv->offset) {
203	case offsetof(struct ethhdr, h_source):
204		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
205			return -EOPNOTSUPP;
206
207		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
208				  src, ETH_ALEN, reg);
209		break;
210	case offsetof(struct ethhdr, h_dest):
211		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
212			return -EOPNOTSUPP;
213
214		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
215				  dst, ETH_ALEN, reg);
216		break;
217	case offsetof(struct ethhdr, h_proto):
218		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
219			return -EOPNOTSUPP;
220
221		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
222				  n_proto, sizeof(__be16), reg);
223		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
224		break;
225	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
226		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
227			return -EOPNOTSUPP;
228
229		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
230					vlan_tci, sizeof(__be16), reg,
231					NFT_OFFLOAD_F_NETWORK2HOST);
232		break;
233	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
234		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
235			return -EOPNOTSUPP;
236
237		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
238				  vlan_tpid, sizeof(__be16), reg);
239		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
240		break;
241	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
242		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
243			return -EOPNOTSUPP;
244
245		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
246					vlan_tci, sizeof(__be16), reg,
247					NFT_OFFLOAD_F_NETWORK2HOST);
248		break;
249	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
250							sizeof(struct vlan_hdr):
251		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
252			return -EOPNOTSUPP;
253
254		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
255				  vlan_tpid, sizeof(__be16), reg);
256		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
257		break;
258	default:
259		return -EOPNOTSUPP;
260	}
261
262	return 0;
263}
264
265static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
266				  struct nft_flow_rule *flow,
267				  const struct nft_payload *priv)
268{
269	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
270
271	switch (priv->offset) {
272	case offsetof(struct iphdr, saddr):
273		if (!nft_payload_offload_mask(reg, priv->len,
274					      sizeof(struct in_addr)))
275			return -EOPNOTSUPP;
276
277		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
278				  sizeof(struct in_addr), reg);
279		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
280		break;
281	case offsetof(struct iphdr, daddr):
282		if (!nft_payload_offload_mask(reg, priv->len,
283					      sizeof(struct in_addr)))
284			return -EOPNOTSUPP;
285
286		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
287				  sizeof(struct in_addr), reg);
288		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
289		break;
290	case offsetof(struct iphdr, protocol):
291		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
292			return -EOPNOTSUPP;
293
294		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
295				  sizeof(__u8), reg);
296		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
297		break;
298	default:
299		return -EOPNOTSUPP;
300	}
301
302	return 0;
303}
304
305static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
306				  struct nft_flow_rule *flow,
307				  const struct nft_payload *priv)
308{
309	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
310
311	switch (priv->offset) {
312	case offsetof(struct ipv6hdr, saddr):
313		if (!nft_payload_offload_mask(reg, priv->len,
314					      sizeof(struct in6_addr)))
315			return -EOPNOTSUPP;
316
317		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
318				  sizeof(struct in6_addr), reg);
319		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
320		break;
321	case offsetof(struct ipv6hdr, daddr):
322		if (!nft_payload_offload_mask(reg, priv->len,
323					      sizeof(struct in6_addr)))
324			return -EOPNOTSUPP;
325
326		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
327				  sizeof(struct in6_addr), reg);
328		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
329		break;
330	case offsetof(struct ipv6hdr, nexthdr):
331		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
332			return -EOPNOTSUPP;
333
334		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
335				  sizeof(__u8), reg);
336		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
337		break;
338	default:
339		return -EOPNOTSUPP;
340	}
341
342	return 0;
343}
344
345static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
346				  struct nft_flow_rule *flow,
347				  const struct nft_payload *priv)
348{
349	int err;
350
351	switch (ctx->dep.l3num) {
352	case htons(ETH_P_IP):
353		err = nft_payload_offload_ip(ctx, flow, priv);
354		break;
355	case htons(ETH_P_IPV6):
356		err = nft_payload_offload_ip6(ctx, flow, priv);
357		break;
358	default:
359		return -EOPNOTSUPP;
360	}
361
362	return err;
363}
364
365static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
366				   struct nft_flow_rule *flow,
367				   const struct nft_payload *priv)
368{
369	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
370
371	switch (priv->offset) {
372	case offsetof(struct tcphdr, source):
373		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
374			return -EOPNOTSUPP;
375
376		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
377				  sizeof(__be16), reg);
378		break;
379	case offsetof(struct tcphdr, dest):
380		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
381			return -EOPNOTSUPP;
382
383		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
384				  sizeof(__be16), reg);
385		break;
386	default:
387		return -EOPNOTSUPP;
388	}
389
390	return 0;
391}
392
393static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
394				   struct nft_flow_rule *flow,
395				   const struct nft_payload *priv)
396{
397	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
398
399	switch (priv->offset) {
400	case offsetof(struct udphdr, source):
401		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
402			return -EOPNOTSUPP;
403
404		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
405				  sizeof(__be16), reg);
406		break;
407	case offsetof(struct udphdr, dest):
408		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
409			return -EOPNOTSUPP;
410
411		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
412				  sizeof(__be16), reg);
413		break;
414	default:
415		return -EOPNOTSUPP;
416	}
417
418	return 0;
419}
420
421static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
422				  struct nft_flow_rule *flow,
423				  const struct nft_payload *priv)
424{
425	int err;
426
427	switch (ctx->dep.protonum) {
428	case IPPROTO_TCP:
429		err = nft_payload_offload_tcp(ctx, flow, priv);
430		break;
431	case IPPROTO_UDP:
432		err = nft_payload_offload_udp(ctx, flow, priv);
433		break;
434	default:
435		return -EOPNOTSUPP;
436	}
437
438	return err;
439}
440
441static int nft_payload_offload(struct nft_offload_ctx *ctx,
442			       struct nft_flow_rule *flow,
443			       const struct nft_expr *expr)
444{
445	const struct nft_payload *priv = nft_expr_priv(expr);
446	int err;
447
448	switch (priv->base) {
449	case NFT_PAYLOAD_LL_HEADER:
450		err = nft_payload_offload_ll(ctx, flow, priv);
451		break;
452	case NFT_PAYLOAD_NETWORK_HEADER:
453		err = nft_payload_offload_nh(ctx, flow, priv);
454		break;
455	case NFT_PAYLOAD_TRANSPORT_HEADER:
456		err = nft_payload_offload_th(ctx, flow, priv);
457		break;
458	default:
459		err = -EOPNOTSUPP;
460		break;
461	}
462	return err;
463}
464
465static const struct nft_expr_ops nft_payload_ops = {
466	.type		= &nft_payload_type,
467	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
468	.eval		= nft_payload_eval,
469	.init		= nft_payload_init,
470	.dump		= nft_payload_dump,
471	.offload	= nft_payload_offload,
472};
473
474const struct nft_expr_ops nft_payload_fast_ops = {
475	.type		= &nft_payload_type,
476	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
477	.eval		= nft_payload_eval,
478	.init		= nft_payload_init,
479	.dump		= nft_payload_dump,
480	.offload	= nft_payload_offload,
481};
482
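/*
 * Incremental checksum update in the style of RFC 1624: fold the
 * difference between the old byte sum (fsum) and the new one (tsum)
 * into the stored checksum.  A zero result is written as
 * CSUM_MANGLED_0 (0xffff), since an all-zero checksum field means
 * "no checksum" for UDP.
 */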
483static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
484{
485	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
486	if (*sum == 0)
487		*sum = CSUM_MANGLED_0;
488}
489
490static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
491{
492	struct udphdr *uh, _uh;
493
494	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
495	if (!uh)
496		return false;
497
498	return (__force bool)uh->check;
499}
500
501static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
502				     struct sk_buff *skb,
503				     unsigned int *l4csum_offset)
504{
505	switch (pkt->tprot) {
506	case IPPROTO_TCP:
507		*l4csum_offset = offsetof(struct tcphdr, check);
508		break;
509	case IPPROTO_UDP:
510		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
511			return -1;
512		fallthrough;
513	case IPPROTO_UDPLITE:
514		*l4csum_offset = offsetof(struct udphdr, check);
515		break;
516	case IPPROTO_ICMPV6:
517		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
518		break;
519	default:
520		return -1;
521	}
522
523	*l4csum_offset += nft_thoff(pkt);
524	return 0;
525}
526
527static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
528{
529	struct sctphdr *sh;
530
531	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
532		return -1;
533
534	sh = (struct sctphdr *)(skb->data + offset);
535	sh->checksum = sctp_compute_cksum(skb, offset);
536	skb->ip_summed = CHECKSUM_UNNECESSARY;
537	return 0;
538}
539
540static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
541				     struct sk_buff *skb,
542				     __wsum fsum, __wsum tsum)
543{
544	int l4csum_offset;
545	__sum16 sum;
546
 547	/* If we cannot determine the layer 4 checksum offset or this packet doesn't
548	 * require layer 4 checksum recalculation, skip this packet.
549	 */
550	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
551		return 0;
552
553	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
554		return -1;
555
556	/* Checksum mangling for an arbitrary amount of bytes, based on
557	 * inet_proto_csum_replace*() functions.
558	 */
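	/* With CHECKSUM_PARTIAL the stored field only seeds the pseudo-header
	 * sum that hardware will complete later, hence the mirrored update
	 * in the else branch rather than a full incremental replace.
	 */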
559	if (skb->ip_summed != CHECKSUM_PARTIAL) {
560		nft_csum_replace(&sum, fsum, tsum);
561		if (skb->ip_summed == CHECKSUM_COMPLETE) {
562			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
563					      tsum);
564		}
565	} else {
566		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
567					  tsum));
568	}
569
570	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
571	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
572		return -1;
573
574	return 0;
575}
576
577static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
578				 __wsum fsum, __wsum tsum, int csum_offset)
579{
580	__sum16 sum;
581
582	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
583		return -1;
584
585	nft_csum_replace(&sum, fsum, tsum);
586	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
587	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
588		return -1;
589
590	return 0;
591}
592
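/*
 * Store path, the twin of nft_payload_eval(): resolve the base offset,
 * update the configured inet and/or layer 4 checksums while the old
 * bytes are still in place, then write priv->len bytes from the source
 * register into the packet, recomputing the SCTP CRC32c afterwards when
 * requested.
 */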
593static void nft_payload_set_eval(const struct nft_expr *expr,
594				 struct nft_regs *regs,
595				 const struct nft_pktinfo *pkt)
596{
597	const struct nft_payload_set *priv = nft_expr_priv(expr);
598	struct sk_buff *skb = pkt->skb;
599	const u32 *src = &regs->data[priv->sreg];
600	int offset, csum_offset;
601	__wsum fsum, tsum;
602
603	switch (priv->base) {
604	case NFT_PAYLOAD_LL_HEADER:
605		if (!skb_mac_header_was_set(skb))
606			goto err;
607		offset = skb_mac_header(skb) - skb->data;
608		break;
609	case NFT_PAYLOAD_NETWORK_HEADER:
610		offset = skb_network_offset(skb);
611		break;
612	case NFT_PAYLOAD_TRANSPORT_HEADER:
613		if (!pkt->tprot_set)
614			goto err;
615		offset = nft_thoff(pkt);
616		break;
617	default:
618		BUG();
619	}
620
621	csum_offset = offset + priv->csum_offset;
622	offset += priv->offset;
623
624	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
625	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
626	     skb->ip_summed != CHECKSUM_PARTIAL)) {
627		fsum = skb_checksum(skb, offset, priv->len, 0);
628		tsum = csum_partial(src, priv->len, 0);
629
630		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
631		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
632			goto err;
633
634		if (priv->csum_flags &&
635		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
636			goto err;
637	}
638
639	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
640	    skb_store_bits(skb, offset, src, priv->len) < 0)
641		goto err;
642
643	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
644	    pkt->tprot == IPPROTO_SCTP &&
645	    skb->ip_summed != CHECKSUM_PARTIAL) {
646		if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
647			goto err;
648	}
649
650	return;
651err:
652	regs->verdict.code = NFT_BREAK;
653}
654
655static int nft_payload_set_init(const struct nft_ctx *ctx,
656				const struct nft_expr *expr,
657				const struct nlattr * const tb[])
658{
659	struct nft_payload_set *priv = nft_expr_priv(expr);
660
661	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
662	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
663	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
664
665	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
666		priv->csum_type =
667			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
668	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
669		priv->csum_offset =
670			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
671	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
672		u32 flags;
673
674		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
675		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
676			return -EINVAL;
677
678		priv->csum_flags = flags;
679	}
680
681	switch (priv->csum_type) {
682	case NFT_PAYLOAD_CSUM_NONE:
683	case NFT_PAYLOAD_CSUM_INET:
684		break;
685	case NFT_PAYLOAD_CSUM_SCTP:
686		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
687			return -EINVAL;
688
689		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
690			return -EINVAL;
691		break;
692	default:
693		return -EOPNOTSUPP;
694	}
695
696	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
697				       priv->len);
698}
699
700static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
701{
702	const struct nft_payload_set *priv = nft_expr_priv(expr);
703
704	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
705	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
706	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
707	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
708	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
709	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
710			 htonl(priv->csum_offset)) ||
711	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
712		goto nla_put_failure;
713	return 0;
714
715nla_put_failure:
716	return -1;
717}
718
719static const struct nft_expr_ops nft_payload_set_ops = {
720	.type		= &nft_payload_type,
721	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
722	.eval		= nft_payload_set_eval,
723	.init		= nft_payload_set_init,
724	.dump		= nft_payload_set_dump,
725};
726
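/*
 * Naturally aligned loads of 1, 2 or 4 bytes from the network or
 * transport header select nft_payload_fast_ops below; the interpreter
 * loop in nf_tables_core.c special-cases those ops with the inlined
 * nft_payload_fast_eval(), keeping nft_payload_eval() as the fallback.
 * Link-layer loads stay on the generic path, presumably because of the
 * VLAN handling above.
 */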
727static const struct nft_expr_ops *
728nft_payload_select_ops(const struct nft_ctx *ctx,
729		       const struct nlattr * const tb[])
730{
731	enum nft_payload_bases base;
732	unsigned int offset, len;
733
734	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
735	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
736	    tb[NFTA_PAYLOAD_LEN] == NULL)
737		return ERR_PTR(-EINVAL);
738
739	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
740	switch (base) {
741	case NFT_PAYLOAD_LL_HEADER:
742	case NFT_PAYLOAD_NETWORK_HEADER:
743	case NFT_PAYLOAD_TRANSPORT_HEADER:
744		break;
745	default:
746		return ERR_PTR(-EOPNOTSUPP);
747	}
748
749	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
750		if (tb[NFTA_PAYLOAD_DREG] != NULL)
751			return ERR_PTR(-EINVAL);
752		return &nft_payload_set_ops;
753	}
754
755	if (tb[NFTA_PAYLOAD_DREG] == NULL)
756		return ERR_PTR(-EINVAL);
757
758	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
759	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
760
761	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
762	    base != NFT_PAYLOAD_LL_HEADER)
763		return &nft_payload_fast_ops;
764	else
765		return &nft_payload_ops;
766}
767
768struct nft_expr_type nft_payload_type __read_mostly = {
769	.name		= "payload",
770	.select_ops	= nft_payload_select_ops,
771	.policy		= nft_payload_policy,
772	.maxattr	= NFTA_PAYLOAD_MAX,
773	.owner		= THIS_MODULE,
774};
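
A minimal userspace sketch of the incremental checksum update performed
by nft_csum_replace() above.  This is illustration only, not kernel code:
the csum_*32() helpers below merely mimic the kernel's csum_fold() and
csum_add() primitives.  Compiled and run, it prints the same value for
the incrementally updated and the freshly recomputed checksum.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 32-bit one's complement accumulator, standing in for __wsum. */
static uint32_t csum_add32(uint32_t a, uint32_t b)
{
	uint64_t s = (uint64_t)a + b;

	return (uint32_t)((s & 0xffffffffu) + (s >> 32));
}

/* Fold to 16 bits and invert, standing in for csum_fold(). */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Sum big-endian 16-bit words, as the internet checksum does. */
static uint32_t csum_bytes(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum = csum_add32(sum, (uint32_t)((p[i] << 8) | p[i + 1]));
	if (len & 1)
		sum = csum_add32(sum, (uint32_t)(p[len - 1] << 8));
	return sum;
}

int main(void)
{
	uint8_t old_bytes[2] = { 0x12, 0x34 };	/* field being rewritten */
	uint8_t new_bytes[2] = { 0xab, 0xcd };	/* replacement value */
	/* toy "packet" consisting of nothing but that field */
	uint16_t csum = csum_fold32(csum_bytes(old_bytes, sizeof(old_bytes)));
	/* fsum/tsum play the role of the skb_checksum()/csum_partial()
	 * pair computed in nft_payload_set_eval() */
	uint32_t fsum = csum_bytes(old_bytes, sizeof(old_bytes));
	uint32_t tsum = csum_bytes(new_bytes, sizeof(new_bytes));
	uint32_t acc;

	/* as nft_csum_replace(): un-invert, subtract fsum, add tsum, refold */
	acc = (uint16_t)~csum;
	acc = csum_add32(acc, ~fsum);	/* one's complement subtraction */
	acc = csum_add32(acc, tsum);
	csum = csum_fold32(acc);

	printf("incremental 0x%04x == recomputed 0x%04x\n",
	       csum, csum_fold32(csum_bytes(new_bytes, sizeof(new_bytes))));
	return 0;
}
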
net/netfilter/nft_payload.c (v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
   4 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
   5 *
   6 * Development of this code funded by Astaro AG (http://www.astaro.com/)
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/if_vlan.h>
  11#include <linux/init.h>
  12#include <linux/module.h>
  13#include <linux/netlink.h>
  14#include <linux/netfilter.h>
  15#include <linux/netfilter/nf_tables.h>
  16#include <net/netfilter/nf_tables_core.h>
  17#include <net/netfilter/nf_tables.h>
  18#include <net/netfilter/nf_tables_offload.h>
  19/* For layer 4 checksum field offset. */
  20#include <linux/tcp.h>
  21#include <linux/udp.h>
  22#include <net/gre.h>
  23#include <linux/icmpv6.h>
  24#include <linux/ip.h>
  25#include <linux/ipv6.h>
  26#include <net/sctp/checksum.h>
  27
  28static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
  29					 struct vlan_ethhdr *veth)
  30{
  31	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
  32		return false;
  33
  34	veth->h_vlan_proto = skb->vlan_proto;
  35	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
  36	veth->h_vlan_encapsulated_proto = skb->protocol;
  37
  38	return true;
  39}
  40
   41/* add vlan header into the user buffer if the tag was removed by offloads */
  42static bool
  43nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
  44{
  45	int mac_off = skb_mac_header(skb) - skb->data;
  46	u8 *vlanh, *dst_u8 = (u8 *) d;
  47	struct vlan_ethhdr veth;
  48
  49	vlanh = (u8 *) &veth;
  50	if (offset < VLAN_ETH_HLEN) {
  51		u8 ethlen = len;
  52
  53		if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
  54			return false;
  55
  56		if (offset + len > VLAN_ETH_HLEN)
  57			ethlen -= offset + len - VLAN_ETH_HLEN;
  58
  59		memcpy(dst_u8, vlanh + offset, ethlen);
  60
  61		len -= ethlen;
  62		if (len == 0)
  63			return true;
  64
  65		dst_u8 += ethlen;
  66		offset = ETH_HLEN;
  67	} else {
  68		offset -= VLAN_HLEN;
  69	}
  70
  71	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
  72}
  73
  74static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
  75{
  76	unsigned int thoff = nft_thoff(pkt);
  77
  78	if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
  79		return -1;
  80
  81	switch (pkt->tprot) {
  82	case IPPROTO_UDP:
  83		pkt->inneroff = thoff + sizeof(struct udphdr);
  84		break;
  85	case IPPROTO_TCP: {
  86		struct tcphdr *th, _tcph;
  87
  88		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
  89		if (!th)
  90			return -1;
  91
  92		pkt->inneroff = thoff + __tcp_hdrlen(th);
  93		}
  94		break;
  95	case IPPROTO_GRE: {
  96		u32 offset = sizeof(struct gre_base_hdr);
  97		struct gre_base_hdr *gre, _gre;
  98		__be16 version;
  99
 100		gre = skb_header_pointer(pkt->skb, thoff, sizeof(_gre), &_gre);
 101		if (!gre)
 102			return -1;
 103
 104		version = gre->flags & GRE_VERSION;
 105		switch (version) {
 106		case GRE_VERSION_0:
 107			if (gre->flags & GRE_ROUTING)
 108				return -1;
 109
 110			if (gre->flags & GRE_CSUM) {
 111				offset += sizeof_field(struct gre_full_hdr, csum) +
 112					  sizeof_field(struct gre_full_hdr, reserved1);
 113			}
 114			if (gre->flags & GRE_KEY)
 115				offset += sizeof_field(struct gre_full_hdr, key);
 116
 117			if (gre->flags & GRE_SEQ)
 118				offset += sizeof_field(struct gre_full_hdr, seq);
 119			break;
 120		default:
 121			return -1;
 122		}
 123
 124		pkt->inneroff = thoff + offset;
 125		}
 126		break;
 127	case IPPROTO_IPIP:
 128		pkt->inneroff = thoff;
 129		break;
 130	default:
 131		return -1;
 132	}
 133
 134	pkt->flags |= NFT_PKTINFO_INNER;
 135
 136	return 0;
 137}
 138
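/*
 * Locate the start of an encapsulated packet: right after the UDP or TCP
 * header for those transports, after the GRE version-0 base header plus
 * whichever optional checksum/key/sequence fields its flag bits announce,
 * or directly at the transport offset for IPIP.  The result is cached in
 * pkt->inneroff and marked with NFT_PKTINFO_INNER.
 */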
 139int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
 140{
 141	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
 142	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
 143		return -1;
 144
 145	return pkt->inneroff;
 146}
 147
 148static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
 149{
 150	unsigned int boundary = offset + len;
 151
 152	/* data past ether src/dst requested, copy needed */
 153	if (boundary > offsetof(struct ethhdr, h_proto))
 154		return true;
 155
 156	return false;
 157}
 158
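/*
 * Example: a 12-byte read at offset 0 (h_dest plus h_source) stays in
 * front of h_proto and needs no VLAN adjustment, whereas a 2-byte read
 * at offset 12 (h_proto itself) reaches data displaced by the stripped
 * tag and does.
 */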
 159void nft_payload_eval(const struct nft_expr *expr,
 160		      struct nft_regs *regs,
 161		      const struct nft_pktinfo *pkt)
 162{
 163	const struct nft_payload *priv = nft_expr_priv(expr);
 164	const struct sk_buff *skb = pkt->skb;
 165	u32 *dest = &regs->data[priv->dreg];
 166	int offset;
 167
 168	if (priv->len % NFT_REG32_SIZE)
 169		dest[priv->len / NFT_REG32_SIZE] = 0;
 170
 171	switch (priv->base) {
 172	case NFT_PAYLOAD_LL_HEADER:
 173		if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
 174			goto err;
 175
 176		if (skb_vlan_tag_present(skb) &&
 177		    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
 178			if (!nft_payload_copy_vlan(dest, skb,
 179						   priv->offset, priv->len))
 180				goto err;
 181			return;
 182		}
 183		offset = skb_mac_header(skb) - skb->data;
 184		break;
 185	case NFT_PAYLOAD_NETWORK_HEADER:
 186		offset = skb_network_offset(skb);
 187		break;
 188	case NFT_PAYLOAD_TRANSPORT_HEADER:
 189		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
 190			goto err;
 191		offset = nft_thoff(pkt);
 192		break;
 193	case NFT_PAYLOAD_INNER_HEADER:
 194		offset = nft_payload_inner_offset(pkt);
 195		if (offset < 0)
 196			goto err;
 197		break;
 198	default:
 199		WARN_ON_ONCE(1);
 200		goto err;
 201	}
 202	offset += priv->offset;
 203
 204	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
 205		goto err;
 206	return;
 207err:
 208	regs->verdict.code = NFT_BREAK;
 209}
 210
 211static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
 212	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
 213	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
 214	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
 215	[NFTA_PAYLOAD_OFFSET]		= NLA_POLICY_MAX(NLA_BE32, 255),
 216	[NFTA_PAYLOAD_LEN]		= NLA_POLICY_MAX(NLA_BE32, 255),
 217	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
 218	[NFTA_PAYLOAD_CSUM_OFFSET]	= NLA_POLICY_MAX(NLA_BE32, 255),
 219	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
 220};
 221
 222static int nft_payload_init(const struct nft_ctx *ctx,
 223			    const struct nft_expr *expr,
 224			    const struct nlattr * const tb[])
 225{
 226	struct nft_payload *priv = nft_expr_priv(expr);
 227
 228	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
 229	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
 230	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
 231
 232	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
 233					&priv->dreg, NULL, NFT_DATA_VALUE,
 234					priv->len);
 235}
 236
 237static int nft_payload_dump(struct sk_buff *skb,
 238			    const struct nft_expr *expr, bool reset)
 239{
 240	const struct nft_payload *priv = nft_expr_priv(expr);
 241
 242	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
 243	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
 244	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
 245	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
 246		goto nla_put_failure;
 247	return 0;
 248
 249nla_put_failure:
 250	return -1;
 251}
 252
 253static bool nft_payload_reduce(struct nft_regs_track *track,
 254			       const struct nft_expr *expr)
 255{
 256	const struct nft_payload *priv = nft_expr_priv(expr);
 257	const struct nft_payload *payload;
 258
 259	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
 260		nft_reg_track_update(track, expr, priv->dreg, priv->len);
 261		return false;
 262	}
 263
 264	payload = nft_expr_priv(track->regs[priv->dreg].selector);
 265	if (priv->base != payload->base ||
 266	    priv->offset != payload->offset ||
 267	    priv->len != payload->len) {
 268		nft_reg_track_update(track, expr, priv->dreg, priv->len);
 269		return false;
 270	}
 271
 272	if (!track->regs[priv->dreg].bitwise)
 273		return true;
 274
 275	return nft_expr_reduce_bitwise(track, expr);
 276}
 277
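/*
 * Register tracking: if the destination register already holds exactly
 * this base/offset/len payload from an earlier expression, the load is
 * redundant and may be elided (subject to any bitwise operation recorded
 * on the register); otherwise record it as the register's new contents.
 */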
 278static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
 279				     u32 priv_len, u32 field_len)
 280{
 281	unsigned int remainder, delta, k;
 282	struct nft_data mask = {};
 283	__be32 remainder_mask;
 284
 285	if (priv_len == field_len) {
 286		memset(&reg->mask, 0xff, priv_len);
 287		return true;
 288	} else if (priv_len > field_len) {
 289		return false;
 290	}
 291
 292	memset(&mask, 0xff, field_len);
 293	remainder = priv_len % sizeof(u32);
 294	if (remainder) {
 295		k = priv_len / sizeof(u32);
 296		delta = field_len - priv_len;
 297		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
 298		mask.data[k] = (__force u32)remainder_mask;
 299	}
 300
 301	memcpy(&reg->mask, &mask, field_len);
 302
 303	return true;
 304}
 305
 306static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 307				  struct nft_flow_rule *flow,
 308				  const struct nft_payload *priv)
 309{
 310	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
 311
 312	switch (priv->offset) {
 313	case offsetof(struct ethhdr, h_source):
 314		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
 315			return -EOPNOTSUPP;
 316
 317		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
 318				  src, ETH_ALEN, reg);
 319		break;
 320	case offsetof(struct ethhdr, h_dest):
 321		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
 322			return -EOPNOTSUPP;
 323
 324		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
 325				  dst, ETH_ALEN, reg);
 326		break;
 327	case offsetof(struct ethhdr, h_proto):
 328		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 329			return -EOPNOTSUPP;
 330
 331		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
 332				  n_proto, sizeof(__be16), reg);
 333		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
 334		break;
 335	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
 336		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 337			return -EOPNOTSUPP;
 338
 339		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
 340					vlan_tci, sizeof(__be16), reg,
 341					NFT_OFFLOAD_F_NETWORK2HOST);
 342		break;
 343	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
 344		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 345			return -EOPNOTSUPP;
 346
 347		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
 348				  vlan_tpid, sizeof(__be16), reg);
 349		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
 350		break;
 351	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
 352		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 353			return -EOPNOTSUPP;
 354
 355		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
 356					vlan_tci, sizeof(__be16), reg,
 357					NFT_OFFLOAD_F_NETWORK2HOST);
 358		break;
 359	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
 360							sizeof(struct vlan_hdr):
 361		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 362			return -EOPNOTSUPP;
 363
 364		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
 365				  vlan_tpid, sizeof(__be16), reg);
 366		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
 367		break;
 368	default:
 369		return -EOPNOTSUPP;
 370	}
 371
 372	return 0;
 373}
 374
 375static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
 376				  struct nft_flow_rule *flow,
 377				  const struct nft_payload *priv)
 378{
 379	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
 380
 381	switch (priv->offset) {
 382	case offsetof(struct iphdr, saddr):
 383		if (!nft_payload_offload_mask(reg, priv->len,
 384					      sizeof(struct in_addr)))
 385			return -EOPNOTSUPP;
 386
 387		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
 388				  sizeof(struct in_addr), reg);
 389		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
 390		break;
 391	case offsetof(struct iphdr, daddr):
 392		if (!nft_payload_offload_mask(reg, priv->len,
 393					      sizeof(struct in_addr)))
 394			return -EOPNOTSUPP;
 395
 396		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
 397				  sizeof(struct in_addr), reg);
 398		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
 399		break;
 400	case offsetof(struct iphdr, protocol):
 401		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
 402			return -EOPNOTSUPP;
 403
 404		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
 405				  sizeof(__u8), reg);
 406		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
 407		break;
 408	default:
 409		return -EOPNOTSUPP;
 410	}
 411
 412	return 0;
 413}
 414
 415static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
 416				  struct nft_flow_rule *flow,
 417				  const struct nft_payload *priv)
 418{
 419	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
 420
 421	switch (priv->offset) {
 422	case offsetof(struct ipv6hdr, saddr):
 423		if (!nft_payload_offload_mask(reg, priv->len,
 424					      sizeof(struct in6_addr)))
 425			return -EOPNOTSUPP;
 426
 427		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
 428				  sizeof(struct in6_addr), reg);
 429		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
 430		break;
 431	case offsetof(struct ipv6hdr, daddr):
 432		if (!nft_payload_offload_mask(reg, priv->len,
 433					      sizeof(struct in6_addr)))
 434			return -EOPNOTSUPP;
 435
 436		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
 437				  sizeof(struct in6_addr), reg);
 438		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
 439		break;
 440	case offsetof(struct ipv6hdr, nexthdr):
 441		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
 442			return -EOPNOTSUPP;
 443
 444		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
 445				  sizeof(__u8), reg);
 446		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
 447		break;
 448	default:
 449		return -EOPNOTSUPP;
 450	}
 451
 452	return 0;
 453}
 454
 455static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
 456				  struct nft_flow_rule *flow,
 457				  const struct nft_payload *priv)
 458{
 459	int err;
 460
 461	switch (ctx->dep.l3num) {
 462	case htons(ETH_P_IP):
 463		err = nft_payload_offload_ip(ctx, flow, priv);
 464		break;
 465	case htons(ETH_P_IPV6):
 466		err = nft_payload_offload_ip6(ctx, flow, priv);
 467		break;
 468	default:
 469		return -EOPNOTSUPP;
 470	}
 471
 472	return err;
 473}
 474
 475static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
 476				   struct nft_flow_rule *flow,
 477				   const struct nft_payload *priv)
 478{
 479	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
 480
 481	switch (priv->offset) {
 482	case offsetof(struct tcphdr, source):
 483		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 484			return -EOPNOTSUPP;
 485
 486		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
 487				  sizeof(__be16), reg);
 488		break;
 489	case offsetof(struct tcphdr, dest):
 490		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 491			return -EOPNOTSUPP;
 492
 493		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
 494				  sizeof(__be16), reg);
 495		break;
 496	default:
 497		return -EOPNOTSUPP;
 498	}
 499
 500	return 0;
 501}
 502
 503static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
 504				   struct nft_flow_rule *flow,
 505				   const struct nft_payload *priv)
 506{
 507	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
 508
 509	switch (priv->offset) {
 510	case offsetof(struct udphdr, source):
 511		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 512			return -EOPNOTSUPP;
 513
 514		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
 515				  sizeof(__be16), reg);
 516		break;
 517	case offsetof(struct udphdr, dest):
 518		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 519			return -EOPNOTSUPP;
 520
 521		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
 522				  sizeof(__be16), reg);
 523		break;
 524	default:
 525		return -EOPNOTSUPP;
 526	}
 527
 528	return 0;
 529}
 530
 531static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
 532				  struct nft_flow_rule *flow,
 533				  const struct nft_payload *priv)
 534{
 535	int err;
 536
 537	switch (ctx->dep.protonum) {
 538	case IPPROTO_TCP:
 539		err = nft_payload_offload_tcp(ctx, flow, priv);
 540		break;
 541	case IPPROTO_UDP:
 542		err = nft_payload_offload_udp(ctx, flow, priv);
 543		break;
 544	default:
 545		return -EOPNOTSUPP;
 546	}
 547
 548	return err;
 549}
 550
 551static int nft_payload_offload(struct nft_offload_ctx *ctx,
 552			       struct nft_flow_rule *flow,
 553			       const struct nft_expr *expr)
 554{
 555	const struct nft_payload *priv = nft_expr_priv(expr);
 556	int err;
 557
 558	switch (priv->base) {
 559	case NFT_PAYLOAD_LL_HEADER:
 560		err = nft_payload_offload_ll(ctx, flow, priv);
 561		break;
 562	case NFT_PAYLOAD_NETWORK_HEADER:
 563		err = nft_payload_offload_nh(ctx, flow, priv);
 564		break;
 565	case NFT_PAYLOAD_TRANSPORT_HEADER:
 566		err = nft_payload_offload_th(ctx, flow, priv);
 567		break;
 568	default:
 569		err = -EOPNOTSUPP;
 570		break;
 571	}
 572	return err;
 573}
 574
 575static const struct nft_expr_ops nft_payload_ops = {
 576	.type		= &nft_payload_type,
 577	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
 578	.eval		= nft_payload_eval,
 579	.init		= nft_payload_init,
 580	.dump		= nft_payload_dump,
 581	.reduce		= nft_payload_reduce,
 582	.offload	= nft_payload_offload,
 583};
 584
 585const struct nft_expr_ops nft_payload_fast_ops = {
 586	.type		= &nft_payload_type,
 587	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
 588	.eval		= nft_payload_eval,
 589	.init		= nft_payload_init,
 590	.dump		= nft_payload_dump,
 591	.reduce		= nft_payload_reduce,
 592	.offload	= nft_payload_offload,
 593};
 594
 595void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
 596			    const struct nft_pktinfo *pkt,
 597			    struct nft_inner_tun_ctx *tun_ctx)
 598{
 599	const struct nft_payload *priv = nft_expr_priv(expr);
 600	const struct sk_buff *skb = pkt->skb;
 601	u32 *dest = &regs->data[priv->dreg];
 602	int offset;
 603
 604	if (priv->len % NFT_REG32_SIZE)
 605		dest[priv->len / NFT_REG32_SIZE] = 0;
 606
 607	switch (priv->base) {
 608	case NFT_PAYLOAD_TUN_HEADER:
 609		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TUN))
 610			goto err;
 611
 612		offset = tun_ctx->inner_tunoff;
 613		break;
 614	case NFT_PAYLOAD_LL_HEADER:
 615		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_LL))
 616			goto err;
 617
 618		offset = tun_ctx->inner_lloff;
 619		break;
 620	case NFT_PAYLOAD_NETWORK_HEADER:
 621		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_NH))
 622			goto err;
 623
 624		offset = tun_ctx->inner_nhoff;
 625		break;
 626	case NFT_PAYLOAD_TRANSPORT_HEADER:
 627		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
 628			goto err;
 629
 630		offset = tun_ctx->inner_thoff;
 631		break;
 632	default:
 633		WARN_ON_ONCE(1);
 634		goto err;
 635	}
 636	offset += priv->offset;
 637
 638	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
 639		goto err;
 640
 641	return;
 642err:
 643	regs->verdict.code = NFT_BREAK;
 644}
 645
 646static int nft_payload_inner_init(const struct nft_ctx *ctx,
 647				  const struct nft_expr *expr,
 648				  const struct nlattr * const tb[])
 649{
 650	struct nft_payload *priv = nft_expr_priv(expr);
 651	u32 base;
 652
 653	base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
 654	switch (base) {
 655	case NFT_PAYLOAD_TUN_HEADER:
 656	case NFT_PAYLOAD_LL_HEADER:
 657	case NFT_PAYLOAD_NETWORK_HEADER:
 658	case NFT_PAYLOAD_TRANSPORT_HEADER:
 659		break;
 660	default:
 661		return -EOPNOTSUPP;
 662	}
 663
 664	priv->base   = base;
 665	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
 666	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
 667
 668	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
 669					&priv->dreg, NULL, NFT_DATA_VALUE,
 670					priv->len);
 671}
 672
 673static const struct nft_expr_ops nft_payload_inner_ops = {
 674	.type		= &nft_payload_type,
 675	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
 676	.init		= nft_payload_inner_init,
 677	.dump		= nft_payload_dump,
 678	/* direct call to nft_payload_inner_eval(). */
 679};
 680
 681static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
 682{
 683	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
 684	if (*sum == 0)
 685		*sum = CSUM_MANGLED_0;
 686}
 687
 688static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
 689{
 690	struct udphdr *uh, _uh;
 691
 692	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
 693	if (!uh)
 694		return false;
 695
 696	return (__force bool)uh->check;
 697}
 698
 699static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
 700				     struct sk_buff *skb,
 701				     unsigned int *l4csum_offset)
 702{
 703	if (pkt->fragoff)
 704		return -1;
 705
 706	switch (pkt->tprot) {
 707	case IPPROTO_TCP:
 708		*l4csum_offset = offsetof(struct tcphdr, check);
 709		break;
 710	case IPPROTO_UDP:
 711		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
 712			return -1;
 713		fallthrough;
 714	case IPPROTO_UDPLITE:
 715		*l4csum_offset = offsetof(struct udphdr, check);
 716		break;
 717	case IPPROTO_ICMPV6:
 718		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
 719		break;
 720	default:
 721		return -1;
 722	}
 723
 724	*l4csum_offset += nft_thoff(pkt);
 725	return 0;
 726}
 727
 728static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
 729{
 730	struct sctphdr *sh;
 731
 732	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
 733		return -1;
 734
 735	sh = (struct sctphdr *)(skb->data + offset);
 736	sh->checksum = sctp_compute_cksum(skb, offset);
 737	skb->ip_summed = CHECKSUM_UNNECESSARY;
 738	return 0;
 739}
 740
 741static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
 742				     struct sk_buff *skb,
 743				     __wsum fsum, __wsum tsum)
 744{
 745	int l4csum_offset;
 746	__sum16 sum;
 747
  748	/* If we cannot determine the layer 4 checksum offset or this packet doesn't
 749	 * require layer 4 checksum recalculation, skip this packet.
 750	 */
 751	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
 752		return 0;
 753
 754	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
 755		return -1;
 756
 757	/* Checksum mangling for an arbitrary amount of bytes, based on
 758	 * inet_proto_csum_replace*() functions.
 759	 */
 760	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 761		nft_csum_replace(&sum, fsum, tsum);
 762		if (skb->ip_summed == CHECKSUM_COMPLETE) {
 763			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
 764					      tsum);
 765		}
 766	} else {
 767		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
 768					  tsum));
 769	}
 770
 771	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
 772	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
 773		return -1;
 774
 775	return 0;
 776}
 777
 778static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
 779				 __wsum fsum, __wsum tsum, int csum_offset)
 780{
 781	__sum16 sum;
 782
 783	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
 784		return -1;
 785
 786	nft_csum_replace(&sum, fsum, tsum);
 787	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
 788	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
 789		return -1;
 790
 791	return 0;
 792}
 793
 794struct nft_payload_set {
 795	enum nft_payload_bases	base:8;
 796	u8			offset;
 797	u8			len;
 798	u8			sreg;
 799	u8			csum_type;
 800	u8			csum_offset;
 801	u8			csum_flags;
 802};
 803
 804/* This is not struct vlan_hdr. */
 805struct nft_payload_vlan_hdr {
 806	__be16			h_vlan_proto;
 807	__be16			h_vlan_TCI;
 808};
 809
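/*
 * When the tag lives in skb metadata, writes into the first four bytes
 * of the would-be VLAN header are redirected to that metadata: a 2-byte
 * write at h_vlan_proto updates skb->vlan_proto, a 4-byte write there
 * re-tags the skb via __vlan_hwaccel_put_tag(), and a 2-byte write at
 * h_vlan_TCI updates skb->vlan_tci.  Writes at or past
 * h_vlan_encapsulated_proto fall through to ordinary payload mangling,
 * with *vlan_hlen compensating for the tag bytes missing from the
 * packet data.
 */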
 810static bool
 811nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
 812		     int *vlan_hlen)
 813{
 814	struct nft_payload_vlan_hdr *vlanh;
 815	__be16 vlan_proto;
 816	u16 vlan_tci;
 817
 818	if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
 819		*vlan_hlen = VLAN_HLEN;
 820		return true;
 821	}
 822
 823	switch (offset) {
 824	case offsetof(struct vlan_ethhdr, h_vlan_proto):
 825		if (len == 2) {
 826			vlan_proto = nft_reg_load_be16(src);
 827			skb->vlan_proto = vlan_proto;
 828		} else if (len == 4) {
 829			vlanh = (struct nft_payload_vlan_hdr *)src;
 830			__vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
 831					       ntohs(vlanh->h_vlan_TCI));
 832		} else {
 833			return false;
 834		}
 835		break;
 836	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
 837		if (len != 2)
 838			return false;
 839
 840		vlan_tci = ntohs(nft_reg_load_be16(src));
 841		skb->vlan_tci = vlan_tci;
 842		break;
 843	default:
 844		return false;
 845	}
 846
 847	return true;
 848}
 849
 850static void nft_payload_set_eval(const struct nft_expr *expr,
 851				 struct nft_regs *regs,
 852				 const struct nft_pktinfo *pkt)
 853{
 854	const struct nft_payload_set *priv = nft_expr_priv(expr);
 855	const u32 *src = &regs->data[priv->sreg];
 856	int offset, csum_offset, vlan_hlen = 0;
 857	struct sk_buff *skb = pkt->skb;
 858	__wsum fsum, tsum;
 859
 860	switch (priv->base) {
 861	case NFT_PAYLOAD_LL_HEADER:
 862		if (!skb_mac_header_was_set(skb))
 863			goto err;
 864
 865		if (skb_vlan_tag_present(skb) &&
 866		    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
 867			if (!nft_payload_set_vlan(src, skb,
 868						  priv->offset, priv->len,
 869						  &vlan_hlen))
 870				goto err;
 871
 872			if (!vlan_hlen)
 873				return;
 874		}
 875
 876		offset = skb_mac_header(skb) - skb->data - vlan_hlen;
 877		break;
 878	case NFT_PAYLOAD_NETWORK_HEADER:
 879		offset = skb_network_offset(skb);
 880		break;
 881	case NFT_PAYLOAD_TRANSPORT_HEADER:
 882		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
 883			goto err;
 884		offset = nft_thoff(pkt);
 885		break;
 886	case NFT_PAYLOAD_INNER_HEADER:
 887		offset = nft_payload_inner_offset(pkt);
 888		if (offset < 0)
 889			goto err;
 890		break;
 891	default:
 892		WARN_ON_ONCE(1);
 893		goto err;
 894	}
 895
 896	csum_offset = offset + priv->csum_offset;
 897	offset += priv->offset;
 898
 899	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
 900	    ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
 901	      priv->base != NFT_PAYLOAD_INNER_HEADER) ||
 902	     skb->ip_summed != CHECKSUM_PARTIAL)) {
 903		fsum = skb_checksum(skb, offset, priv->len, 0);
 904		tsum = csum_partial(src, priv->len, 0);
 905
 906		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
 907		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
 908			goto err;
 909
 910		if (priv->csum_flags &&
 911		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
 912			goto err;
 913	}
 914
 915	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
 916	    skb_store_bits(skb, offset, src, priv->len) < 0)
 917		goto err;
 918
 919	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
 920	    pkt->tprot == IPPROTO_SCTP &&
 921	    skb->ip_summed != CHECKSUM_PARTIAL) {
 922		if (pkt->fragoff == 0 &&
 923		    nft_payload_csum_sctp(skb, nft_thoff(pkt)))
 924			goto err;
 925	}
 926
 927	return;
 928err:
 929	regs->verdict.code = NFT_BREAK;
 930}
 931
 932static int nft_payload_set_init(const struct nft_ctx *ctx,
 933				const struct nft_expr *expr,
 934				const struct nlattr * const tb[])
 935{
 936	struct nft_payload_set *priv = nft_expr_priv(expr);
 937	u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
 938	int err;
 939
 940	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
 941	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
 942	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
 943
 944	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
 945		csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
 946	if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
 947		err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
 948					  &csum_offset);
 949		if (err < 0)
 950			return err;
 951
 952		priv->csum_offset = csum_offset;
 953	}
 954	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
 955		u32 flags;
 956
 957		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
 958		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
 959			return -EINVAL;
 960
 961		priv->csum_flags = flags;
 962	}
 963
 964	switch (csum_type) {
 965	case NFT_PAYLOAD_CSUM_NONE:
 966	case NFT_PAYLOAD_CSUM_INET:
 967		break;
 968	case NFT_PAYLOAD_CSUM_SCTP:
 969		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
 970			return -EINVAL;
 971
 972		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
 973			return -EINVAL;
 974		break;
 975	default:
 976		return -EOPNOTSUPP;
 977	}
 978	priv->csum_type = csum_type;
 979
 980	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
 981				       priv->len);
 982}
 983
 984static int nft_payload_set_dump(struct sk_buff *skb,
 985				const struct nft_expr *expr, bool reset)
 986{
 987	const struct nft_payload_set *priv = nft_expr_priv(expr);
 988
 989	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
 990	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
 991	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
 992	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
 993	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
 994	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
 995			 htonl(priv->csum_offset)) ||
 996	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
 997		goto nla_put_failure;
 998	return 0;
 999
1000nla_put_failure:
1001	return -1;
1002}
1003
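/*
 * A payload store may overwrite the very bytes an earlier payload load
 * was tracked against, so cancel tracking for every register whose
 * contents came from a payload load.
 */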
1004static bool nft_payload_set_reduce(struct nft_regs_track *track,
1005				   const struct nft_expr *expr)
1006{
1007	int i;
1008
1009	for (i = 0; i < NFT_REG32_NUM; i++) {
1010		if (!track->regs[i].selector)
1011			continue;
1012
1013		if (track->regs[i].selector->ops != &nft_payload_ops &&
1014		    track->regs[i].selector->ops != &nft_payload_fast_ops)
1015			continue;
1016
1017		__nft_reg_track_cancel(track, i);
1018	}
1019
1020	return false;
1021}
1022
1023static const struct nft_expr_ops nft_payload_set_ops = {
1024	.type		= &nft_payload_type,
1025	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
1026	.eval		= nft_payload_set_eval,
1027	.init		= nft_payload_set_init,
1028	.dump		= nft_payload_set_dump,
1029	.reduce		= nft_payload_set_reduce,
1030};
1031
1032static const struct nft_expr_ops *
1033nft_payload_select_ops(const struct nft_ctx *ctx,
1034		       const struct nlattr * const tb[])
1035{
1036	enum nft_payload_bases base;
1037	unsigned int offset, len;
1038	int err;
1039
1040	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
1041	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
1042	    tb[NFTA_PAYLOAD_LEN] == NULL)
1043		return ERR_PTR(-EINVAL);
1044
1045	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
1046	switch (base) {
1047	case NFT_PAYLOAD_LL_HEADER:
1048	case NFT_PAYLOAD_NETWORK_HEADER:
1049	case NFT_PAYLOAD_TRANSPORT_HEADER:
1050	case NFT_PAYLOAD_INNER_HEADER:
1051		break;
1052	default:
1053		return ERR_PTR(-EOPNOTSUPP);
1054	}
1055
1056	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
1057		if (tb[NFTA_PAYLOAD_DREG] != NULL)
1058			return ERR_PTR(-EINVAL);
1059		return &nft_payload_set_ops;
1060	}
1061
1062	if (tb[NFTA_PAYLOAD_DREG] == NULL)
1063		return ERR_PTR(-EINVAL);
1064
1065	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
1066	if (err < 0)
1067		return ERR_PTR(err);
1068
1069	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
1070	if (err < 0)
1071		return ERR_PTR(err);
1072
1073	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
1074	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
1075		return &nft_payload_fast_ops;
1076	else
1077		return &nft_payload_ops;
1078}
1079
1080struct nft_expr_type nft_payload_type __read_mostly = {
1081	.name		= "payload",
1082	.select_ops	= nft_payload_select_ops,
1083	.inner_ops	= &nft_payload_inner_ops,
1084	.policy		= nft_payload_policy,
1085	.maxattr	= NFTA_PAYLOAD_MAX,
1086	.owner		= THIS_MODULE,
1087};
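
A runnable userspace sketch of the mask construction performed by
nft_payload_offload_mask() above, for the priv_len = 3 / field_len = 4
case worked through earlier (a /24 prefix of iphdr->saddr).  Illustration
only: plain arrays stand in for struct nft_data and reg->mask.

#include <arpa/inet.h>	/* htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8

int main(void)
{
	uint32_t mask[4] = { 0 };	/* stand-in for struct nft_data */
	uint8_t out[16] = { 0 };	/* stand-in for reg->mask */
	unsigned int priv_len = 3, field_len = 4;
	unsigned int remainder, delta, k;

	memset(mask, 0xff, field_len);
	remainder = priv_len % sizeof(uint32_t);
	if (remainder) {
		k = priv_len / sizeof(uint32_t);
		delta = field_len - priv_len;
		mask[k] = htonl(~((1u << (delta * BITS_PER_BYTE)) - 1));
	}
	memcpy(out, mask, field_len);

	/* prints "mask ff:ff:ff:00": only the top three bytes match */
	printf("mask %02x:%02x:%02x:%02x\n", out[0], out[1], out[2], out[3]);
	return 0;
}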