net/netfilter/nft_payload.c, Linux v4.10.11
 
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
/* add vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		/* Rebuild the Ethernet header: MAC addresses from the
		 * packet, TPID from the out-of-band tag metadata. */
		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;
	} else if (offset >= VLAN_ETH_HLEN) {
		/* Read is past the (reinserted) tag: it maps straight into
		 * the packet, shifted back by the tag length. */
		offset -= VLAN_HLEN;
		goto skip;
	}

	/* TCI from the offload metadata, encapsulated protocol from the
	 * packet's real ethertype. */
	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
 skip:
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
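The function above answers link-layer reads when hardware has stripped the 802.1Q tag into skb metadata: offsets 0 through 17 are served from a reconstructed VLAN Ethernet header, and anything at or past VLAN_ETH_HLEN maps back into the frame at offset - VLAN_HLEN. The user-space sketch below mirrors that byte mapping; the struct-free layout, the read_ll() helper name, and the sample frame and tag values are ours, not the kernel's.

/* User-space sketch of the remapping nft_payload_copy_vlan() performs
 * (local definitions and sample values; not kernel code). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define VLAN_ETH_HLEN	18

/* Frame as the driver delivered it: tag already stripped. */
static const uint8_t frame[] = {
	0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* dst MAC */
	0x02, 0x00, 0x00, 0x00, 0x00, 0x02,	/* src MAC */
	0x08, 0x00,				/* ethertype: IPv4 */
	0x45, 0x00, 0x00, 0x54,			/* start of payload */
};

/* Out-of-band tag metadata, as in skb->vlan_proto / the vlan tci. */
static const uint16_t vlan_proto = 0x8100, vlan_tci = 0x0064;

/* Read `len` bytes at `offset` of the frame as it would look on the
 * wire, i.e. with the tag reinserted after the MAC addresses. */
static void read_ll(uint8_t *dst, uint8_t offset, uint8_t len)
{
	uint8_t veth[VLAN_ETH_HLEN];

	/* MACs, then TPID/TCI from metadata, then the real ethertype. */
	memcpy(veth, frame, 12);
	veth[12] = vlan_proto >> 8; veth[13] = vlan_proto & 0xff;
	veth[14] = vlan_tci >> 8;   veth[15] = vlan_tci & 0xff;
	memcpy(&veth[16], &frame[12], 2);

	while (len--) {
		*dst++ = offset < VLAN_ETH_HLEN ?
			 veth[offset] :			/* rebuilt header */
			 frame[offset - VLAN_HLEN];	/* shifted payload */
		offset++;
	}
}

int main(void)
{
	uint8_t buf[4];

	read_ll(buf, 12, 4);	/* spans TPID + TCI */
	printf("%02x%02x %02x%02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}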
static void nft_payload_eval(const struct nft_expr *expr,
			     struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}
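nft_csum_replace() above is the incremental Internet-checksum update of RFC 1624 (HC' = ~(~HC + ~m + m')): fsum is the one's-complement sum over the bytes being replaced, tsum over their replacement. The stand-alone sketch below mirrors the same arithmetic and checks it against a full recompute; csum16() and the sample bytes are ours, not kernel code.

/* Demo of the incremental checksum update used above (RFC 1624). */
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum of a buffer (the Internet checksum
 * without the final inversion). */
static uint32_t csum16(const uint8_t *p, size_t n)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < n; i += 2)
		sum += (uint32_t)(p[i] << 8 | p[i + 1]);
	if (n & 1)
		sum += (uint32_t)(p[n - 1] << 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t pkt[8] = { 0x45, 0x00, 0x00, 0x54, 0x12, 0x34, 0x00, 0x00 };
	uint16_t old_check = ~csum16(pkt, sizeof(pkt)) & 0xffff;

	/* Mangle two bytes, as nft_payload_set_eval() might. */
	uint32_t fsum = csum16(&pkt[4], 2);	/* sum over the old bytes */
	pkt[4] = 0xab; pkt[5] = 0xcd;
	uint32_t tsum = csum16(&pkt[4], 2);	/* sum over the new bytes */

	/* The nft_csum_replace() arithmetic: HC' = ~(~HC + ~m + m'),
	 * i.e. subtract the old data and add the new data, all in
	 * one's-complement arithmetic. */
	uint32_t sum = (~old_check & 0xffff) + (~fsum & 0xffff) + tsum;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	uint16_t new_check = ~sum & 0xffff;

	printf("incremental %#06x, recomputed %#06x\n",
	       (unsigned)new_check,
	       (unsigned)(~csum16(pkt, sizeof(pkt)) & 0xffff));
	return 0;
}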
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	/* A zero checksum means the sender did not compute one (legal for
	 * UDP over IPv4), so there is nothing to update. */
	return uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		/* Fall through. */
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (!skb_make_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg        = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}
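nft_payload_select_ops() above picks nft_payload_fast_ops only for loads that the special-cased fast path in nf_tables_core.c (nft_payload_fast_eval()) can serve with a single aligned access: a length of 1, 2, or 4 bytes, an offset aligned to that length, and a base other than the link layer (where the VLAN rebuild above may be needed). A user-space restatement of the predicate follows; the kernel macros are reimplemented locally and the sample tuples are ours.

/* The fast-ops selection predicate, re-stated in user space. */
#include <stdbool.h>
#include <stdio.h>

#define is_power_of_2(n)	((n) != 0 && (((n) & ((n) - 1)) == 0))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

enum base { LL, NH, TH };

static bool use_fast_ops(enum base base, unsigned int offset, unsigned int len)
{
	return len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	       base != LL;
}

int main(void)
{
	/* ip protocol: 1 byte at offset 9 of the network header */
	printf("%d\n", use_fast_ops(NH, 9, 1));		/* 1: fast */
	/* tcp dport: 2 bytes at offset 2 of the transport header */
	printf("%d\n", use_fast_ops(TH, 2, 2));		/* 1: fast */
	/* 3-byte load: not a power of two */
	printf("%d\n", use_fast_ops(NH, 12, 3));	/* 0: generic */
	/* ether saddr: link layer always takes the generic path */
	printf("%d\n", use_fast_ops(LL, 6, 2));		/* 0: generic */
	return 0;
}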
struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};
net/netfilter/nft_payload.c, Linux v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/gre.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	/* A second, in-frame tag (QinQ) widens the rebuilt header when
	 * the read starts inside it. */
	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
	unsigned int thoff = nft_thoff(pkt);

	if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_UDP:
		pkt->inneroff = thoff + sizeof(struct udphdr);
		break;
	case IPPROTO_TCP: {
		struct tcphdr *th, _tcph;

		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
		if (!th)
			return -1;

		pkt->inneroff = thoff + __tcp_hdrlen(th);
		}
		break;
	case IPPROTO_GRE: {
		u32 offset = sizeof(struct gre_base_hdr);
		struct gre_base_hdr *gre, _gre;
		__be16 version;

		gre = skb_header_pointer(pkt->skb, thoff, sizeof(_gre), &_gre);
		if (!gre)
			return -1;

		version = gre->flags & GRE_VERSION;
		switch (version) {
		case GRE_VERSION_0:
			if (gre->flags & GRE_ROUTING)
				return -1;

			if (gre->flags & GRE_CSUM) {
				offset += sizeof_field(struct gre_full_hdr, csum) +
					  sizeof_field(struct gre_full_hdr, reserved1);
			}
			if (gre->flags & GRE_KEY)
				offset += sizeof_field(struct gre_full_hdr, key);

			if (gre->flags & GRE_SEQ)
				offset += sizeof_field(struct gre_full_hdr, seq);
			break;
		default:
			return -1;
		}

		pkt->inneroff = thoff + offset;
		}
		break;
	case IPPROTO_IPIP:
		pkt->inneroff = thoff;
		break;
	default:
		return -1;
	}

	pkt->flags |= NFT_PKTINFO_INNER;

	return 0;
}

int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	/* Computed once per packet; NFT_PKTINFO_INNER caches the result. */
	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
		return -1;

	return pkt->inneroff;
}
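For GRE version 0, the inner header begins after the 4-byte base header plus 4 bytes for each optional field group the flag bits announce: checksum plus reserved1, key, and sequence number, which is exactly what the sizeof_field() sums above add up. A stand-alone restatement of that accounting, with the flag bits reduced to booleans and the helper name ours:

/* GRE version-0 offset accounting from __nft_payload_inner_offset(),
 * as a user-space sketch (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

#define GRE_BASE_HLEN	4	/* flags/version + protocol */

static int gre_inner_offset(bool csum, bool key, bool seq)
{
	int off = GRE_BASE_HLEN;

	if (csum)
		off += 4;	/* checksum (2) + reserved1 (2) */
	if (key)
		off += 4;
	if (seq)
		off += 4;
	return off;
}

int main(void)
{
	/* e.g. a keyed GRE tunnel with checksums enabled */
	printf("inner header at +%d\n", gre_inner_offset(true, true, false)); /* 12 */
	return 0;
}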
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_LEN]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb,
			    const struct nft_expr *expr, bool reset)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
/* Register tracking: report whether this load can be elided because the
 * tracked register already holds the same payload bytes. */
static bool nft_payload_reduce(struct nft_regs_track *track,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct nft_payload *payload;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	payload = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->base != payload->base ||
	    priv->offset != payload->offset ||
	    priv->len != payload->len) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return nft_expr_reduce_bitwise(track, expr);
}
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}

static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
			    const struct nft_pktinfo *pkt,
			    struct nft_inner_tun_ctx *tun_ctx)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_TUN_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TUN))
			goto err;

		offset = tun_ctx->inner_tunoff;
		break;
	case NFT_PAYLOAD_LL_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_LL))
			goto err;

		offset = tun_ctx->inner_lloff;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_NH))
			goto err;

		offset = tun_ctx->inner_nhoff;
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
			goto err;

		offset = tun_ctx->inner_thoff;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_inner_init(const struct nft_ctx *ctx,
				  const struct nft_expr *expr,
				  const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);
	u32 base;

	base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_TUN_HEADER:
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return -EOPNOTSUPP;
	}

	priv->base   = base;
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static const struct nft_expr_ops nft_payload_inner_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.init		= nft_payload_inner_init,
	.dump		= nft_payload_dump,
	/* direct call to nft_payload_inner_eval(). */
};
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	if (pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}

static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}
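SCTP uses CRC32c (RFC 4960) rather than the Internet checksum, so the one's-complement fsum/tsum delta used elsewhere cannot patch it; nft_payload_csum_sctp() above instead recomputes the digest over the whole packet with sctp_compute_cksum(). For reference, a bitwise CRC32c (reflected polynomial 0x82f63b78), checked against the well-known test vector; table-driven or hardware-assisted versions are what production code uses, and this sketch is ours, not the kernel's implementation.

/* Reference bitwise CRC32c, the digest SCTP carries in its checksum
 * field (computed with the field zeroed). */
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c(const uint8_t *p, size_t n)
{
	uint32_t crc = 0xffffffff;

	while (n--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	/* Standard check value: CRC32c("123456789") = 0xe3069283 */
	printf("%08x\n", crc32c((const uint8_t *)"123456789", 9));
	return 0;
}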
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

struct nft_payload_set {
	enum nft_payload_bases	base:8;
	u8			offset;
	u8			len;
	u8			sreg;
	u8			csum_type;
	u8			csum_offset;
	u8			csum_flags;
};

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
	      priv->base != NFT_PAYLOAD_INNER_HEADER) ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (pkt->fragoff == 0 &&
		    nft_payload_csum_sctp(skb, nft_thoff(pkt)))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);
	u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
	int err;

	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
		err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
					  &csum_offset);
		if (err < 0)
			return err;

		priv->csum_offset = csum_offset;
	}
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}
	priv->csum_type = csum_type;

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb,
				const struct nft_expr *expr, bool reset)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_set_reduce(struct nft_regs_track *track,
				   const struct nft_expr *expr)
{
	int i;

	for (i = 0; i < NFT_REG32_NUM; i++) {
		if (!track->regs[i].selector)
			continue;

		if (track->regs[i].selector->ops != &nft_payload_ops &&
		    track->regs[i].selector->ops != &nft_payload_fast_ops)
			continue;

		__nft_reg_track_cancel(track, i);
	}

	return false;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
	.reduce		= nft_payload_set_reduce,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;
	int err;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
	case NFT_PAYLOAD_INNER_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
	if (err < 0)
		return ERR_PTR(err);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
	if (err < 0)
		return ERR_PTR(err);

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.inner_ops	= &nft_payload_inner_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};