/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Checksumming functions for IP, TCP, UDP and so on
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Borrows very liberally from tcp.c and ip.c, see those
 *		files for more names.
 */

#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/checksum.h>
#if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER)
#include <linux/uaccess.h>
#endif

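/*
 * Generic fallback: copy @len bytes from user space to @dst, then return a
 * partial checksum over the copied data.  The sum is seeded with ~0U so a
 * successful result is never 0, leaving 0 free to signal a faulting copy.
 */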
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static __always_inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
				      int len)
{
	if (copy_from_user(dst, src, len))
		return 0;
	return csum_partial(dst, len, ~0U);
}
#endif

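/*
 * Generic fallback: checksum @len bytes at @src and copy them to user space.
 * Returns the partial checksum (seeded with ~0U) on success, 0 if the copy
 * faulted.
 */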
#ifndef HAVE_CSUM_COPY_USER
static __always_inline __wsum csum_and_copy_to_user
(const void *src, void __user *dst, int len)
{
	__wsum sum = csum_partial(src, len, ~0U);

	if (copy_to_user(dst, src, len) == 0)
		return sum;
	return 0;
}
#endif

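/* Generic fallback: plain memcpy() followed by a checksum over @dst. */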
#ifndef _HAVE_ARCH_CSUM_AND_COPY
static __always_inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, 0);
}
#endif

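/*
 * Ones'-complement addition of two partial checksums: any carry out of the
 * 32-bit add is folded back into the low bit.
 */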
#ifndef HAVE_ARCH_CSUM_ADD
static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
	u32 res = (__force u32)csum;
	res += (__force u32)addend;
	return (__force __wsum)(res + (res < (__force u32)addend));
}
#endif

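/* Subtract @addend from @csum by adding its ones' complement. */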
static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
	return csum_add(csum, ~addend);
}

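/* 16-bit counterparts of csum_add()/csum_sub() for folded checksums. */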
static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
	u16 res = (__force u16)csum;

	res += (__force u16)addend;
	return (__force __sum16)(res + (res < (__force u16)addend));
}

static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
	return csum16_add(csum, ~addend);
}

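/*
 * Align a checksum computed over data starting at byte @offset within a
 * larger block: an odd offset swaps the byte lanes, which is undone by an
 * 8-bit rotation of the sum.
 */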
#ifndef HAVE_ARCH_CSUM_SHIFT
static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
	/* rotate sum to align it with a 16b boundary */
	if (offset & 1)
		return (__force __wsum)ror32((__force u32)sum, 8);
	return sum;
}
#endif

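/*
 * Fold @csum2, computed over a fragment starting @offset bytes into the
 * overall data, into the running checksum @csum.
 */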
static __always_inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
	return csum_add(csum, csum_shift(csum2, offset));
}

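/* Same as csum_block_add(); the extra @len argument is accepted but unused. */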
static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
	return csum_block_add(csum, csum2, offset);
}

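/* Remove a fragment's checksum @csum2 (starting at @offset) from @csum. */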
static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
	return csum_block_add(csum, ~csum2, offset);
}

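/* Widen a folded 16-bit checksum back into a 32-bit __wsum. */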
static __always_inline __wsum csum_unfold(__sum16 n)
{
	return (__force __wsum)n;
}

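/* Thin wrapper around csum_partial(). */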
static __always_inline
__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
	return csum_partial(buff, len, sum);
}

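/*
 * All-ones value transmitted in place of a computed checksum of zero, e.g.
 * by UDP, where a zero checksum field means "no checksum".
 */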
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)

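/* Apply a 32-bit checksum difference @diff to the folded checksum at @sum. */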
static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}

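/*
 * Update the folded checksum at @sum after a 32-bit field covered by it
 * changed from @from to @to.
 */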
static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);

	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}

/* Implements RFC 1624 (Incremental Internet Checksum)
 * 3. Discussion states :
 *     HC' = ~(~HC + ~m + m')
 *  m : old value of a 16bit field
 *  m' : new value of a 16bit field
 */
static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}

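/* Replace the contribution of @old with @new in the unfolded checksum at @csum. */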
static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
{
	*csum = csum_add(csum_sub(*csum, old), new);
}

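/* Fold a 32-bit sum down to 16 bits with end-around carry. */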
static inline unsigned short csum_from32to16(unsigned int sum)
{
	sum += (sum >> 16) | (sum << 16);
	return (unsigned short)(sum >> 16);
}

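/*
 * Out-of-line helpers that rewrite a protocol checksum field inside an skb
 * while keeping skb->csum consistent; @pseudohdr indicates that the changed
 * bytes are also covered by the pseudo-header.
 */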
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr);

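/* 16-bit convenience wrapper around inet_proto_csum_replace4(). */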
static __always_inline
void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
			      __be16 from, __be16 to, bool pseudohdr)
{
	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
				 (__force __be32)to, pseudohdr);
}

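/*
 * Remote checksum offload helper: remove the checksum of the first @start
 * bytes from @csum, store the resulting folded checksum into the packet at
 * @offset, and return the difference between the new and old field values
 * so an enclosing checksum can be patched.
 */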
static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
					     int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out checksum up to start */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Set derived checksum in packet */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}

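/* Restore a checksum field adjusted by remcsum_adjust(), given its @delta. */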
static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}

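/* Arithmetic negation of a 32-bit partial checksum. */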
static __always_inline __wsum wsum_negate(__wsum val)
{
	return (__force __wsum)-((__force u32)val);
}
#endif