1/*
2 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
3 * Subject to the GNU Public License v.2
4 *
5 * Wrappers of assembly checksum functions for x86-64.
6 */
7#include <asm/checksum.h>
8#include <linux/module.h>
9#include <asm/smap.h>
10
/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
22__wsum
23csum_partial_copy_from_user(const void __user *src, void *dst,
24 int len, __wsum isum, int *errp)
25{
26 might_sleep();
27 *errp = 0;
28
29 if (!likely(access_ok(VERIFY_READ, src, len)))
30 goto out_err;
31
32 /*
33 * Why 6, not 7? To handle odd addresses aligned we
34 * would need to do considerable complications to fix the
35 * checksum which is defined as an 16bit accumulator. The
36 * fix alignment code is primarily for performance
37 * compatibility with 32bit and that will handle odd
38 * addresses slowly too.
39 */
40 if (unlikely((unsigned long)src & 6)) {
41 while (((unsigned long)src & 6) && len >= 2) {
42 __u16 val16;
43
44 *errp = __get_user(val16, (const __u16 __user *)src);
45 if (*errp)
46 return isum;
47
48 *(__u16 *)dst = val16;
49 isum = (__force __wsum)add32_with_carry(
50 (__force unsigned)isum, val16);
51 src += 2;
52 dst += 2;
53 len -= 2;
54 }
55 }
56 stac();
57 isum = csum_partial_copy_generic((__force const void *)src,
58 dst, len, isum, errp, NULL);
59 clac();
60 if (unlikely(*errp))
61 goto out_err;
62
63 return isum;
64
65out_err:
66 *errp = -EFAULT;
67 memset(dst, 0, len);
68
69 return isum;
70}
71EXPORT_SYMBOL(csum_partial_copy_from_user);
72
/**
 * csum_partial_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
84__wsum
85csum_partial_copy_to_user(const void *src, void __user *dst,
86 int len, __wsum isum, int *errp)
87{
88 __wsum ret;
89
90 might_sleep();
91
92 if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
93 *errp = -EFAULT;
94 return 0;
95 }
96
97 if (unlikely((unsigned long)dst & 6)) {
98 while (((unsigned long)dst & 6) && len >= 2) {
99 __u16 val16 = *(__u16 *)src;
100
101 isum = (__force __wsum)add32_with_carry(
102 (__force unsigned)isum, val16);
103 *errp = __put_user(val16, (__u16 __user *)dst);
104 if (*errp)
105 return isum;
106 src += 2;
107 dst += 2;
108 len -= 2;
109 }
110 }
111
112 *errp = 0;
113 stac();
114 ret = csum_partial_copy_generic(src, (void __force *)dst,
115 len, isum, NULL, errp);
116 clac();
117 return ret;
118}
119EXPORT_SYMBOL(csum_partial_copy_to_user);
120
/**
 * csum_partial_copy_nocheck - Copy and checksum.
 * @src: source address
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @sum: initial sum that is added into the result (32bit unfolded)
 *
 * Returns a 32bit unfolded checksum of the buffer.
 */
130__wsum
131csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
132{
133 return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
134}
135EXPORT_SYMBOL(csum_partial_copy_nocheck);
136
/**
 * csum_ipv6_magic - Compute the IPv6 pseudo-header checksum.
 * @saddr: source IPv6 address
 * @daddr: destination IPv6 address
 * @len: payload length (host byte order; converted with htonl below)
 * @proto: upper-layer protocol number (host byte order)
 * @sum: initial partial sum (32bit unfolded) to be added in
 *
 * Returns the folded 16-bit checksum of the pseudo header.
 */
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, unsigned short proto, __wsum sum)
{
	__u64 rest, sum64;

	/*
	 * Accumulate length, protocol and the caller's partial sum in a
	 * 64-bit value; the extra width gives headroom so no carries are
	 * lost before the explicit carry chain below.
	 */
	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

	/*
	 * Sum the two 64-bit halves of each 128-bit address with an
	 * add/adc carry chain; the trailing "adcq $0" folds the final
	 * carry back into the accumulator.
	 *
	 * NOTE(review): the asm dereferences *saddr/*daddr but only lists
	 * the pointers as "r" inputs -- there is no memory operand or
	 * "memory" clobber, so the compiler is in principle free to
	 * reorder prior stores to those addresses past this asm. Verify,
	 * or add explicit memory input constraints.
	 */
	asm(" addq (%[saddr]),%[sum]\n"
	    " adcq 8(%[saddr]),%[sum]\n"
	    " adcq (%[daddr]),%[sum]\n"
	    " adcq 8(%[daddr]),%[sum]\n"
	    " adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	/* Collapse 64 -> 32 bits (with carry), then fold to 16 bits. */
	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
1/*
2 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
3 * Subject to the GNU Public License v.2
4 *
5 * Wrappers of assembly checksum functions for x86-64.
6 */
7#include <asm/checksum.h>
8#include <linux/module.h>
9#include <asm/smap.h>
10
/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
22__wsum
23csum_partial_copy_from_user(const void __user *src, void *dst,
24 int len, __wsum isum, int *errp)
25{
26 might_sleep();
27 *errp = 0;
28
29 if (!likely(access_ok(VERIFY_READ, src, len)))
30 goto out_err;
31
32 /*
33 * Why 6, not 7? To handle odd addresses aligned we
34 * would need to do considerable complications to fix the
35 * checksum which is defined as an 16bit accumulator. The
36 * fix alignment code is primarily for performance
37 * compatibility with 32bit and that will handle odd
38 * addresses slowly too.
39 */
40 if (unlikely((unsigned long)src & 6)) {
41 while (((unsigned long)src & 6) && len >= 2) {
42 __u16 val16;
43
44 if (__get_user(val16, (const __u16 __user *)src))
45 goto out_err;
46
47 *(__u16 *)dst = val16;
48 isum = (__force __wsum)add32_with_carry(
49 (__force unsigned)isum, val16);
50 src += 2;
51 dst += 2;
52 len -= 2;
53 }
54 }
55 stac();
56 isum = csum_partial_copy_generic((__force const void *)src,
57 dst, len, isum, errp, NULL);
58 clac();
59 if (unlikely(*errp))
60 goto out_err;
61
62 return isum;
63
64out_err:
65 *errp = -EFAULT;
66 memset(dst, 0, len);
67
68 return isum;
69}
70EXPORT_SYMBOL(csum_partial_copy_from_user);
71
/**
 * csum_partial_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
{
	__wsum ret;

	might_sleep();

	/* Reject destinations outside the user address range up front. */
	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
		*errp = -EFAULT;
		return 0;
	}

	/*
	 * Hand-align dst to an even 8-byte boundary, two bytes per step.
	 * Only the even low bits (mask 6) are fixed up; odd addresses
	 * would need expensive corrections to the 16-bit ones' complement
	 * accumulator and are left to the slow path.
	 */
	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 val16 = *(__u16 *)src;

			/* The word is summed before the store is attempted. */
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			*errp = __put_user(val16, (__u16 __user *)dst);
			if (*errp)
				/*
				 * NOTE(review): on a fault the returned
				 * partial sum already includes the word that
				 * was never stored; *errp carries
				 * __put_user()'s error code.
				 */
				return isum;
			src += 2;
			dst += 2;
			len -= 2;
		}
	}

	*errp = 0;
	/* Open the SMAP user-access window for the asm copy/checksum helper. */
	stac();
	ret = csum_partial_copy_generic(src, (void __force *)dst,
					len, isum, NULL, errp);
	clac();
	return ret;
}
EXPORT_SYMBOL(csum_partial_copy_to_user);
119
/**
 * csum_partial_copy_nocheck - Copy and checksum.
 * @src: source address
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @sum: initial sum that is added into the result (32bit unfolded)
 *
 * Returns a 32bit unfolded checksum of the buffer.
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	/*
	 * Kernel-to-kernel copy: no access checks, and both fault-report
	 * pointer slots of the asm helper are passed as NULL.
	 */
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
135
/**
 * csum_ipv6_magic - Compute the IPv6 pseudo-header checksum.
 * @saddr: source IPv6 address
 * @daddr: destination IPv6 address
 * @len: payload length (host byte order; converted with htonl below)
 * @proto: upper-layer protocol number (host byte order)
 * @sum: initial partial sum (32bit unfolded) to be added in
 *
 * Returns the folded 16-bit checksum of the pseudo header.
 */
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum)
{
	__u64 rest, sum64;

	/*
	 * Accumulate length, protocol and the caller's partial sum in a
	 * 64-bit value; the extra width gives headroom so no carries are
	 * lost before the explicit carry chain below.
	 */
	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

	/*
	 * Sum the two 64-bit halves of each 128-bit address with an
	 * add/adc carry chain; the trailing "adcq $0" folds the final
	 * carry back into the accumulator.
	 *
	 * NOTE(review): the asm dereferences *saddr/*daddr but only lists
	 * the pointers as "r" inputs -- there is no memory operand or
	 * "memory" clobber, so the compiler is in principle free to
	 * reorder prior stores to those addresses past this asm. Verify,
	 * or add explicit memory input constraints.
	 */
	asm(" addq (%[saddr]),%[sum]\n"
	    " adcq 8(%[saddr]),%[sum]\n"
	    " adcq (%[daddr]),%[sum]\n"
	    " adcq 8(%[daddr]),%[sum]\n"
	    " adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	/* Collapse 64 -> 32 bits (with carry), then fold to 16 bits. */
	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);