v3.15
/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
 */
#include <asm/checksum.h>
#include <linux/module.h>
#include <asm/smap.h>

/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum isum, int *errp)
{
	might_sleep();
	*errp = 0;

	if (!likely(access_ok(VERIFY_READ, src, len)))
		goto out_err;

	/*
	 * Why 6, not 7? Handling odd addresses would require
	 * considerable complications to fix up the checksum, which
	 * is defined as a 16bit accumulator. The alignment fixup
	 * code is primarily for performance compatibility with
	 * 32bit, and that handles odd addresses slowly too.
	 */
	if (unlikely((unsigned long)src & 6)) {
		while (((unsigned long)src & 6) && len >= 2) {
			__u16 val16;

			*errp = __get_user(val16, (const __u16 __user *)src);
			if (*errp)
				return isum;

			*(__u16 *)dst = val16;
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			src += 2;
			dst += 2;
			len -= 2;
		}
	}
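	/*
	 * stac()/clac() below bracket the window in which the asm copy
	 * may touch user memory when SMAP is enabled.
	 */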
	stac();
	isum = csum_partial_copy_generic((__force const void *)src,
				dst, len, isum, errp, NULL);
	clac();
	if (unlikely(*errp))
		goto out_err;

	return isum;

out_err:
	*errp = -EFAULT;
	memset(dst, 0, len);

	return isum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
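
/*
 * A hedged usage sketch, not part of this file: one way a caller could
 * combine csum_partial_copy_from_user() with csum_fold() from
 * <asm/checksum.h> to copy a user buffer and obtain the final 16-bit
 * checksum in one pass.  The wrapper name copy_and_fold_from_user() is
 * hypothetical.
 */
static __sum16 copy_and_fold_from_user(const void __user *src, void *dst,
				       int len)
{
	int err = 0;
	__wsum csum;

	/* The result is still a 32bit unfolded sum; faults land in err. */
	csum = csum_partial_copy_from_user(src, dst, len, 0, &err);
	if (err)
		return 0;	/* a real caller would report -EFAULT here */

	/* Fold the unfolded sum down to the 16-bit Internet checksum. */
	return csum_fold(csum);
}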

/**
 * csum_partial_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
{
	__wsum ret;

	might_sleep();

	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
		*errp = -EFAULT;
		return 0;
	}

	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 val16 = *(__u16 *)src;

			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			*errp = __put_user(val16, (__u16 __user *)dst);
			if (*errp)
				return isum;
			src += 2;
			dst += 2;
			len -= 2;
		}
	}

	*errp = 0;
	stac();
	ret = csum_partial_copy_generic(src, (void __force *)dst,
					len, isum, NULL, errp);
	clac();
	return ret;
}
EXPORT_SYMBOL(csum_partial_copy_to_user);

/**
 * csum_partial_copy_nocheck - Copy and checksum.
 * @src: source address
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @sum: initial sum that is added into the result (32bit unfolded)
 *
 * Returns a 32bit unfolded checksum of the buffer.
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);

__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, unsigned short proto, __wsum sum)
{
	__u64 rest, sum64;

	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

	asm("	addq (%[saddr]),%[sum]\n"
	    "	adcq 8(%[saddr]),%[sum]\n"
	    "	adcq (%[daddr]),%[sum]\n"
	    "	adcq 8(%[daddr]),%[sum]\n"
	    "	adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
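
/*
 * A minimal sketch, outside the kernel, of what the inline assembly in
 * csum_ipv6_magic() above computes: the IPv6 pseudo-header words are
 * accumulated with 64-bit add-with-carry, and the carries are folded
 * back into the low 64 bits.  The helper name ipv6_pseudo_sum64() and
 * the unsigned __int128 accumulator are assumptions for illustration,
 * not kernel code.  The result would then be reduced exactly as the C
 * tail of csum_ipv6_magic() does, via add32_with_carry() and
 * csum_fold().
 */
#include <stdint.h>

static uint64_t ipv6_pseudo_sum64(const uint64_t saddr[2],
				  const uint64_t daddr[2],
				  uint64_t rest)
{
	unsigned __int128 acc = rest;	/* len + proto + initial sum */

	acc += saddr[0];		/* addq (%[saddr]),%[sum]  */
	acc += saddr[1];		/* adcq 8(%[saddr]),%[sum] */
	acc += daddr[0];		/* adcq (%[daddr]),%[sum]  */
	acc += daddr[1];		/* adcq 8(%[daddr]),%[sum] */

	/* Like the final adcq $0: add the carries back into the low 64 bits. */
	return (uint64_t)acc + (uint64_t)(acc >> 64);
}
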
v3.1
/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
 */
#include <asm/checksum.h>
#include <linux/module.h>

/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum isum, int *errp)
{
	might_sleep();
	*errp = 0;

	if (!likely(access_ok(VERIFY_READ, src, len)))
		goto out_err;

	/*
	 * Why 6, not 7? Handling odd addresses would require
	 * considerable complications to fix up the checksum, which
	 * is defined as a 16bit accumulator. The alignment fixup
	 * code is primarily for performance compatibility with
	 * 32bit, and that handles odd addresses slowly too.
	 */
	if (unlikely((unsigned long)src & 6)) {
		while (((unsigned long)src & 6) && len >= 2) {
			__u16 val16;

			*errp = __get_user(val16, (const __u16 __user *)src);
			if (*errp)
				return isum;

			*(__u16 *)dst = val16;
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			src += 2;
			dst += 2;
			len -= 2;
		}
	}
	isum = csum_partial_copy_generic((__force const void *)src,
				dst, len, isum, errp, NULL);
	if (unlikely(*errp))
		goto out_err;

	return isum;

out_err:
	*errp = -EFAULT;
	memset(dst, 0, len);

	return isum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);

/**
 * csum_partial_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
{
	might_sleep();

	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
		*errp = -EFAULT;
		return 0;
	}

	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 val16 = *(__u16 *)src;

			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			*errp = __put_user(val16, (__u16 __user *)dst);
			if (*errp)
				return isum;
			src += 2;
			dst += 2;
			len -= 2;
		}
	}

	*errp = 0;
	return csum_partial_copy_generic(src, (void __force *)dst,
					 len, isum, NULL, errp);
}
EXPORT_SYMBOL(csum_partial_copy_to_user);

/**
 * csum_partial_copy_nocheck - Copy and checksum.
 * @src: source address
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @sum: initial sum that is added into the result (32bit unfolded)
 *
 * Returns a 32bit unfolded checksum of the buffer.
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);

__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, unsigned short proto, __wsum sum)
{
	__u64 rest, sum64;

	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

	asm("	addq (%[saddr]),%[sum]\n"
	    "	adcq 8(%[saddr]),%[sum]\n"
	    "	adcq (%[daddr]),%[sum]\n"
	    "	adcq 8(%[daddr]),%[sum]\n"
	    "	adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
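
/*
 * A minimal sketch, also outside the kernel, of the folding arithmetic
 * both versions above rely on: add32_with_carry() is a 32-bit add with
 * end-around carry, and csum_fold() reduces a 32-bit unfolded sum to
 * the final 16-bit ones'-complement checksum.  The names fold32() and
 * fold16() are illustrative stand-ins, not kernel APIs.
 */
#include <stdint.h>

static uint32_t fold32(uint32_t a, uint32_t b)	/* like add32_with_carry() */
{
	uint64_t t = (uint64_t)a + b;

	return (uint32_t)t + (uint32_t)(t >> 32);	/* add the carry back in */
}

static uint16_t fold16(uint32_t sum)		/* like csum_fold() */
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half in */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any new carry */

	return (uint16_t)~sum;			/* final complement */
}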