v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86_64/lib/csum-partial.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

static inline __wsum csum_finalize_sum(u64 temp64)
{
	return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);
}
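
As an aside (illustration only, not kernel code): adding temp64 to its 32-bit
rotation makes the upper half of the sum equal the end-around-carry addition
of the two 32-bit halves, which the final shift then extracts. A portable
sketch of the same fold:

#include <stdint.h>

/* Portable equivalent of csum_finalize_sum()'s fold (illustration only) */
static uint32_t fold64_ref(uint64_t sum)
{
	uint64_t t = (uint64_t)(uint32_t)sum + (uint32_t)(sum >> 32);

	return (uint32_t)t + (uint32_t)(t >> 32);	/* end-around carry */
}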

static inline unsigned long update_csum_40b(unsigned long sum, const unsigned long m[5])
{
	asm("addq %1,%0\n\t"
	    "adcq %2,%0\n\t"
	    "adcq %3,%0\n\t"
	    "adcq %4,%0\n\t"
	    "adcq %5,%0\n\t"
	    "adcq $0,%0"
		:"+r" (sum)
		:"m" (m[0]), "m" (m[1]), "m" (m[2]),
		 "m" (m[3]), "m" (m[4]));
	return sum;
}
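
For illustration (not from the kernel): the adc chain above can be modeled
portably with a 128-bit accumulator; the final carry fold mirrors the trailing
adcq $0. The helper name here is hypothetical:

/* Portable model of update_csum_40b() using GCC/Clang __int128 (sketch) */
static unsigned long update_csum_40b_ref(unsigned long sum,
					 const unsigned long m[5])
{
	unsigned __int128 acc = sum;
	int i;

	for (i = 0; i < 5; i++)
		acc += m[i];
	/* fold the accumulated carries back in, like the trailing adcq $0 */
	return (unsigned long)acc + (unsigned long)(acc >> 64);
}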

/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32-bit checksum.
 *
 * This isn't as time critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 temp64 = (__force u64)sum;

	/* Do two 40-byte chunks in parallel to get better ILP */
	if (likely(len >= 80)) {
		u64 temp64_2 = 0;
		do {
			temp64 = update_csum_40b(temp64, buff);
			temp64_2 = update_csum_40b(temp64_2, buff + 40);
			buff += 80;
			len -= 80;
		} while (len >= 80);

		asm("addq %1,%0\n\t"
		    "adcq $0,%0"
		    :"+r" (temp64): "r" (temp64_2));
	}

	/*
	 * len == 40 is the hot case due to IPv6 headers, so return
	 * early for that exact case without checking the tail bytes.
	 */
	if (len >= 40) {
		temp64 = update_csum_40b(temp64, buff);
		len -= 40;
		if (!len)
			return csum_finalize_sum(temp64);
		buff += 40;
	}

	if (len & 32) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[32])buff));
		buff += 32;
	}
	if (len & 16) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[16])buff));
		buff += 16;
	}
	if (len & 8) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[8])buff));
		buff += 8;
	}
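	/*
	 * 1..7 trailing bytes: load_unaligned_zeropad() safely reads a
	 * full 8-byte word even at the end of a mapping, and the shift
	 * pair clears the bytes beyond len ((-len << 3) & 63 equals
	 * (8 - len % 8) * 8 for the remaining length).
	 */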
	if (len & 7) {
		unsigned int shift = (-len << 3) & 63;
		unsigned long trail;

		trail = (load_unaligned_zeropad(buff) << shift) >> shift;

		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [trail] "r"(trail));
	}
	return csum_finalize_sum(temp64);
}
EXPORT_SYMBOL(csum_partial);
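
For reference (illustration only, not part of the kernel source): csum_partial()
returns the raw 32-bit ones'-complement sum; callers fold it to 16 bits with
csum_fold(). A self-contained userspace reference per RFC 1071, handy for
sanity-checking the routine on sample buffers (the helper name is hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* RFC 1071 reference: sum 16-bit words, end-around carry, complement */
static uint16_t internet_checksum_ref(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len >= 2) {
		uint16_t word;

		memcpy(&word, p, 2);	/* native byte order, as on x86 */
		sum += word;
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte (little-endian) */
		sum += *p;
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

On a little-endian machine this should agree with
csum_fold(csum_partial(buf, len, 0)) read back as a raw 16-bit value.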

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);
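
Aside (illustration only): csum_fold(), used above, reduces the 32-bit __wsum
to the final 16-bit ones'-complement checksum, roughly:

/* Portable picture of what csum_fold() computes (illustration only) */
static uint16_t csum_fold_ref(uint32_t wsum)
{
	wsum = (wsum & 0xffff) + (wsum >> 16);	/* fold the high half */
	wsum = (wsum & 0xffff) + (wsum >> 16);	/* fold a possible carry */
	return (uint16_t)~wsum;			/* ones' complement */
}
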
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86_64/lib/csum-partial.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

static inline unsigned short from32to16(unsigned a)
{
	unsigned short b = a >> 16;
	asm("addw %w2,%w0\n\t"
	    "adcw $0,%w0\n"
	    : "=r" (b)
	    : "0" (b), "r" (a));
	return b;
}
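
Aside (illustration only): from32to16() is the usual 32-to-16-bit
end-around-carry fold, portably:

/* Portable equivalent of from32to16() (illustration only) */
static uint16_t from32to16_ref(uint32_t a)
{
	uint32_t t = (a & 0xffff) + (a >> 16);	/* 17-bit intermediate */

	return (uint16_t)(t + (t >> 16));	/* fold the carry back in */
}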

/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32-bit checksum.
 *
 * This isn't as time critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 temp64 = (__force u64)sum;
	unsigned odd, result;

	odd = 1 & (unsigned long) buff;
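	/*
	 * Odd start address: consume the first byte, then checksum the
	 * remainder byte-swapped. Ones'-complement addition commutes
	 * with byte swapping, so the rotation below pre-swaps the
	 * incoming sum and the result is swapped back after the fold.
	 */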
	if (unlikely(odd)) {
		if (unlikely(len == 0))
			return sum;
		temp64 = ror32((__force u32)sum, 8);
		temp64 += (*(unsigned char *)buff << 8);
		len--;
		buff++;
	}

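	/*
	 * Main loop: a single chain of adcq instructions, where each
	 * add waits on the previous carry. The v6.8 version above
	 * splits this work into two independent 40-byte chunks for
	 * better instruction-level parallelism.
	 */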
	while (unlikely(len >= 64)) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq 4*8(%[src]),%[res]\n\t"
		    "adcq 5*8(%[src]),%[res]\n\t"
		    "adcq 6*8(%[src]),%[res]\n\t"
		    "adcq 7*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [src] "r" (buff)
		    : "memory");
		buff += 64;
		len -= 64;
	}

	if (len & 32) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [src] "r" (buff)
			: "memory");
		buff += 32;
	}
	if (len & 16) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [src] "r" (buff)
			: "memory");
		buff += 16;
	}
	if (len & 8) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [src] "r" (buff)
			: "memory");
		buff += 8;
	}
	if (len & 7) {
		unsigned int shift = (8 - (len & 7)) * 8;
		unsigned long trail;

		trail = (load_unaligned_zeropad(buff) << shift) >> shift;

		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [trail] "r" (trail));
	}
	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
	if (unlikely(odd)) {
		result = from32to16(result);
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
	}
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);