/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
 * calculation.
 * CRC32 polynomial: 0x04c11db7 (BE) / 0xEDB88320 (LE)
 * PCLMULQDQ is an instruction introduced alongside Intel SSE4.2; the
 * reference can be found at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *	      Alexander Boyko <Alexander_Boyko@xyratex.com>
 */

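/*
 * The whole scheme rests on the linearity of CRC over GF(2): splitting a
 * message as M(x) = A(x) * x^512 + B(x) (512 bits = one 64-byte cache
 * line) gives
 *
 *	M(x) mod P(x) = (A(x) * (x^512 mod P(x)) xor B(x)) mod P(x)
 *
 * so each cache line can be folded into the next with two carry-less
 * multiplies per 128-bit lane against precomputed constants of the form
 * x^N mod P(x) (the exact bit-reflected forms are spelled out with each
 * constant below).  This is the standard folding identity from Intel's
 * white paper on CRC computation with PCLMULQDQ.
 */
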
#include <linux/linkage.h>


.section .rodata
.align 16
/*
 * [(x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
 * #define CONSTANT_R1  0x154442bd4LL
 *
 * [(x4*128-32 mod P(x) << 32)]' << 1   = 0x1c6e41596
 * #define CONSTANT_R2  0x1c6e41596LL
 */
.Lconstant_R2R1:
	.octa 0x00000001c6e415960000000154442bd4
/*
 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
 * #define CONSTANT_R3  0x1751997d0LL
 *
 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
 * #define CONSTANT_R4  0x0ccaa009eLL
 */
.Lconstant_R4R3:
	.octa 0x00000000ccaa009e00000001751997d0
/*
 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
 * #define CONSTANT_R5  0x163cd6124LL
 */
.Lconstant_R5:
	.octa 0x00000000000000000000000163cd6124
.Lconstant_mask32:
	.octa 0x000000000000000000000000FFFFFFFF
/*
 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
 *
 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL
 * #define CONSTANT_RU  0x1F7011641LL
 */
.Lconstant_RUpoly:
	.octa 0x00000001F701164100000001DB710641

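/*
 * The constants above follow the pattern bitreflect(x^N mod P(x)) << 1.
 * A short userspace sketch that reproduces them (xn_mod_p and
 * bitreflect32 are illustrative helpers written for this comment, not
 * kernel APIs):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define CRCPOLY 0x104C11DB7ULL		// P(x), degree 32
 *
 *	static uint32_t xn_mod_p(unsigned int n)	// x^n mod P(x) over GF(2)
 *	{
 *		uint64_t r = 1;
 *
 *		while (n--) {
 *			r <<= 1;
 *			if (r & (1ULL << 32))
 *				r ^= CRCPOLY;
 *		}
 *		return (uint32_t)r;
 *	}
 *
 *	static uint32_t bitreflect32(uint32_t v)
 *	{
 *		uint32_t r = 0;
 *
 *		for (int i = 0; i < 32; i++)
 *			r |= ((v >> i) & 1U) << (31 - i);
 *		return r;
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned int n[] = { 4*128+32, 4*128-32, 128+32, 128-32, 64 };
 *
 *		for (int i = 0; i < 5; i++)	// prints R1..R5
 *			printf("R%d = 0x%llx\n", i + 1,
 *			       (unsigned long long)bitreflect32(xn_mod_p(n[i])) << 1);
 *		return 0;
 *	}
 *
 * CONSTANT_RU is analogous but uses the quotient x^64 / P(x) (33 bits,
 * bit-reflected) instead of a remainder, and CRCPOLY_TRUE_LE_FULL is the
 * 33-bit bit-reflection of P(x) itself.
 */
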
#define CONSTANT %xmm0

#ifdef __x86_64__
#define BUF     %rdi
#define LEN     %rsi
#define CRC     %edx
#else
#define BUF     %eax
#define LEN     %edx
#define CRC     %ecx
#endif
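
/*
 * 32-bit kernels are built with -mregparm=3, so the three arguments
 * arrive in %eax, %edx and %ecx rather than on the stack (hence the
 * register choices above).
 */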


.text
/**
 *      Calculate crc32
 *      BUF - buffer (16 bytes aligned)
 *      LEN - size of the buffer (16 bytes aligned), LEN should be greater than 63
 *      CRC - initial crc32
 *      returns crc32 in %eax
 *      uint crc32_pclmul_le_16(unsigned char const *buffer,
 *	                     size_t len, uint crc32)
 */

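/*
 * Callers must satisfy the alignment and length contract above.  A sketch
 * of such a wrapper, modeled on the kernel's crc32-pclmul_glue.c (the
 * constants and fallback below are shown for illustration):
 *
 *	#define SCALE_F		16	// data must be 16-byte aligned
 *	#define PCLMUL_MIN_LEN	64	// minimum size for the fast path
 *
 *	static u32 crc32_pclmul(u32 crc, const u8 *p, size_t len)
 *	{
 *		size_t prealign, main_len;
 *
 *		if (len < PCLMUL_MIN_LEN + SCALE_F - 1 || !crypto_simd_usable())
 *			return crc32_le(crc, p, len);	// generic fallback
 *
 *		if ((unsigned long)p & (SCALE_F - 1)) {	// align p to 16 bytes
 *			prealign = SCALE_F - ((unsigned long)p & (SCALE_F - 1));
 *			crc = crc32_le(crc, p, prealign);
 *			len -= prealign;
 *			p += prealign;
 *		}
 *
 *		main_len = len & ~(size_t)(SCALE_F - 1);	// multiple of 16
 *
 *		kernel_fpu_begin();	// SSE registers need FPU context
 *		crc = crc32_pclmul_le_16(p, main_len, crc);
 *		kernel_fpu_end();
 *
 *		if (len & (SCALE_F - 1))	// tail shorter than 16 bytes
 *			crc = crc32_le(crc, p + main_len, len & (SCALE_F - 1));
 *		return crc;
 *	}
 */
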
SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
	movdqa  (BUF), %xmm1
	movdqa  0x10(BUF), %xmm2
	movdqa  0x20(BUF), %xmm3
	movdqa  0x30(BUF), %xmm4
	movd    CRC, CONSTANT
	pxor    CONSTANT, %xmm1
	sub     $0x40, LEN
	add     $0x40, BUF
	cmp     $0x40, LEN
	jb      .Lless_64

#ifdef __x86_64__
	movdqa .Lconstant_R2R1(%rip), CONSTANT
#else
	movdqa .Lconstant_R2R1, CONSTANT
#endif

.Lloop_64:/*  64 bytes Full cache line folding */
	prefetchnta    0x40(BUF)
	movdqa  %xmm1, %xmm5
	movdqa  %xmm2, %xmm6
	movdqa  %xmm3, %xmm7
#ifdef __x86_64__
	movdqa  %xmm4, %xmm8
#endif
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x00, CONSTANT, %xmm2
	pclmulqdq $0x00, CONSTANT, %xmm3
#ifdef __x86_64__
	pclmulqdq $0x00, CONSTANT, %xmm4
#endif
	pclmulqdq $0x11, CONSTANT, %xmm5
	pclmulqdq $0x11, CONSTANT, %xmm6
	pclmulqdq $0x11, CONSTANT, %xmm7
#ifdef __x86_64__
	pclmulqdq $0x11, CONSTANT, %xmm8
#endif
	pxor    %xmm5, %xmm1
	pxor    %xmm6, %xmm2
	pxor    %xmm7, %xmm3
#ifdef __x86_64__
	pxor    %xmm8, %xmm4
#else
	/* xmm8 unsupported for x32 */
	movdqa  %xmm4, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm4
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm4
#endif

	pxor    (BUF), %xmm1
	pxor    0x10(BUF), %xmm2
	pxor    0x20(BUF), %xmm3
	pxor    0x30(BUF), %xmm4

	sub     $0x40, LEN
	add     $0x40, BUF
	cmp     $0x40, LEN
	jge     .Lloop_64
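
/*
 * Each iteration above performs, per 128-bit lane, the fold
 * x = clmul(x.low64, K.low64) xor clmul(x.high64, K.high64) xor next,
 * with K = R2:R1.  The same step with SSE intrinsics (a userspace
 * illustration; fold_16 is not a kernel function):
 *
 *	#include <wmmintrin.h>	// PCLMULQDQ intrinsics
 *
 *	static __m128i fold_16(__m128i x, __m128i k, __m128i next)
 *	{
 *		__m128i lo = _mm_clmulepi64_si128(x, k, 0x00);	// x.lo x k.lo
 *		__m128i hi = _mm_clmulepi64_si128(x, k, 0x11);	// x.hi x k.hi
 *
 *		return _mm_xor_si128(_mm_xor_si128(lo, hi), next);
 *	}
 */
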
.Lless_64:/*  Folding cache line into 128bit */
#ifdef __x86_64__
	movdqa  .Lconstant_R4R3(%rip), CONSTANT
#else
	movdqa  .Lconstant_R4R3, CONSTANT
#endif
	prefetchnta     (BUF)

	movdqa  %xmm1, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm2, %xmm1

	movdqa  %xmm1, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm3, %xmm1

	movdqa  %xmm1, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm4, %xmm1

	cmp     $0x10, LEN
	jb      .Lfold_64
.Lloop_16:/* Folding rest buffer into 128bit */
	movdqa  %xmm1, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    (BUF), %xmm1
	sub     $0x10, LEN
	add     $0x10, BUF
	cmp     $0x10, LEN
	jge     .Lloop_16

.Lfold_64:
	/* perform the last 64 bit fold, also adds 32 zeroes
	 * to the input stream */
	pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
	psrldq  $0x08, %xmm1
	pxor    CONSTANT, %xmm1

	/* final 32-bit fold */
	movdqa  %xmm1, %xmm2
#ifdef __x86_64__
	movdqa  .Lconstant_R5(%rip), CONSTANT
	movdqa  .Lconstant_mask32(%rip), %xmm3
#else
	movdqa  .Lconstant_R5, CONSTANT
	movdqa  .Lconstant_mask32, %xmm3
#endif
	psrldq  $0x04, %xmm2
	pand    %xmm3, %xmm1
	pclmulqdq $0x00, CONSTANT, %xmm1
	pxor    %xmm2, %xmm1

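	/*
	 * What follows is classical Barrett reduction in the bit-reflected
	 * domain: with u = floor(x^64 / P(x)) (CONSTANT_RU, the high qword
	 * of .Lconstant_RUpoly) and P' the reflected polynomial (the low
	 * qword), compute q = low32(T) x u, then crc = (T xor low32(q) x P')
	 * >> 32.  The pand/pextrd pair implements the reflected-domain
	 * masking and the final 32-bit extract.
	 */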
	/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
#ifdef __x86_64__
	movdqa  .Lconstant_RUpoly(%rip), CONSTANT
#else
	movdqa  .Lconstant_RUpoly, CONSTANT
#endif
	movdqa  %xmm1, %xmm2
	pand    %xmm3, %xmm1
	pclmulqdq $0x10, CONSTANT, %xmm1
	pand    %xmm3, %xmm1
	pclmulqdq $0x00, CONSTANT, %xmm1
	pxor    %xmm2, %xmm1
	pextrd  $0x01, %xmm1, %eax

	RET
SYM_FUNC_END(crc32_pclmul_le_16)