/*
 * Modified to interface to the Linux kernel
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/* --------------------------------------------------------------------------
 * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
 * This implementation is hereby placed in the public domain.
 * The authors offer no warranty. Use at your own risk.
 * Please send bug reports to the authors.
 * Last modified: 17 APR 08, 1700 PDT
 * ----------------------------------------------------------------------- */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/vmac.h>
#include <crypto/internal/hash.h>

/*
 * Constants and masks
 */
#define UINT64_C(x) x##ULL
static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime */
static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask */
static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask */
static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask */

#define pe64_to_cpup le64_to_cpup	/* Prefer little endian */

#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

#define ADD128(rh, rl, ih, il) \
	do { \
		u64 _il = (il); \
		(rl) += (_il); \
		if ((rl) < (_il)) \
			(rh)++; \
		(rh) += (ih); \
	} while (0)

#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */ \
	do { \
		u64 _i1 = (i1), _i2 = (i2); \
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
		rh = MUL32(_i1>>32, _i2>>32); \
		rl = MUL32(_i1, _i2); \
		ADD128(rh, rl, (m >> 32), (m << 32)); \
	} while (0)

#define MUL64(rh, rl, i1, i2) \
	do { \
		u64 _i1 = (i1), _i2 = (i2); \
		u64 m1 = MUL32(_i1, _i2>>32); \
		u64 m2 = MUL32(_i1>>32, _i2); \
		rh = MUL32(_i1>>32, _i2>>32); \
		rl = MUL32(_i1, _i2); \
		ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
		ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
	} while (0)
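
/*
 * For reference, the schoolbook expansion MUL64 implements: write
 * i1 = 2^32*a + b and i2 = 2^32*c + d, so that
 *
 *	i1*i2 = 2^64*(a*c) + 2^32*(a*d + b*c) + b*d
 *
 * rh:rl is seeded with a*c : b*d and each 32x32->64-bit cross product
 * is folded in shifted left by 32 via ADD128.  PMUL64 instead sums the
 * two cross products before folding, which is only safe when the
 * operands' top bits are clear enough that a*d + b*c cannot carry out
 * of 64 bits; that is why the polynomial key below is masked with
 * mpoly.
 */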

/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times; once for
 * 64-bit architectures, once for 32-bit SSE2 architectures, and once
 * for all remaining (32-bit) architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */
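
/*
 * The NH function all of the variants below compute, stated once: for
 * 64-bit message words m[] and key words k[],
 *
 *	NH(m, k) = sum over even i of
 *		(m[i] + k[i]) * (m[i+1] + k[i+1])  mod 2^128
 *
 * where each addition is mod 2^64 and each product is a full
 * 64x64->128-bit multiply.  The unrolled versions are algebraically
 * identical and differ only in how many word pairs they fold per loop
 * iteration.
 */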

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl) \
	do { \
		int i; u64 th, tl; \
		rh = rl = 0; \
		for (i = 0; i < nw; i += 2) { \
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
			ADD128(rh, rl, th, tl); \
		} \
	} while (0)

#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
	do { \
		int i; u64 th, tl; \
		rh1 = rl1 = rh = rl = 0; \
		for (i = 0; i < nw; i += 2) { \
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
			ADD128(rh, rl, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
			ADD128(rh1, rl1, th, tl); \
		} \
	} while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64 bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
	do { \
		int i; u64 th, tl; \
		rh = rl = 0; \
		for (i = 0; i < nw; i += 8) { \
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
			ADD128(rh, rl, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
			ADD128(rh, rl, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
			ADD128(rh, rl, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
			ADD128(rh, rl, th, tl); \
		} \
	} while (0)

#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
	do { \
		int i; u64 th, tl; \
		rh1 = rl1 = rh = rl = 0; \
		for (i = 0; i < nw; i += 8) { \
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
			ADD128(rh, rl, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
			ADD128(rh1, rl1, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
			ADD128(rh, rl, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
				pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
			ADD128(rh1, rl1, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
			ADD128(rh, rl, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
				pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
			ADD128(rh1, rl1, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
			ADD128(rh, rl, th, tl); \
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
				pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
			ADD128(rh1, rl1, th, tl); \
		} \
	} while (0)
#endif

#define poly_step(ah, al, kh, kl, mh, ml) \
	do { \
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
		/* compute ab*cd, put bd into result registers */ \
		PMUL64(t3h, t3l, al, kh); \
		PMUL64(t2h, t2l, ah, kl); \
		PMUL64(t1h, t1l, ah, 2*kh); \
		PMUL64(ah, al, al, kl); \
		/* add 2 * ac to result */ \
		ADD128(ah, al, t1h, t1l); \
		/* add together ad + bc */ \
		ADD128(t2h, t2l, t3h, t3l); \
		/* now (ah,al), (t2l,2*t2h) need summing */ \
		/* first add the high registers, carrying into t2h */ \
		ADD128(t2h, ah, z, t2l); \
		/* double t2h and add top bit of ah */ \
		t2h = 2 * t2h + (ah >> 63); \
		ah &= m63; \
		/* now add the low registers */ \
		ADD128(ah, al, mh, ml); \
		ADD128(ah, al, z, t2h); \
	} while (0)
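
/*
 * What one poly_step evaluates, for reference: with the accumulator
 * a = 2^64*ah + al, the key k = 2^64*kh + kl, and the NH output
 * m = 2^64*mh + ml,
 *
 *	a = (a * k + m) mod (2^127 - 1)
 *
 * i.e. one Horner step of the L2 polynomial hash over GF(2^127 - 1).
 * The accumulator is kept only partially reduced (ah is masked to 63
 * bits); full reduction is deferred to l3hash().
 */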

#else /* ! CONFIG_64BIT */

#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl) \
	do { \
		u64 t1, t2, m1, m2, t; \
		int i; \
		rh = rl = t = 0; \
		for (i = 0; i < nw; i += 2) { \
			t1 = pe64_to_cpup(mp+i) + kp[i]; \
			t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
			m2 = MUL32(t1 >> 32, t2); \
			m1 = MUL32(t1, t2 >> 32); \
			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
				MUL32(t1, t2)); \
			rh += (u64)(u32)(m1 >> 32) \
				+ (u32)(m2 >> 32); \
			t += (u64)(u32)m1 + (u32)m2; \
		} \
		ADD128(rh, rl, (t >> 32), (t << 32)); \
	} while (0)
#endif

static void poly_step_func(u64 *ahi, u64 *alo,
			const u64 *kh, const u64 *kl,
			const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q = MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q = MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

#define poly_step(ah, al, kh, kl, mh, ml) \
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))

#endif /* end of specialized NH and poly definitions */

/* At least nh_16 is defined. Define others as needed here */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
	do { \
		nh_16(mp, kp, nw, rh, rl); \
		nh_16(mp, ((kp)+2), nw, rh2, rl2); \
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
	do { \
		nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
	} while (0)
#endif

static void vhash_abort(struct vmac_ctx *ctx)
{
	ctx->polytmp[0] = ctx->polykey[0];
	ctx->polytmp[1] = ctx->polykey[1];
	ctx->first_block_processed = 0;
}

static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64-1)) & 257;
	return rl;
}
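
/*
 * In summary, l3hash computes: take the partially reduced 127-bit pair
 * (p1,p2), add the residual bit count into the high word, and fully
 * reduce mod 2^127 - 1; write the result as a*(2^64 - 2^32) + b; then
 * return
 *
 *	((a + k1) mod p64) * ((b + k2) mod p64)  mod p64
 *
 * with p64 = 2^64 - 257.  Key setup rejects l3key words >= p64, so a
 * single conditional correction by 257 suffices at each step.
 */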

static void vhash_update(const unsigned char *m,
			unsigned int mbytes, /* Multiple of VMAC_NHBYTES (zero is a no-op) */
			struct vmac_ctx *ctx)
{
	u64 rh, rl, *mptr;
	const u64 *kptr = (u64 *)ctx->nhkey;
	int i;
	u64 ch, cl;
	u64 pkh = ctx->polykey[0];
	u64 pkl = ctx->polykey[1];

	if (!mbytes)
		return;

	BUG_ON(mbytes % VMAC_NHBYTES);

	mptr = (u64 *)m;
	i = mbytes / VMAC_NHBYTES;	/* Must be non-zero */

	ch = ctx->polytmp[0];
	cl = ctx->polytmp[1];

	if (!ctx->first_block_processed) {
		ctx->first_block_processed = 1;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		i--;
	}

	while (i--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}

	ctx->polytmp[0] = ch;
	ctx->polytmp[1] = cl;
}

static u64 vhash(unsigned char m[], unsigned int mbytes,
		u64 *tagl, struct vmac_ctx *ctx)
{
	u64 rh, rl, *mptr;
	const u64 *kptr = (u64 *)ctx->nhkey;
	int i, remaining;
	u64 ch, cl;
	u64 pkh = ctx->polykey[0];
	u64 pkl = ctx->polykey[1];

	mptr = (u64 *)m;
	i = mbytes / VMAC_NHBYTES;
	remaining = mbytes % VMAC_NHBYTES;

	if (ctx->first_block_processed) {
		ch = ctx->polytmp[0];
		cl = ctx->polytmp[1];
	} else if (i) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
		ch &= m62;
		ADD128(ch, cl, pkh, pkl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		i--;
	} else if (remaining) {
		nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
		ch &= m62;
		ADD128(ch, cl, pkh, pkl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		goto do_l3;
	} else { /* Empty String */
		ch = pkh; cl = pkl;
		goto do_l3;
	}

	while (i--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}
	if (remaining) {
		nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
	}

do_l3:
	vhash_abort(ctx);
	remaining *= 8;
	return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
}

static u64 vmac(unsigned char m[], unsigned int mbytes,
		const unsigned char n[16], u64 *tagl,
		struct vmac_ctx_t *ctx)
{
	u64 *in_n, *out_p;
	u64 p, h;
	int i;

	in_n = ctx->__vmac_ctx.cached_nonce;
	out_p = ctx->__vmac_ctx.cached_aes;

	i = n[15] & 1;
	if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
		in_n[0] = *(u64 *)(n);
		in_n[1] = *(u64 *)(n+8);
		((unsigned char *)in_n)[15] &= 0xFE;
		crypto_cipher_encrypt_one(ctx->child,
			(unsigned char *)out_p, (unsigned char *)in_n);

		((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
	}
	p = be64_to_cpup(out_p + i);
	h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
	return le64_to_cpu(p + h);
}
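
/*
 * The tag construction above, spelled out: with h = VHASH(m) and the
 * 16-byte nonce n,
 *
 *	tag = (h + AES_K(n with low bit cleared)[i]) mod 2^64
 *
 * where i = n[15] & 1 selects the high or low 64-bit half of the AES
 * output block.  The encryption result is cached with the stored
 * nonce's low bit flipped, so two nonces differing only in that bit
 * share a single block cipher invocation.
 */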

static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
{
	u64 in[2] = {0}, out[2];
	unsigned i;
	int err = 0;

	err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
	if (err)
		return err;

	/* Fill nh key */
	((unsigned char *)in)[0] = 0x80;
	for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
		crypto_cipher_encrypt_one(ctx->child,
			(unsigned char *)out, (unsigned char *)in);
		ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
		ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
		((unsigned char *)in)[15] += 1;
	}

	/* Fill poly key */
	((unsigned char *)in)[0] = 0xC0;
	in[1] = 0;
	for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
		crypto_cipher_encrypt_one(ctx->child,
			(unsigned char *)out, (unsigned char *)in);
		ctx->__vmac_ctx.polytmp[i] =
			ctx->__vmac_ctx.polykey[i] =
				be64_to_cpup(out) & mpoly;
		ctx->__vmac_ctx.polytmp[i+1] =
			ctx->__vmac_ctx.polykey[i+1] =
				be64_to_cpup(out+1) & mpoly;
		((unsigned char *)in)[15] += 1;
	}

	/* Fill ip key */
	((unsigned char *)in)[0] = 0xE0;
	in[1] = 0;
	for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
		do {
			crypto_cipher_encrypt_one(ctx->child,
				(unsigned char *)out, (unsigned char *)in);
			ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
			ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
			((unsigned char *)in)[15] += 1;
		} while (ctx->__vmac_ctx.l3key[i] >= p64
			|| ctx->__vmac_ctx.l3key[i+1] >= p64);
	}

	/* Invalidate nonce/aes cache and reset other elements */
	ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
	ctx->__vmac_ctx.cached_nonce[1] = (u64)0;  /* Ensure illegal nonce */
	ctx->__vmac_ctx.first_block_processed = 0;

	return err;
}
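
/*
 * Key derivation recap: every subkey word above is AES_K applied to a
 * tagged counter block -- first byte 0x80 for the NH key, 0xC0 for the
 * polynomial key (then masked with mpoly), 0xE0 for the L3 key (with
 * candidates >= p64 rejected and regenerated).  Only the AES key
 * schedule in ctx->child and these derived words persist in the
 * context; the raw user key itself is not stored.
 */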

static int vmac_setkey(struct crypto_shash *parent,
		const u8 *key, unsigned int keylen)
{
	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);

	if (keylen != VMAC_KEY_LEN) {
		crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return vmac_set_key((u8 *)key, ctx);
}

static int vmac_init(struct shash_desc *pdesc)
{
	return 0;
}

static int vmac_update(struct shash_desc *pdesc, const u8 *p,
		unsigned int len)
{
	struct crypto_shash *parent = pdesc->tfm;
	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
	int expand;
	int min;

	expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
			VMAC_NHBYTES - ctx->partial_size : 0;

	min = len < expand ? len : expand;

	memcpy(ctx->partial + ctx->partial_size, p, min);
	ctx->partial_size += min;

	if (len < expand)
		return 0;

	vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
	ctx->partial_size = 0;

	len -= expand;
	p += expand;

	if (len % VMAC_NHBYTES) {
		memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
			len % VMAC_NHBYTES);
		ctx->partial_size = len % VMAC_NHBYTES;
	}

	vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);

	return 0;
}
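
/*
 * A worked example of the buffering above, assuming VMAC_NHBYTES is 128
 * (its value in <crypto/vmac.h>): an update of 100 bytes is copied into
 * ctx->partial and hashing is deferred entirely.  A following update of
 * 60 bytes first tops the buffer up to 128 and hashes it as one block,
 * then stores the remaining 32 bytes back in ctx->partial.  As a
 * result, vhash_update() only ever sees whole multiples of
 * VMAC_NHBYTES.
 */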

static int vmac_final(struct shash_desc *pdesc, u8 *out)
{
	struct crypto_shash *parent = pdesc->tfm;
	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
	vmac_t mac;
	u8 nonce[16] = {};

	/* vmac() ends up accessing outside the array bounds that
	 * we specify.  It appears to access up to the next 2-word
	 * boundary.  We'll just be extra cautious and zero the
	 * unwritten bytes in the buffer.
	 */
	if (ctx->partial_size) {
		memset(ctx->partial + ctx->partial_size, 0,
			VMAC_NHBYTES - ctx->partial_size);
	}
	mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
	memcpy(out, &mac, sizeof(vmac_t));
	memset(&mac, 0, sizeof(vmac_t));
	memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
	ctx->partial_size = 0;
	return 0;
}

static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->child);
}

static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
	if (err)
		return err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
			CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = shash_alloc_instance("vmac", alg);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	err = crypto_init_spawn(shash_instance_ctx(inst), alg,
			shash_crypto_instance(inst),
			CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto out_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_alignmask = alg->cra_alignmask;

	inst->alg.digestsize = sizeof(vmac_t);
	inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
	inst->alg.base.cra_init = vmac_init_tfm;
	inst->alg.base.cra_exit = vmac_exit_tfm;

	inst->alg.init = vmac_init;
	inst->alg.update = vmac_update;
	inst->alg.final = vmac_final;
	inst->alg.setkey = vmac_setkey;

	err = shash_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		shash_free_instance(shash_crypto_instance(inst));
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct crypto_template vmac_tmpl = {
	.name = "vmac",
	.create = vmac_create,
	.free = shash_free_instance,
	.module = THIS_MODULE,
};

static int __init vmac_module_init(void)
{
	return crypto_register_template(&vmac_tmpl);
}

static void __exit vmac_module_exit(void)
{
	crypto_unregister_template(&vmac_tmpl);
}

module_init(vmac_module_init);
module_exit(vmac_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
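
/*
 * A minimal usage sketch through the generic shash API, once this
 * template is registered (illustrative only: error handling is elided
 * and "msg"/"msg_len" stand for the caller's data):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("vmac(aes)", 0, 0);
 *	struct shash_desc *desc = kmalloc(sizeof(*desc) +
 *			crypto_shash_descsize(tfm), GFP_KERNEL);
 *	u8 key[VMAC_KEY_LEN];	/* 16-byte AES key */
 *	u8 tag[sizeof(vmac_t)];	/* 64-bit tag */
 *
 *	get_random_bytes(key, sizeof(key));
 *	crypto_shash_setkey(tfm, key, sizeof(key));
 *	desc->tfm = tfm;
 *	crypto_shash_digest(desc, msg, msg_len, tag);
 *
 * Note that vmac_final() supplies an all-zero nonce, so a key must not
 * authenticate more than one message unless nonce handling is layered
 * on externally.
 */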