/*
 * VMAC: Message Authentication Code using Universal Hashing
 *
 * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
 *
 * Copyright (c) 2009, Intel Corporation.
 * Copyright (c) 2018, Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * Derived from:
 *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
 *	This implementation is hereby placed in the public domain.
 *	The authors offer no warranty. Use at your own risk.
 *	Last modified: 17 APR 08, 1700 PDT
 */

#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>

/*
 * User definable settings.
 */
#define VMAC_TAG_LEN	64
#define VMAC_KEY_SIZE	128	/* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES	128	/* Must be 2^i for some 3 < i < 13; standard = 128 */
#define VMAC_NONCEBYTES	16

/* per-transform (per-key) context */
struct vmac_tfm_ctx {
	struct crypto_cipher *cipher;
	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
	u64 polykey[2*VMAC_TAG_LEN/64];
	u64 l3key[2*VMAC_TAG_LEN/64];
};

/* per-request context */
struct vmac_desc_ctx {
	union {
		u8 partial[VMAC_NHBYTES];	/* partial block */
		__le64 partial_words[VMAC_NHBYTES / 8];
	};
	unsigned int partial_size;	/* size of the partial block */
	bool first_block_processed;
	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
	union {
		u8 bytes[VMAC_NONCEBYTES];
		__be64 pads[VMAC_NONCEBYTES / 8];
	} nonce;
	unsigned int nonce_size;	/* nonce bytes filled so far */
};

/*
 * Constants and masks
 */
#define UINT64_C(x) x##ULL
static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime */
static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask */
static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask */
static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask */

#define pe64_to_cpup le64_to_cpup	/* Prefer little endian */

#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

#define ADD128(rh, rl, ih, il)						\
	do {								\
		u64 _il = (il);						\
		(rl) += (_il);						\
		if ((rl) < (_il))					\
			(rh)++;						\
		(rh) += (ih);						\
	} while (0)

#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m >> 32), (m << 32));			\
	} while (0)

#define MUL64(rh, rl, i1, i2)						\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m1 = MUL32(_i1, _i2>>32);				\
		u64 m2 = MUL32(_i1>>32, _i2);				\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
	} while (0)
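
/*
 * Worked example: MUL64 computes the full 128-bit product by splitting each
 * 64-bit input into 32-bit halves, a = a_hi*2^32 + a_lo and
 * b = b_hi*2^32 + b_lo, then summing the four partial products:
 *
 *	a*b = (a_hi*b_hi << 64) + ((a_hi*b_lo + a_lo*b_hi) << 32) + a_lo*b_lo
 *	       '-> rh              '-> m1, m2                      '-> rl
 *
 * PMUL64 folds m1 and m2 into a single u64 'm'; that addition is only safe
 * from overflow when the inputs have some top bits cleared, which is why
 * the L2 poly keys are masked with 'mpoly' at setkey time.
 */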

/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times; once for
 * 64-bit architectures, once for 32-bit SSE2 architectures, and once for
 * all other 32-bit architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */
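
/*
 * Concretely, over one 16-byte chunk the L1 NH hash computes
 *
 *	NH = (m0 + k0) * (m1 + k1) mod 2^128
 *
 * where m0, m1 are the two little-endian 64-bit message words and k0, k1
 * the corresponding NH key words, with the additions done mod 2^64; a full
 * block just sums VMAC_NHBYTES/16 such products into a 128-bit accumulator.
 */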

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64 bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)			\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)
#endif

#define poly_step(ah, al, kh, kl, mh, ml)				\
	do {								\
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
		/* compute ab*cd, put bd into result registers */	\
		PMUL64(t3h, t3l, al, kh);				\
		PMUL64(t2h, t2l, ah, kl);				\
		PMUL64(t1h, t1l, ah, 2*kh);				\
		PMUL64(ah, al, al, kl);					\
		/* add 2 * ac to result */				\
		ADD128(ah, al, t1h, t1l);				\
		/* add together ad + bc */				\
		ADD128(t2h, t2l, t3h, t3l);				\
		/* now (ah,al), (t2l,2*t2h) need summing */		\
		/* first add the high registers, carrying into t2h */	\
		ADD128(t2h, ah, z, t2l);				\
		/* double t2h and add top bit of ah */			\
		t2h = 2 * t2h + (ah >> 63);				\
		ah &= m63;						\
		/* now add the low registers */				\
		ADD128(ah, al, mh, ml);					\
		ADD128(ah, al, z, t2h);					\
	} while (0)
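
/*
 * In other words, one poly_step performs a Horner step of the L2 hash,
 *
 *	(ah,al) = ((ah,al) * (kh,kl) + (mh,ml)) mod (2^127 - 1)
 *
 * on 127-bit values packed into two u64 halves; the doublings of kh and
 * t2h implement the wrap-around identity 2^128 == 2 (mod 2^127 - 1).
 */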

#else /* ! CONFIG_64BIT */

#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		u64 t1, t2, m1, m2, t;					\
		int i;							\
		rh = rl = t = 0;					\
		for (i = 0; i < nw; i += 2) {				\
			t1 = pe64_to_cpup(mp+i) + kp[i];		\
			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
			m2 = MUL32(t1 >> 32, t2);			\
			m1 = MUL32(t1, t2 >> 32);			\
			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
				MUL32(t1, t2));				\
			rh += (u64)(u32)(m1 >> 32)			\
				+ (u32)(m2 >> 32);			\
			t += (u64)(u32)m1 + (u32)m2;			\
		}							\
		ADD128(rh, rl, (t >> 32), (t << 32));			\
	} while (0)
#endif

static void poly_step_func(u64 *ahi, u64 *alo,
			   const u64 *kh, const u64 *kl,
			   const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q = MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q = MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

#define poly_step(ah, al, kh, kl, mh, ml)				\
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))

#endif	/* end of specialized NH and poly definitions */

/* At least nh_16 is defined. Define the others as needed here. */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
	do {								\
		nh_16(mp, kp, nw, rh, rl);				\
		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)			\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
	} while (0)
#endif

static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64-1)) & 257;
	return rl;
}
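
/*
 * Summary of the above: with the 128-bit L2 output fully reduced mod p127
 * and the message bit-length folded in, the L3 hash returns
 *
 *	((p1 + k1) mod p64) * ((p2 + k2) mod p64) mod p64,  p64 = 2^64 - 257
 *
 * where p1 and p2 have been replaced by the quotient and remainder of the
 * 127-bit value divided by 2^64 - 2^32, per the inline comments.  Each
 * '+ 257' term is a branchless correction for a sum that wrapped past
 * 2^64, i.e. a conditional reduction mod 2^64 - 257.
 */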

/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
			 struct vmac_desc_ctx *dctx,
			 const __le64 *mptr, unsigned int blocks)
{
	const u64 *kptr = tctx->nhkey;
	const u64 pkh = tctx->polykey[0];
	const u64 pkl = tctx->polykey[1];
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];
	u64 rh, rl;

	if (!dctx->first_block_processed) {
		dctx->first_block_processed = true;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		blocks--;
	}

	while (blocks--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}

	dctx->polytmp[0] = ch;
	dctx->polytmp[1] = cl;
}

static int vmac_setkey(struct crypto_shash *tfm,
		       const u8 *key, unsigned int keylen)
{
	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
	__be64 out[2];
	u8 in[16] = { 0 };
	unsigned int i;
	int err;

	if (keylen != VMAC_KEY_LEN)
		return -EINVAL;

	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
	if (err)
		return err;

	/* Fill nh key */
	in[0] = 0x80;
	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->nhkey[i] = be64_to_cpu(out[0]);
		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
		in[15]++;
	}

	/* Fill poly key */
	in[0] = 0xC0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
		in[15]++;
	}

	/* Fill ip key */
	in[0] = 0xE0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
		do {
			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
			tctx->l3key[i] = be64_to_cpu(out[0]);
			tctx->l3key[i+1] = be64_to_cpu(out[1]);
			in[15]++;
		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
	}

	return 0;
}
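
/*
 * The derivation above follows the VMAC key-derivation pattern: each pair
 * of subkey words is the encryption of a distinct 16-byte block whose first
 * byte tags the subkey class (0x80 = NH, 0xC0 = poly, 0xE0 = L3) and whose
 * last byte is a counter, e.g. nhkey[0..1] = E_K(0x80 || 0^14 || 0x00),
 * nhkey[2..3] = E_K(0x80 || 0^14 || 0x01), and so on.  L3 candidates are
 * rejected and regenerated until both words are below p64, so the L3 keys
 * are uniform elements of GF(p64).
 */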

static int vmac_init(struct shash_desc *desc)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->partial_size = 0;
	dctx->first_block_processed = false;
	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
	dctx->nonce_size = 0;
	return 0;
}

static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int n;

	/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
	if (dctx->nonce_size < VMAC_NONCEBYTES) {
		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
		dctx->nonce_size += n;
		p += n;
		len -= n;
	}

	if (dctx->partial_size) {
		n = min(len, VMAC_NHBYTES - dctx->partial_size);
		memcpy(&dctx->partial[dctx->partial_size], p, n);
		dctx->partial_size += n;
		p += n;
		len -= n;
		if (dctx->partial_size == VMAC_NHBYTES) {
			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
			dctx->partial_size = 0;
		}
	}

	if (len >= VMAC_NHBYTES) {
		n = round_down(len, VMAC_NHBYTES);
		/* TODO: 'p' may be misaligned here */
		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
		p += n;
		len -= n;
	}

	if (len) {
		memcpy(dctx->partial, p, len);
		dctx->partial_size = len;
	}

	return 0;
}
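
/*
 * Because the nonce is consumed as the first VMAC_NONCEBYTES of the data
 * stream, a caller computes a tag over (nonce || message).  A minimal usage
 * sketch via the standard shash API (illustrative only; error handling and
 * cleanup elided):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *
 *	crypto_shash_setkey(tfm, key, VMAC_KEY_LEN);
 *	desc->tfm = tfm;
 *	crypto_shash_init(desc);
 *	crypto_shash_update(desc, nonce, VMAC_NONCEBYTES);
 *	crypto_shash_update(desc, msg, msg_len);
 *	crypto_shash_final(desc, tag);		// 8-byte tag
 */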

static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
		       struct vmac_desc_ctx *dctx)
{
	unsigned int partial = dctx->partial_size;
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];

	/* L1 and L2-hash the final block if needed */
	if (partial) {
		/* Zero-pad to next 128-bit boundary */
		unsigned int n = round_up(partial, 16);
		u64 rh, rl;

		memset(&dctx->partial[partial], 0, n - partial);
		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
		rh &= m62;
		if (dctx->first_block_processed)
			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
				  rh, rl);
		else
			ADD128(ch, cl, rh, rl);
	}

	/* L3-hash the 128-bit output of L2-hash */
	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}

static int vmac_final(struct shash_desc *desc, u8 *out)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	int index;
	u64 hash, pad;

	if (dctx->nonce_size != VMAC_NONCEBYTES)
		return -EINVAL;

	/*
	 * The VMAC specification requires a nonce at least 1 bit shorter
	 * than the block cipher's block length, so we actually only accept a
	 * 127-bit nonce.  We define the unused bit to be the first one and
	 * require that it be 0, so the needed prepending of a 0 bit is
	 * implicit.
	 */
	if (dctx->nonce.bytes[0] & 0x80)
		return -EINVAL;

	/* Finish calculating the VHASH of the message */
	hash = vhash_final(tctx, dctx);

	/* Generate pseudorandom pad by encrypting the nonce */
	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
				  dctx->nonce.bytes);
	pad = be64_to_cpu(dctx->nonce.pads[index]);

	/* The VMAC is the sum of VHASH and the pseudorandom pad */
	put_unaligned_be64(hash + pad, out);
	return 0;
}
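
/*
 * Putting it together: for a message M and a 128-bit nonce N whose low bit
 * is b, the 64-bit tag computed above is
 *
 *	tag = (VHASH(M) + E_K(N with b cleared)[b]) mod 2^64
 *
 * where [b] selects the first or second 8-byte half of the cipher output,
 * so each encryption yields pads for two consecutive nonce values.
 */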

static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tctx->cipher = cipher;
	return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->cipher);
}

static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *alg;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = shash_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_cipher_alg(spawn);

	err = -EINVAL;
	if (alg->cra_blocksize != VMAC_NONCEBYTES)
		goto err_free_inst;

	err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_alignmask = alg->cra_alignmask;

	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
	inst->alg.base.cra_init = vmac_init_tfm;
	inst->alg.base.cra_exit = vmac_exit_tfm;

	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
	inst->alg.digestsize = VMAC_TAG_LEN / 8;
	inst->alg.init = vmac_init;
	inst->alg.update = vmac_update;
	inst->alg.final = vmac_final;
	inst->alg.setkey = vmac_setkey;

	inst->free = shash_free_singlespawn_instance;

	err = shash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		shash_free_singlespawn_instance(inst);
	}
	return err;
}

static struct crypto_template vmac64_tmpl = {
	.name = "vmac64",
	.create = vmac_create,
	.module = THIS_MODULE,
};
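
/*
 * Registering this template makes instantiations such as "vmac64(aes)"
 * available through the usual crypto API lookup.  The underlying cipher
 * must have a 16-byte block size (enforced by the cra_blocksize check in
 * vmac_create), since both the nonce handling and the key derivation
 * assume 128-bit blocks.
 */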

static int __init vmac_module_init(void)
{
	return crypto_register_template(&vmac64_tmpl);
}

static void __exit vmac_module_exit(void)
{
	crypto_unregister_template(&vmac64_tmpl);
}

subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);